diff --git "a/5406.jsonl" "b/5406.jsonl" new file mode 100644--- /dev/null +++ "b/5406.jsonl" @@ -0,0 +1,740 @@ +{"seq_id":"604534535","text":"import os\n\nannee = 0\n\n\nwhile annee==0:\n annee = input(\"entrez une annee \")\n try:\n annee = int(annee)\n except:\n print(\"la valeur donnee de anee est invalide\")\n\nif annee%4==0:\n print(\"annee bissextile\",annee)\nelse:\n print(\"annee non bissextile\",annee) \n\n\nos.system(\"pause\")\n","sub_path":"0_bissextile/bissextil.py","file_name":"bissextil.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"372596903","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views import generic\n\nfrom launch.models import *\n\n\ndef index(request):\n return HttpResponse('Hello, world')\n\nclass GameView(generic.DetailView):\n model = Game\n template_name = 'launch/game.html'\n\n def get_context_data(self, **kwargs):\n context = super(GameView, self).get_context_data(**kwargs)\n context['commandmodule'] = self.object.launchpad.card_set.filter(suit=Card.COMMAND).count()\n context['lifesupport'] = self.object.launchpad.card_set.filter(suit=Card.LIFESUPPORT).count()\n context['sensors'] = self.object.launchpad.card_set.filter(suit=Card.SENSORS).count()\n context['fueltanks'] = self.object.launchpad.card_set.filter(suit=Card.FUELTANKS).count()\n context['engines'] = self.object.launchpad.card_set.filter(suit=Card.ENGINES).count()\n \n for hand in self.object.hand_set.exclude(player=None).exclude(player=self.request.user):\n context[hand.player.username] = hand.card_set.all()\n \n return context\n\ndef initializedeck(drawpile):\n \"\"\"Initialize a deck. 
All cards start in the draw pile initially.\"\"\"\n for suit in Card.suits:\n createcard(suit[0], 1, drawpile)\n createcard(suit[0], 1, drawpile)\n createcard(suit[0], 1, drawpile)\n createcard(suit[0], 2, drawpile)\n createcard(suit[0], 2, drawpile)\n createcard(suit[0], 3, drawpile)\n createcard(suit[0], 3, drawpile)\n createcard(suit[0], 4, drawpile)\n createcard(suit[0], 4, drawpile)\n createcard(suit[0], 5, drawpile)\n\ndef createcard(suit, number, hand):\n \"\"\"Create a card belonging to the specified hand, with given suit and value.\"\"\"\n c = Card()\n c.suit = suit\n c.number = number\n c.hand = hand\n c.save()\n\ndef deal(game):\n \"\"\"Deal 5 cards to each player in the game.\"\"\"\n for player in game.players.all():\n cards = game.drawpile.card_set.all().order_by('?')[:5]\n for card in cards:\n card.hand = game.hand_set.filter(player=player).get()\n card.save()\n\n@login_required\ndef create(request):\n g = Game()\n g.save()\n\n drawpile = Hand()\n drawpile.game = g\n drawpile.save()\n initializedeck(drawpile)\n \n discardpile = Hand()\n discardpile.game = g\n discardpile.save()\n \n launchpad = Hand()\n launchpad.game = g\n launchpad.save()\n\n g.drawpile = drawpile\n g.discardpile = discardpile\n g.launchpad = launchpad\n g.players.add(request.user)\n g.save()\n \n h = Hand()\n h.player = request.user\n h.game = g\n h.save()\n\n deal(g)\n return HttpResponseRedirect(reverse('launch:game', args=(g.id,)))\n","sub_path":"launch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"333261743","text":"import os\nimport re\n\nfrom app.common import FILE_PATH\nfrom .interface_filter_email import FilterEmailService\nfrom ......common import create_downloaded_attachment_file_name, make_dir, today_date\n\n\nclass FilterEmailOnSubjectNameandAttachmentTypeService(FilterEmailService):\n \"It will filter emails based on subject name and attachment type. 
It will filter emails with xlsx file attachment\"\n\n def __init__(self, message):\n self.message = message\n self.message.attachments = []\n self.create_folder()\n\n def create_folder(self):\n xlsx_folder_location = os.path.abspath(\n os.path.join(__file__, '..', '..', '..', '..', '..', '..', '..', 'data', 'noon_report', 'xlsx_file',\n str(today_date())))\n make_dir(xlsx_folder_location)\n\n def filter_email(self):\n self.filter_emails_on_subject()\n self.filter_emails_on_attachment_type()\n\n def filter_emails_on_subject(self):\n filtered_emails = []\n for e_mail in self.message.emails:\n if re.match(\"(\\w)+(_Noon_Report_)([0-2][0-9]|(3)[0-1])(-)(((0)[0-9])|((1)[0-2]))(-)\\d{4}\",\n e_mail['Subject']):\n filtered_emails.append(e_mail)\n self.message.emails = filtered_emails\n\n def filter_emails_on_attachment_type(self):\n filtered_emails = []\n for e_mail in self.message.emails:\n if self.check_and_download_attachment_with_xlsx_file(e_mail):\n filtered_emails.append(e_mail)\n self.message.emails = filtered_emails\n\n def check_and_download_attachment_with_xlsx_file(self, e_mail):\n for part in e_mail.walk():\n if part.get_content_maintype() == 'multipart':\n continue\n if part.get('content-Disposition') is None:\n continue\n file = part.get_filename()\n if bool(file):\n extension = file.split('.')[1]\n if extension == \"xlsx\":\n self.download_attachment(e_mail, part, file, \"xlsx\")\n return True\n return False\n\n def download_attachment(self, e_mail, part, file, extension):\n shipname = e_mail[\"Subject\"].split(\"_\", 1)[0]\n filename = create_downloaded_attachment_file_name(shipname, extension)\n filepath = FILE_PATH + str(today_date()) + \"/\" + filename\n with open(file, 'wb') as f:\n f.write(part.get_payload(decode=True))\n os.rename(file, filepath)\n self.message.attachments.append(filename)\n","sub_path":"Python/Tasks/thawe-marine-service/app/core/service/email_parser/strategy/filter_email/fiter_email_on_subjectname_and_xlsx_attachment.py","file_name":"fiter_email_on_subjectname_and_xlsx_attachment.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"307410986","text":"#encoding: utf-8\n'''\nhttps://www.interviewbit.com/problems/grid-unique-paths/\n\nGrid Unique Paths\n\nThe robot can only move either down or right at any point in time. 
The robot is trying to reach the bottom-right corner of the grid (marked ‘Finish’ in the diagram below).\n\n|---+---+---+---+---+---+---|\n| S | | | | | | |\n|---+---+---+---+---+---+---|\n| | | | | | | |\n|---+---+---+---+---+---+---|\n| | | | | | | E |\n|---+---+---+---+---+---+---|\n\nHow many possible unique paths are there?\n\nNote: A and B will be such that the resulting answer fits in a 32 bit signed integer.\n\nExample :\n\tInput : A = 2, B = 2\n\tOutput : 2\n\t2 possible routes : (0, 0) -> (0, 1) -> (1, 1) \n\t OR : (0, 0) -> (1, 0) -> (1, 1)\n'''\n\n'''\nSolution Outline: (DP)\n\tIn an AxB grid, S=(0,0), E=(A-1, B-1)\n\tLet f(x,y) be the number of ways to reach cell (x,y) from S\n\tf(0,1) = Number of ways to reach (0,1) from S == 1 (R)\n\tf(1,0) = Number of ways to reach (1,0) from S == 1 (D)\n\n\tf(x,0) = Number of ways to reach (x,0) from S == 1 (DD...D)\n\tf(0,y) = Number of ways to reach (0,y) from S == 1 (RR...R)\n\n\tFor any other cell (i,j), Paths to enter it are from its top and left-side cells.\n\t Therefore the number of ways to reach cell (i,j) == f(i,j) = f(i-1, j) + f(i, j-1)\n\n\tFill the table bottom-up, i: 0 to A, j: 0 to B, return f(A-1, B-1)\n\nSample run for the grid above:\n|----+----+----+----+----+----+----|\n| 0 | 1 | 1 | 1 | 1 | 1 | 1 |\n|----+----+----+----+----+----+----|\n| 1 | 2 | 3 | 4 | 5 | 6 | 7 |\n|----+----+----+----+----+----+----|\n| 1 | 3 | 6 | 10 | 15 | 21 | 28 |\n|----+----+----+----+----+----+----|\n\n'''\n\nclass Solution:\n\tdef count_unique_paths(self, A, B):\n\t\tDP = [[0 for j in xrange(B)] for i in xrange(A)]\n\t\tfor i in xrange(A):\n\t\t\tDP[i][0] = 1\n\n\t\tfor j in xrange(B):\n\t\t\tDP[0][j] = 1\n\n\t\tfor i in xrange(1, A):\n\t\t\tfor j in xrange(1, B):\n\t\t\t\tDP[i][j] = DP[i-1][j] + DP[i][j-1]\n\n\t\treturn DP[-1][-1]\n\n\nif __name__ == '__main__':\n\ts = Solution()\n\tassert s.count_unique_paths(2, 2) == 2\n\tassert s.count_unique_paths(3, 7) == 28\n\tassert s.count_unique_paths(3, 3) == 6\n\n","sub_path":"python/interviewbit/math/grid_unique_paths/grid_unique_paths.py","file_name":"grid_unique_paths.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"644614976","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSimple flask-based API to access FreeLing functionalities.\n\"\"\"\n\n__author__ = \"Víctor Peinado\"\n__email__ = \"vitojph@gmail.com\"\n__date__ = \"28/06/2013\"\n\n\nimport freeling\nfrom flask import Flask, Response, request\nfrom flask.ext.restful import Api, Resource\nimport json\n\n# #################################################################\n# FreeLing settings (borrowed from freeling-3.0/APIs/python/sample.py)\n\nPUNCTUATION = u\"\"\".,;:!? \"\"\"\n\n## Modify this line to be your FreeLing installation directory\nFREELINGDIR = \"/usr/local/\"\nDATA = FREELINGDIR + \"share/freeling/\"\nLANG = \"it\"\nfreeling.util_init_locale(\"default\");\n\n# Create language analyzer\n#la=freeling.lang_ident(DATA + \"common/lang_ident/ident.dat\")\n\n# Create options set for maco analyzer. 
Default values are Ok, except for data files.\nop = freeling.maco_options(LANG)\nop.set_active_modules(0,1,1,1,1,1,0,1,1,1,0)\nop.set_data_files(\"\",\n DATA + LANG + \"/locucions.dat\", \n #DATA + LANG + \"/quantities.dat\", \n \"\",\n DATA + LANG + \"/afixos.dat\", \n DATA + LANG + \"/probabilitats.dat\", \n DATA + LANG + \"/dicc.src\", \n DATA + LANG + \"/np.dat\",\n DATA + \"common/punct.dat\",\n DATA + LANG + \"/corrector/corrector.dat\")\n\n# Create analyzers\ntk = freeling.tokenizer(DATA + LANG + \"/tokenizer.dat\")\nsp = freeling.splitter(DATA + LANG + \"/splitter.dat\")\nmf = freeling.maco(op)\ntg = freeling.hmm_tagger(LANG, DATA + LANG + \"/tagger.dat\", 1, 2)\nsen = freeling.senses(DATA+LANG+\"/senses.dat\")\n\n\n# #################################################################\n# flask API\n\napp = Flask(__name__)\napi = Api(app)\n\n\n# ##############################################################################\n\nclass Splitter(Resource):\n \"\"\"Splits an input text into sentences.\"\"\"\n \n def post(self):\n text = request.json[\"texto\"]\n if text[-1] not in PUNCTUATION: \n text = text + \".\"\n tokens = tk.tokenize(text)\n sentences = sp.split(tokens, 0)\n \n # output list of sentences\n outputSentences = []\n \n for sentence in sentences:\n outputTokens = []\n for w in sentence.get_words():\n outputTokens.append(w.get_form())\n outputSentences.append(dict(oracion=\" \".join(outputTokens)))\n \n return Response(json.dumps(outputSentences), mimetype=\"application/json\")\n\n\n\nclass TokenizerSplitter(Resource):\n \"\"\"Splits an input text into tokenized sentences.\"\"\"\n \n def post(self):\n text = request.json[\"texto\"]\n if text[-1] not in PUNCTUATION: \n text = text + \".\"\n tokens = tk.tokenize(text)\n sentences = sp.split(tokens, 0)\n \n # output list of sentences\n outputSentences = []\n \n for sentence in sentences:\n outputTokens = []\n for w in sentence.get_words():\n outputTokens.append(w.get_form())\n outputSentences.append(dict(oracion=outputTokens))\n \n return Response(json.dumps(outputSentences), mimetype=\"application/json\")\n\n\n# ##############################################################################\n\nclass NERecognizer(Resource):\n \"\"\"Recognizes Named Entities from an input text.\"\"\"\n \n def post(self):\n text = request.json[\"texto\"]\n if text[-1] not in PUNCTUATION: \n text = text + \".\"\n tokens = tk.tokenize(text)\n sentences = sp.split(tokens, 0)\n sentences = mf.analyze(sentences)\n sentences = tg.analyze(sentences)\n \n output = []\n for sentence in sentences:\n words = sentence.get_words()\n for word in words:\n # Person (NP00SP0), Geographical location (NP00G00), Organization (NP00O00), and Others (NP00V00)\n if word.get_tag() in \"NP00SP0 NP00G00 NP00000 NP00V00\".split():\n entities = []\n entities.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))\n output.append(dict(palabra=word.get_form(), entidades=entities))\n\n return Response(json.dumps(output), mimetype=\"application/json\")\n\n\n# ##############################################################################\n\nclass DatesQuatitiesRecognizer(Resource):\n \"\"\"Recognizes dates, currencies, and quatities from an input text.\"\"\"\n \n def post(self):\n text = request.json[\"texto\"]\n if text[-1] not in PUNCTUATION: \n text = text + \".\"\n tokens = tk.tokenize(text)\n sentences = sp.split(tokens, 0)\n sentences = mf.analyze(sentences)\n sentences = tg.analyze(sentences)\n \n output = []\n for sentence in sentences:\n words = 
sentence.get_words()\n for word in words:\n # dates\n tag = word.get_tag()\n if tag[0] in \"W Z\".split():\n expression = []\n if tag == \"W\":\n expression.append(dict(lema=word.get_lemma(), categoria=\"temporal\"))\n else:\n if tag == \"Z\":\n category = \"numero\"\n elif tag == \"Zd\":\n category = \"partitivo\"\n elif tag == \"Zm\":\n category = \"moneda\"\n elif tag == \"Zp\":\n category = \"porcentaje\"\n elif tag == \"Zu\":\n category = \"magnitud\" \n else:\n category = \"numero\"\n\n expression.append(dict(lema=word.get_lemma(), categoria=category))\n \n output.append(dict(expresion=word.get_form(), entidades=expression))\n\n return Response(json.dumps(output), mimetype=\"application/json\")\n\n\n\n# ##############################################################################\n\n\nclass Tagger(Resource):\n \"\"\"Performs POS tagging from an input text.\"\"\"\n\n def post(self):\n \"\"\"docstring for post\"\"\"\n text = request.json[\"texto\"]\n if text[-1] not in PUNCTUATION: \n text = text + \".\"\n tokens = tk.tokenize(text)\n sentences = sp.split(tokens, 0)\n sentences = mf.analyze(sentences)\n sentences = tg.analyze(sentences)\n\n output = []\n for sentence in sentences:\n words = sentence.get_words()\n for word in words:\n lemmas = []\n lemmas.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))\n output.append(dict(palabra=word.get_form(), lemas=lemmas))\n \n return Response(json.dumps(output), mimetype=\"application/json\")\n\n\n# ##############################################################################\n\n\nclass WSDTagger(Resource):\n \"\"\"Performs POS tagging and WSD from an input text.\"\"\"\n\n def post(self):\n \"\"\"docstring for post\"\"\"\n text = request.json[\"texto\"]\n if text[-1] not in PUNCTUATION: \n text = text + \".\"\n tokens = tk.tokenize(text)\n sentences = sp.split(tokens, 0)\n sentences = mf.analyze(sentences)\n sentences = tg.analyze(sentences)\n sentences = sen.analyze(sentences)\n\n output = []\n for sentence in sentences:\n words = sentence.get_words()\n for word in words:\n lemmas = []\n lemmas.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))\n # split the senses and get just the synset ID\n synsets = []\n [synsets.append(synsetID.split(\":\")[0]) for synsetID in word.get_senses_string().split(\"/\")]\n output.append(dict(palabra=word.get_form(), lemas=lemmas, synsets=synsets))\n \n return Response(json.dumps(output), mimetype=\"application/json\")\n\n\n\n\n# #############################################################################\n# Api resource routing\n# split a text into sentences\napi.add_resource(Splitter, \"/splitter\")\n\n# split a text into tokenized sentences\napi.add_resource(TokenizerSplitter, \"/tokenizersplitter\")\n\n# perform PoS tagging from an input text\napi.add_resource(Tagger, \"/tagger\")\n\n# perform PoS tagging and WSD from an input text\napi.add_resource(WSDTagger, \"/wsdtagger\")\n\n# perform NE recognition from an input text\napi.add_resource(NERecognizer, \"/ner\")\n\n# recognizes dates, currencies and quantities\napi.add_resource(DatesQuatitiesRecognizer, \"/datesquantities\")\n\n\n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\", port=9999)\n","sub_path":"flws/flwsit.py","file_name":"flwsit.py","file_ext":"py","file_size_in_byte":8725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"322133","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2016 Supreeth Herle\n#\n# Licensed under the Apache License, Version 
2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"VBSP Server module.\"\"\"\n\nEMAGE_VERSION = 1\n\nMAX_NUM_CCS = 1\n\nPRT_UE_JOIN = \"join\"\nPRT_UE_LEAVE = \"leave\"\nPRT_VBSP_BYE = \"bye\"\nPRT_VBSP_REGISTER = \"register\"\nPRT_VBSP_TRIGGER_EVENT = \"te\"\nPRT_VBSP_AGENT_SCHEDULED_EVENT = \"sche\"\nPRT_VBSP_SINGLE_EVENT = \"se\"\nPRT_VBSP_HELLO = \"mHello\"\nPRT_VBSP_UES_ID = \"mUEs_id\"\nPRT_VBSP_RRC_MEAS_CONF = \"mUE_rrc_meas_conf\"\nPRT_VBSP_STATS = \"mStats\"\n\nPRT_TYPES = {PRT_VBSP_BYE: None,\n PRT_VBSP_REGISTER: None,\n PRT_UE_JOIN: None,\n PRT_UE_LEAVE: None,\n PRT_VBSP_HELLO: \"hello\",\n PRT_VBSP_UES_ID: \"UEs_id_repl\",\n PRT_VBSP_RRC_MEAS_CONF: None,\n PRT_VBSP_STATS: None}\n\n\nPRT_TYPES_HANDLERS = {PRT_VBSP_BYE: [],\n PRT_VBSP_REGISTER: [],\n PRT_UE_JOIN: [],\n PRT_UE_LEAVE: [],\n PRT_VBSP_HELLO: [],\n PRT_VBSP_UES_ID: []}\n","sub_path":"empower/vbsp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"419045622","text":"import socket\nclass Voilierclient:\n\n def __init__(self):\n self.id=0\n self.ipserv=\"\"\n self.port=0\n self.valsf=0\n self.valgv=0\n self.git=0\n self.lat=0\n self.longe=0\n self.vitvent=0\n self.orientvent=0\n self.taille=0\n \n def initcom(self,ip,port):\n self.ipserveur=ip\n self.port=port\n self.sock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n \n def txrx(self):\n trame=bytearray([self.id, 2, self.valsf, self.valgv]);\n self.sock.sendto(trame,(self.ipserveur , self.port))\n data,addr=self.sock.recvfrom(1024)\n \n lat= ord(data[4])<<24 | ord(data[5])<<16 | ord(data[6])<<8 | ord(data[7])\n\n if ord(data[4]) > 127:\n lat = (~lat) &0xFFFFFFFF\n lat = (lat+1)*-1 \n lat_f = float(lat)/10000000\n\n longe=ord(data[8])<<24 | ord(data[9])<<16 | ord(data[10])<<8 | ord(data[11])\n\n if ord(data[8]) > 127:\n longe = (~longe) &0xFFFFFFFF\n longe = (longe+1)*-1 \n longe_f = float(longe)/10000000\n \n self.id+=1\n self.taille=ord(data[1])\n self.vitvent=ord(data[2])\n self.orientvent=ord(data[3])\n self.lat= float(lat)/10000000\n self.longe=float(longe)/10000000\n self.gite=ord (data[12])\n \n\n##monVoilierClient=Voilierclient()\n##monVoilierClient.intcom(\"127.0.0.1\",5050)\n##monVoilierClient.valsf=30\n##monVoilierClient.id=22\n##monVoilierClient.valgv=90\n##monVoilierClient.txrx()\n##print \"latitude =\",monVoilierClient.lat\n##print \"longitude =\",monVoilierClient.longe\n##print \"id =\",monVoilierClient.id\n##print \"taille =\",monVoilierClient.taille\n##print \"vitvent =\",monVoilierClient.vitvent\n##print \"orientvent =\",monVoilierClient.orientvent\n##print \"gite =\",monVoilierClient.gite\n\n\n\n \n \n \n","sub_path":"classemonvoilier.py","file_name":"classemonvoilier.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"361448300","text":"import pandas as pd\nimport numpy as np\nimport multiprocessing as mp\nfrom functools import partial\nimport itertools\nfrom 
collections import Counter\nimport sys\nsys.path.append('/vol/research/mammo2/will/python/usefulFunctions')\nimport usefulFunctions as uf\n\n# Returns the mass classification of the ImageSOPIUID\ndef mp_get_mass_classification(sheet_1, ImageSOPIUID):\n #Get index\n index = sheet_1['ImageSOPIUID']\n index = index[index == ImageSOPIUID].index[0]\n out = sheet_1['MassClassification'][index]\n if pd.isnull(out):\n out = 'nan'\n return out\n\n# Returns the Conspicuity of the ImageSOPIUID\ndef mp_get_conspicuity(sheet_1, ImageSOPIUID):\n # Get index\n index = sheet_1['ImageSOPIUID']\n index = index[index == ImageSOPIUID].index[0]\n out = sheet_1['Conspicuity'][index]\n if pd.isnull(out):\n out = 'nan'\n return out\n\n\n# Should be passed ImageSOPIUID of unique lesions only to avoid repeated\n# matches.\n# If unique contralaterals is set to true then the function will only return a\n# single contralateral per Image_SOPIUID ensuring that there is at most one\n# contralateral per patient.\ndef cont_match(properties_to_match, sheet_0,\n unique_contralaterals, Image_SOPIUID):\n index = sheet_0['ImageSOPIUID']\n index = index[index == Image_SOPIUID].index[0]\n # Run through each image in the spreadsheet looking for contralaterals\n tmp_properties = dict(properties_to_match)\n matches = []\n for index in range(len(sheet_0['ImageSOPIUID'])):\n for _ in tmp_properties:\n tmp_properties[_] = sheet_0[_][index]\n if tmp_properties == properties_to_match:\n if sheet_0['ImageSOPIUID'][index] != Image_SOPIUID:\n # Contrilateral found\n if unique_contralaterals == True:\n return [sheet_0['ImageSOPIUID'][index]]\n else:\n matches.append(sheet_0['ImageSOPIUID'][index])\n return matches\n\n\n# Write a funciton that finds all the contralaterals for a given\n# ImageSOPIUID\ndef mp_get_contralateral(sheet_0, one_per_study, items):\n # Get properties for image:\n # StudyIUID,ViewPosition, ImageLaterality, PresentationIntenetType\n # Compute properties of contralateral\n # Search for all contralateral images\n # Through in an if statement incase we want one cont per StudyIUID\n\n count, length, Image_SOPIUID = items\n\n #print(' ', count, '/', length)\n\n # Get properties of the image\n index = sheet_0['ImageSOPIUID']\n index = index[index == Image_SOPIUID].index[0]\n properties_template = {'StudyIUID': '',\n 'ViewPosition': '',\n 'ImageLaterality': '',\n 'PresentationIntentType': ''}\n lesion_properties = dict(properties_template)\n for item in lesion_properties:\n lesion_properties[item] = sheet_0[item][index]\n\n # Set contralateral properties\n cont_properties = dict(lesion_properties)\n if cont_properties['ImageLaterality'] == 'R':\n cont_properties['ImageLaterality'] = 'L'\n else:\n cont_properties['ImageLaterality'] = 'R'\n\n # Search for images that match cont_properties\n return len(cont_match(\n cont_properties, sheet_0,\n one_per_study, Image_SOPIUID))\n\n\n\ndef main():\n pool = mp.Pool()\n # Get list of excel files\n spreadsheet_paths = uf.getFiles(\n '/vol/research/mammo2/will/data/batches/metadata',\n '*IMAGE.xls')\n sheet_0 = pd.ExcelFile(spreadsheet_paths[0]).parse(0)\n sheet_1 = pd.ExcelFile(spreadsheet_paths[0]).parse(1)\n\n print(len(spreadsheet_paths), ' spreadsheets found')\n\n # Create stats dict\n stats_template = {'ImageSOPIUID':0,\n 'StudyIUID':0,\n 'ROIs':0,\n 'Unique ROIs':0,\n 'Unique Contralaterals':0,\n 'Mass Classification':0,\n 'Mass Conspicuity':0}\n stats = {}\n batch_numbers = [1, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 18, 19, 21,\n 22, 23, 30, 31, 32, 33, 40, 42, 43 , 44, 45, 
46, 47, 48,\n 49, 50, 51]\n for num in batch_numbers:\n stats.update({'batch ' + str(num): dict(stats_template)})\n\n # Load stats into stats\n all_mass_clasification = []\n all_mass_conspicuities = []\n for path in spreadsheet_paths:\n batch = 'batch ' + path.split('_')[1]\n print('Batch: ', batch)\n sheet_0 = pd.ExcelFile(path).parse(0)\n sheet_1 = pd.ExcelFile(path).parse(1)\n\n # Get ImageSOPIUID\n print(' Getting ImageSOPIUIDs')\n stats[batch]['ImageSOPIUID'] = len(sheet_0['ImageSOPIUID'])\n\n # Get StudyIUID\n print(' Getting StudyIUIDs')\n tmp_id = ''\n for id in sheet_0['StudyIUID'] :\n if id != tmp_id:\n stats[batch]['StudyIUID'] += 1\n tmp_id = id\n\n # Get ROIs\n print(' Getting ROIs')\n stats[batch]['ROIs'] = len(sheet_1['ImageSOPIUID'])\n\n # Get Unique ROIs\n print(' Getting unique_ROIs')\n unique_ROIs = []\n tmp_ImageSOPIUID = ''\n for index, ImageSOPIUID in enumerate(sheet_1['StudyIUID']):\n if ImageSOPIUID != tmp_ImageSOPIUID:\n unique_ROIs.append(sheet_1['ImageSOPIUID'][index])\n tmp_ImageSOPIUID = ImageSOPIUID\n stats[batch]['Unique ROIs'] = len(unique_ROIs)\n\n # Get contralaterals\n # Make sure only unique ROIs are used as args\n print(' Getting contralaterals')\n func = partial(mp_get_contralateral, sheet_0, True)\n results = pool.map(\n func, list(zip(\n range(len(unique_ROIs)),\n itertools.repeat(len(unique_ROIs)),\n unique_ROIs)))\n stats[batch]['Unique Contralaterals'] = sum(results)\n\n # Get mass classifications\n func = partial(mp_get_mass_classification, sheet_1)\n print(' Getting mass classifications')\n results = pool.map(func, unique_ROIs)\n stats[batch]['Mass Classification'] = Counter(results)\n all_mass_clasification.extend(results)\n\n #Get Conspicuity\n func = partial(mp_get_conspicuity, sheet_1)\n print(' Getting mass conspicuities')\n results = pool.map(func, unique_ROIs)\n stats[batch]['Mass Conspicuity'] = Counter(results)\n all_mass_conspicuities.extend(results)\n\n # Total up the stats\n stats_total = dict(stats_template)\n for batch in stats:\n for single_stat in stats[batch]:\n try:\n stats_total[single_stat] += stats[batch][single_stat]\n except:\n pass\n stats_total['Mass Classification'] = Counter(all_mass_clasification)\n stats_total['Mass Conspicuity'] = Counter(all_mass_conspicuities)\n # Print results\n for batch in stats:\n print(batch)\n for single_stat in stats[batch]:\n print(' ', single_stat, ': ', stats[batch][single_stat])\n print('\\nTotals')\n for single_stat in stats_total:\n print(' ', single_stat, ': ', stats_total[single_stat])\n print('Unique ROIs = ', '{:.2f}'.format(\n stats_total['Unique ROIs']\n / stats_total['ImageSOPIUID']\n * 100), '%')\n\nif __name__ == '__main__':\n main()\n","sub_path":"databaseStats.py","file_name":"databaseStats.py","file_ext":"py","file_size_in_byte":7366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"74438631","text":"#!/usr/lib/python\n#-*-coding:utf-8-*-\nimport LMF\nimport pingce\nimport IO\nimport time\npre=\"traindatas/\"\nresult_pre=\"trainresult/\"\nlog_pre=\"logs/\"\n\n\ndef run_consin(train,test,number,hotitem,traintimes=100,alpha=0.02,lamda=1,runtimes=4):\n msg=str()\n now = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n msg=\"===================start:\" + str(now) + \"=========================\\r\\n\"\n\n msg=msg+\"因子数量=\" + str(number) + \"\\r\\n\" + \"训练次数\" + str(traintimes) + \"\\r\\n\" + \"alpha=\" + str(\n alpha) + \"\\r\\n\" + \"lamda=\" + str(lamda) + \"\\r\\n循环次数=\"+str(runtimes)+\"\\r\\n\"\n\n # 
IO.write_key_list_dict(train, \"train.csv\")\n # IO.write_key_list_dict(test, \"test.csv\")\n\n msg = msg+ \"用户数:\"+str(len(train))+\" 物品数:\"+str(len(hotitem))+\"\\r\\n\"\n currency_totl,recall_totl=0.0,0.0\n for i in range(0,runtimes):\n [P, Q] = LMF.initmodel(train, number)\n train_ = LMF.caiyang(train, hotitem)\n [P, Q] = LMF.lmf(train_, number, traintimes, alpha, P, Q, lamda)\n IO.write_key_list_dict(P, result_pre+str(i+1)+\"p.csv\")\n IO.write_key_list_dict(Q, result_pre+str(i+1)+\"q.csv\")\n\n # temp=LMF.getmax_larten(P,Q,number)\n # IO.write_dict_dict(temp,filepath=\"temp.csv\")\n # print testfunction.read(P,Q,number)\n\n currency,recall=result_consin(P, Q, train ,test, 15,i)\n currency_totl+=currency\n recall_totl+=recall\n\n msg=msg+\"平均准确率:\"+str(currency_totl/runtimes)+\", 平均召回率:\"+str(recall_totl/runtimes)+\"\\r\\n\"\n IO.log(filepath=log_pre+\"log.txt\",msg=msg)\n\ndef result_consin(P,Q,train,test,resy_count,i):\n result = LMF.content_recommendation(P, Q, train, resy_count)\n bzresult = LMF.oresult_to_bzresult(result)\n IO.write_key_list_dict(bzresult,result_pre+str(i+1)+\"_result.csv\")\n currency,recall = pingce.pingce(bzresult, test)\n return currency,recall\n\ndef result(P,Q,train,test,resy_count,i):\n result = LMF.content_recommendation(P, Q, train, resy_count)\n bzresult = LMF.oresult_to_bzresult(result)\n IO.write_key_list_dict(bzresult,result_pre+str(i+1)+\"_result.csv\")\n currency,recall = pingce.pingce(bzresult, test)\n return currency,recall\n\n\ndef run_once(train,test,hotitem,number=30,traintimes=100,alpha=0.02,lamda=0.01):\n msg = str()\n now = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n msg = \"===================start:\" + str(now) + \"=========================\\r\\n\"\n msg = msg + \"因子数量=\" + str(number) + \"\\r\\n\" + \"训练次数\" + str(traintimes) + \"\\r\\n\" + \"alpha=\" + str(\n alpha) + \"\\r\\n\" + \"lamda=\" + str(lamda) + \"\\r\\n\"\n msg = msg + \"用户数:\" + str(len(train)) + \" 物品数:\" + str(len(hotitem)) + \"\\r\\n\"\n\n [P, Q] = LMF.initmodel(train, number)\n train_ = LMF.caiyang(train, hotitem)\n [P, Q] = LMF.lmf(train_, number, traintimes, alpha, P, Q, lamda)\n IO.write_key_list_dict(P, result_pre + \"p.csv\")\n IO.write_key_list_dict(Q, result_pre + \"q.csv\")\n currency_totl, recall_totl = result_consin(P, Q, train, test, 15,0)\n msg = msg + \"平均准确率:\" + str(currency_totl) + \", 平均召回率:\" + str(recall_totl) + \"\\r\\n\"\n IO.log(filepath=log_pre + \"log.txt\", msg=msg)\n\ndef run():\n #test, train, totl_item = IO.train_test_product(filepath=\"data.csv\",pre=\"datas/\",maxline=1000000,rows=3)\n test,train,totl_item=IO.just_read_train_test(pre=pre)\n item_hot = LMF.item_and_hot(train)\n run_once(train,test,number=30,hotitem=item_hot,traintimes=40,alpha=0.02,lamda=0.5)\n\nif __name__==\"__main__\":\n run()","sub_path":"trainLMF.py","file_name":"trainLMF.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"520655534","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nfrom __future__ import unicode_literals\nfrom datetime import date\n\nimport sys\nimport os\n\nAUTHOR = u'OpenOffice Community'\nSITENAME = 'Apache OpenOffice'\nSITEURL = ''\nCURRENTYEAR = date.today().year\n\nPATH = 'content'\n\nTIMEZONE = 'UTC'\n\nDEFAULT_DATE = 'fs'\nDEFAULT_LANG = u'en'\n\n# Save pages using full directory preservation\nPAGE_PATHS = [ '.' ]\nSTATIC_PATHS = [ '.' 
]\nPATH_METADATA= '(?P.*)\\..*'\nPAGE_SAVE_AS= '{path_no_ext}.html'\nPAGE_URL= '{path_no_ext}.html'\n#SLUGIFY_SOURCE = 'basename'\n#PAGE_SAVE_AS = '{slug}.html'\n\n# We don't use articles, but we don't want pelican to think\n# that content/ contains articles.\nARTICLE_PATHS = [ 'articles' ]\n\n# Disable these pages\nARCHIVES_SAVE_AS = ''\nARTICLE_SAVE_AS = ''\nAUTHORS_SAVE_AS = ''\nCATEGORIES_SAVE_AS = ''\nINDEX_SAVE_AS = ''\nTAGS_SAVE_AS = ''\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# TOC Generator\n#PLUGIN_PATHS = ['./theme/plugins']\n#PLUGINS = ['toc']\nTOC_HEADERS = r\"h[1-6]\"\n\n# Unused links\nLINKS = ( )\nSOCIAL = ( )\n\n# Blogroll\n#LINKS = (('Pelican', 'https://getpelican.com/'),\n# ('Python.org', 'https://www.python.org/'),\n# ('Jinja2', 'https://palletsprojects.com/p/jinja/'),\n# ('You can modify those links in your config file', '#'),)\n\n# Social widget\n#SOCIAL = (('You can add links in your config file', '#'),\n# ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nMARKDOWN = {\n 'extension_configs': {\n 'markdown.extensions.extra': {},\n 'markdown.extensions.admonition': {},\n 'markdown.extensions.codehilite': {\n 'css_class': 'highlight'\n },\n 'markdown.extensions.meta': {},\n 'smarty' : {\n 'smart_angled_quotes' : 'true'\n },\n 'markdown.extensions.toc': {\n 'permalink': 'true',\n },\n }\n}\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"76818559","text":"from data import db_session\nfrom data.jobs import Jobs\nfrom flask import jsonify, Blueprint, request\n\nblueprint = Blueprint(\n 'jobs_api',\n __name__,\n template_folder='templates'\n)\n\n\n@blueprint.route('/api/jobs')\ndef get_jobs():\n db_sess = db_session.create_session()\n jobs = db_sess.query(Jobs).all()\n return jsonify(\n {\n 'jobs':\n [item.to_dict(only=('job', 'team_leader', 'work_size', 'collaborators', 'is_finished'))\n for item in jobs]\n }\n )\n\n\n@blueprint.route('/api/jobs/', methods=['GET'])\ndef get_one_jobs(jobs_id):\n db_sess = db_session.create_session()\n jobs = db_sess.query(Jobs).get(jobs_id)\n if not jobs:\n return jsonify({'error': 'Not found'})\n return jsonify(\n {\n 'jobs': jobs.to_dict(only=(\n 'job', 'team_leader', 'work_size', 'collaborators', 'is_finished'))\n }\n )\n\n\n@blueprint.route('/api/jobs', methods=['POST'])\ndef create_jobs():\n if not request.json:\n return jsonify({'error': 'Empty request'})\n elif not all(key in request.json for key in\n ['id', 'job', 'team_leader', 'work_size', 'collaborators', 'is_finished']):\n return jsonify({'error': 'Bad request'})\n db_sess = db_session.create_session()\n if db_sess.query(Jobs).filter(Jobs.id == request.json['id']).first() is not None:\n return jsonify({'error': 'ID already exists'})\n jobs = Jobs(\n id=request.json['id'],\n team_leader=request.json['team_leader'],\n job=request.json['job'],\n collaborators=request.json['collaborators'],\n work_size=request.json['work_size'],\n is_finished=request.json['is_finished'],\n )\n db_sess.add(jobs)\n db_sess.commit()\n return jsonify({'success': 'OK'})\n\n\n@blueprint.route('/api/jobs/', methods=['DELETE'])\ndef delete_jobs(jobs_id):\n db_sess = 
db_session.create_session()\n jobs = db_sess.query(Jobs).get(jobs_id)\n if not jobs:\n return jsonify({'error': 'Not found'})\n db_sess.delete(jobs)\n db_sess.commit()\n return jsonify({'success': 'OK'})\n","sub_path":"data/jobs_api.py","file_name":"jobs_api.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"476915018","text":"import os\r\nimport cv2\r\nimport numpy as np\r\nfrom skimage import filters\r\n# 1.Harris corner detector\r\n# 2.normalized cross correlation (NCC)\r\n# 3.set a threshold to keep only matches that have a large NCC score\r\n# 4.RANSAC to robustly estimate the homography from the noisy correspondences\r\n# \fnd corresponding features, estimate a homography\r\n# 1.Harris corner detector\r\n# 1.Harris corner detector\r\n# 1.Harris corner detector\r\n\r\ndef Harris(img):\r\n # img = cv2.resize(img, dsize=(600, 400))\r\n # 灰度化\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n gray = np.float32(gray)\r\n # 高斯滤波\r\n gray = cv2.GaussianBlur(gray, (3, 3), 1.5)\r\n # 角点检测 第三个参数为角点检测的敏感度,其值必须介于3~31之间的奇数\r\n #   • img - 数据类型为 float32 的输入图像。\r\n #   • blockSize - 角点检测中要考虑的领域大小。\r\n #   • ksize - Sobel 求导中使用的窗口大小\r\n #   • k - Harris 角点检测方程中的自由参数,取值参数为 [0,04,0.06].\r\n dst = cv2.cornerHarris(gray, 3, 5, 0.06)\r\n dst[dst < 0.01 * dst.max()]=0\r\n index=np.where(dst!=0)\r\n #NMS\r\n size=2\r\n len=index[0].shape[0]\r\n for i in range(len) :\r\n r=index[0][i]\r\n c=index[1][i]\r\n r0 = np.maximum(r - size, 0)\r\n r1 = np.minimum(r + size, dst.shape[0])\r\n c0 = np.maximum(c - size, 0)\r\n c1 = np.minimum(c + size, dst.shape[1])\r\n if np.any(dst[r,c]\n

<h3 id="{index}"><i class="fa fa-{symbol}"></i> {title}</h3>
\n {body}\n\n'''\n values = { 'colour' : colour,\n 'symbol' : symbol,\n 'title' : title,\n 'body' : body,\n 'index' : index }\n return template.format_map(values)\n\n# Colours\n# ~ green\n# ~ blue\n# ~ yellow\n\n# Symbols\n# ~ star\n# ~ file-o\n# ~ info-circle\n# ~ pencil-square-o\n# ~ eye\n# ~ key\n\nsolnfilename = args.output.replace('.ipynb', '-soln.ipynb')\nsolnflag = False\n\nprint('Reading input file: ' + args.input.name)\n\nplain = nf.read(args.input, nf.NO_CONVERT)\ncelllist = plain['cells']\nmarkdownlist = [c for c in celllist if c['cell_type']=='markdown']\n\nsolnb = None\n\nif args.headercell is not None:\n print('Reading from headercell: ' + args.headercell.name)\n header = nf.read(args.headercell, nf.NO_CONVERT)\n plain['cells'].insert(0, *header['cells'])\n args.headercell.close()\n\nfor c in markdownlist:\n line = c['source'].split('\\n')\n if 'Prerequisites' in line[0]:\n colour = 'green'\n symbol = 'star'\n title = line[0].lstrip('#')\n body = '\\n'.join(line[1:])\n safetitle = title.replace(' ', '-')\n safetitle = safetitle.replace('`', '')\n index = urlquote(safetitle, safe='?!$\\\\') + '%0A'\n elif 'Overview' in line[0]:\n colour = 'green'\n symbol = 'file-o'\n title = line[0].lstrip('#')\n body = '\\n'.join(line[1:])\n safetitle = title.replace(' ', '-')\n safetitle = safetitle.replace('`', '')\n index = urlquote(safetitle, safe='?!$\\\\') + '%0A'\n elif 'Info' in line[0]:\n colour = 'blue'\n symbol = 'info-circle'\n subtitle = line[0].split(':')\n title = ':'.join(subtitle[1:])\n body = '\\n'.join(line[1:])\n safetitle = title.replace(' ', '-')\n safetitle = safetitle.replace('`', '')\n index = urlquote(safetitle, safe='?!$\\\\') + '%0A'\n elif 'Exercise' in line[0]:\n colour = 'yellow'\n symbol = 'pencil-square-o'\n subtitle = line[0].split(':')\n title = ':'.join(subtitle[1:])\n body = '\\n'.join(line[1:])\n safetitle = title.replace(' ', '-')\n safetitle = safetitle.replace('`', '')\n link = './' + solnfilename.split('/')[-1] + '#' + urlquote(safetitle, safe='?!$\\\\') + '%0A'\n #print(link)\n body += '\\n\\n [Solution]({link})'.format(link=link)\n safetitle = title.replace(' ', '-')\n safetitle = safetitle.replace('`', '')\n index = urlquote(safetitle, safe='?!$\\\\') + '%0A'\n elif 'Solution' in line[0]:\n solnflag = True\n if solnb is None:\n solnb = nf.v4.new_notebook()\n solnb['metadata'] = plain['metadata']\n solnb['cells'].append(nf.v4.new_markdown_cell(source='# Solutions'))\n \n solnb['cells'].append(nf.v4.new_markdown_cell(source=''))\n # REDEFINE c\n solnb['cells'][-1] = c.copy()\n plain['cells'].remove(c)\n c = solnb['cells'][-1]\n \n colour = 'blue'\n symbol = 'eye'\n subtitle = line[0].split(':')\n title = ':'.join(subtitle[1:])\n body = '\\n'.join(line[1:])\n safetitle = title.replace(' ', '-')\n safetitle = safetitle.replace('`', '')\n index = urlquote(safetitle, safe='?!$\\\\') + ' '\n elif 'Key Points' in line[0]:\n colour = 'green'\n symbol = 'key'\n title = line[0].lstrip('#')\n body = '\\n'.join(line[1:])\n safetitle = title.replace(' ', '-')\n safetitle = safetitle.replace('`', '')\n index = urlquote(safetitle, safe='?!$\\\\') + ' '\n elif 'Schedule' in line[0]:\n colour = None\n body = '\\n'.join(line)\n html = nc.filters.markdown2html(body)\n html2 = html.replace('', '
')\n        html = html2.replace('', '')\n        c['source'] = html\n    else:\n        colour = None\n    \n    if colour is not None:\n        htmltitle = nc.filters.markdown2html(title)\n        temp = htmltitle.replace('<p>', '')\n        htmltitle = temp.replace('</p>
', '')\n \n htmlbody = nc.filters.markdown2html(body)\n temp = htmlbody.replace('*', '*')\n htmlbody = temp.replace('_', '_')\n \n c['source'] = apply_template(colour, symbol, htmltitle, htmlbody, index)\n\ndef navigation_triple(directory, inputfile):\n print('Directory: ', directory)\n print('contains: ')\n contents = os.listdir(directory)\n contents.sort()\n try:\n # Remove checkpoints folder from list\n contents.remove('.ipynb_checkpoints')\n except ValueError:\n pass\n \n # Remove solution files from index\n contents = [f for f in contents if '-soln' not in f]\n \n for afile in contents:\n print(' ', afile)\n \n contents.append(contents[0])\n \n current = inputfile.split('/')[-1]\n # Exceptional case if you're making a new solution document\n if '-soln' in current:\n current = current.replace('-soln','')\n \n index = contents.index(current)\n \n outdir = './'\n print('Navigation triple is: ', outdir+contents[index-1], outdir+contents[0], outdir+contents[index+1])\n triple = { 'previous' : outdir+contents[index-1],\n 'index' : outdir+contents[0],\n 'next' : outdir+contents[index+1] }\n return triple\n\nif args.footercell is not None:\n print('Reading from footercell: ' + args.footercell.name)\n footer = nf.read(args.footercell, nf.NO_CONVERT)\n \n triple = {'index' : './00_schedule.ipynb'} # Prevent error\n if args.sourcedir is not None:\n triple = navigation_triple(args.sourcedir, args.input.name)\n for cell in footer['cells']:\n #print(cell['source'].format_map(triple))\n cell['source'] = cell['source'].format_map(triple)\n \n inputname = './' + args.input.name.split('/')[-1]\n if triple['index'] != inputname:\n plain['cells'].append(*footer['cells'])\n args.footercell.close()\n \noutfp = open(args.output, 'w')\nprint('Writing output file: ' + args.output)\nplain['metadata']['celltoolbar'] = 'None'\nplain['metadata']['livereveal'] = {'scroll' : True}\nnf.write(plain, outfp)\nargs.input.close()\noutfp.close()\n\nif solnflag:\n solfp = open(solnfilename, 'w')\n print('and also solution outputfile')\n solnb['metadata']['celltoolbar'] = 'None'\n #solnb['metadata']['livereveal'] = {\"scroll\" : True}\n nf.write(solnb, solfp)\n solfp.close()\n \n \n","sub_path":"tools/nbfancy.py","file_name":"nbfancy.py","file_ext":"py","file_size_in_byte":7849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"4972448","text":"# vim :set ts=4 sw=4 sts=4 et :\nimport os, sys, signal, time\nfrom nose.tools import ok_, eq_, istest\n\nsys.path.append('..')\n\nimport miniredis.server\nfrom miniredis.client import RedisClient\n\npid = None\nr = None\n\ndef setup_module(module):\n global pid, r\n pid = miniredis.server.fork()\n print(\"Launched server with pid %d.\" % pid)\n time.sleep(1)\n r = RedisClient()\n\ndef teardown_module(module):\n global pid\n os.kill(pid, signal.SIGKILL)\n print(\"Killed server.\")\n\n\ndef test_put():\n eq_(r.set('test:key', 'value'),'OK')\n\ndef test_get():\n eq_(r.get('test:key'),'value')\n\ndef test_del():\n # single key\n eq_(r.delete('test:key'), 1)\n eq_(r.get('test:key'),None)\n # multiple keys\n r.set('test:key1', 'value')\n r.set('test:key2', 'value')\n eq_(r.delete('test:key1', 'test:key2'), 2)\n\ndef test_dump():\n eq_(r.set('test:key','value'), 'OK')\n eq_(r.dump('test:key'),'value')\n\ndef test_exists():\n eq_(r.exists('test:key'), 1)\n eq_(r.exists('test:notthere'), 0)\n\ndef test_expire():\n # missing key\n eq_(r.expire('test:notthere', 2), 0)\n # valid setting\n eq_(r.expire('test:key', 2), 1)\n 
eq_(r.ttl('test:key'), 2)\n # reset ttl\n eq_(r.set('test:key','value'), 'OK')\n eq_(r.ttl('test:key'), -1)\n\ndef test_expireat():\n # missing key\n at = int(time.time() + 2)\n eq_(r.expireat('test:notthere', at), 0)\n # valid setting\n at = int(time.time() + 2)\n eq_(r.expireat('test:key', at), 1)\n eq_(r.ttl('test:key'), 2)\n # reset ttl\n eq_(r.set('test:key','value'), 'OK')\n eq_(r.ttl('test:key'), -1)\n\ndef test_keys():\n # place a test key\n eq_(r.set('test:key','value'), 'OK')\n eq_(r.keys('*:key'), ['test:key'])\n\n","sub_path":"tests/test_keys.py","file_name":"test_keys.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"259986044","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\n# IDEA\n# Download .csv file from https://www.football-data.co.uk/germanym.php (updated frequently, only contains past matches)\n# add missing matches to database\n# Fetch dates for future matches from https://www.worldfootball.net/all_matches/bundesliga-2019-2020/ (updated realtime, contains all matches)\n# predict them based on database contents\n\ndef fetchseason(season): # season == \"19-20\"\n firstseasonyear = season.split('-')[0]\n secondseasonyear = season.split('-')[1]\n\n pastbettingdata = []\n matchdata = []\n\n #\n # BEGIN past betting data\n #\n\n URL = \"https://www.football-data.co.uk/mmz4281/\"+firstseasonyear+secondseasonyear+\"/D1.csv\" # taken from https://www.football-data.co.uk/germanym.php\n # .../season/[D1|D2].csv #TODO: when D1 and when D2?\n response = requests.get(URL)\n page = response.text\n matches = page.split('\\n')\n matches.pop(0) # remove header line\n for match in matches:\n if match != \"\":\n matchvalues = match.split(',')\n #print(match)\n #print(matchvalues[1]) # date\n #print(matchvalues[2]) # time\n pastbettingdataentry = [matchvalues[1],matchvalues[2],matchvalues[11],matchvalues[13],matchvalues[12],matchvalues[14],matchvalues[23],matchvalues[24],matchvalues[25]]\n # [date ,time ,home-shots ,h-shots-target ,away-shots ,a-shots-target ,odds-home ,odds-draw ,odds-away ]\n pastbettingdata.append(pastbettingdataentry)\n\n #\n # END past betting data\n # BEGIN match data\n #\n\n URL = \"https://www.worldfootball.net/all_matches/bundesliga-20\"+firstseasonyear+\"-20\"+secondseasonyear+\"/\"\n response = requests.get(URL)\n #page = response.text.replace(\"\\t\",\"\") # remove whitespace for better debugging\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n date = \"\" # initialize date at current scope\n for tr in soup.findAll('tr'):\n if len(tr.findAll('form')) == 0 and len(tr.findAll('th')) == 0 and len(tr.findAll('td')) >=5: # only when tr does contain neither form nor tableheader AND contains enough 'td's\n\n if type(tr.findAll('td')[0].a) != type(None): # if (new) date is set\n date = tr.findAll('td')[0].a.string # extract date from html\n\n time = tr.findAll('td')[1].string # extract time from html\n\n firstteam = tr.findAll('td')[2].a['href'].split('/')[2] # extract first team from html\n\n secondteam = tr.findAll('td')[4].a['href'].split('/')[2] # extract second team from html\n\n finalresult = \"\"\n if type(tr.findAll('td')[5].a.string) != type(None) and tr.findAll('td')[5].a.string.strip() != \"-:-\": # if result is not yet added\n result = tr.findAll('td')[5].a.string.split() # extract match results from html\n finalresult = result[0] # extract final match result\n #halftimeresult = 
result[1].replace('(','').replace(')','') # extract halftime result\n \n if finalresult != \"\": # only add past games for now\n matchdataentry = [date,time,firstteam,secondteam,finalresult] # time is currently unused\n matchdata.append(matchdataentry)\n \n #\n # END match data\n # BEGIN merging\n #\n\n matches = []\n\n for index, pbd in enumerate(pastbettingdata): # only past matches are added for now\n md = matchdata[index] #date,time,firstteam.secondteam,finalresult\n\n hg = md[4].split(':')[0]\n ag = md[4].split(':')[1]\n\n match = {}\n match[\"home-team\"] = md[2] # hometeam\n match[\"away-team\"] = md[3] # awayteam\n date = datetime.strptime(pbd[0], \"%d/%m/%Y\") # get date from string\n datenum = int(datetime.strftime(date, \"%Y%m%d\")) # number from date\n match[\"date\"] = datenum # date\n match[\"odds-home\"] = pbd[6] # odds-home\n match[\"odds-draw\"] = pbd[7] # odds-draw\n match[\"odds-away\"] = pbd[8] # odds-away\n match[\"home-goals\"] = hg # home-goals\n match[\"home-shots\"] = pbd[2] # home-shots\n match[\"home-shots-on-target\"] = pbd[3] # home-shots-on-target\n match[\"away-goals\"] = ag # away-goals\n match[\"away-shots\"] = pbd[4] # away-shots\n match[\"away-shots-on-target\"] = pbd[5] # away-shots-on-target\n\n matches.append(match)\n \n return matches\n\n# testing:\ndef main():\n print(fetchseason(\"19-20\")[0])\nif __name__ == \"__main__\":\n main()","sub_path":"Web/Backend/fetching/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"272517603","text":"import os\nimport cv2\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nfrom bson.objectid import ObjectId\nfrom skimage.transform import resize\nfrom annoy import AnnoyIndex\n\nimport detect_face\nimport facenet\n\n\ndef to_rgb(img):\n w, h = img.shape\n ret = np.empty((w, h, 3), dtype=np.uint8)\n ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img\n return ret\n\n\nclass Facedetection:\n def __init__(self):\n self.minsize = 20\n self.threshold = [0.6, 0.7, 0.7]\n self.factor = 0.709\n\n # self.minsize = 40 # minimum size of face\n # self.threshold = [0.6, 0.7, 0.9] # three steps's threshold\n # self.factor = 0.709 # scale factor\n\n with tf.Graph().as_default():\n sess = tf.Session()\n with sess.as_default():\n self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(sess, \"data/\")\n\n def detect_face(self, image):\n bounding_boxes, points = detect_face.detect_face(image, self.minsize, self.pnet, self.rnet, self.onet, self.threshold, self.factor)\n return bounding_boxes, points\n\n\nclass facenetEmbedding:\n def __init__(self, model_path):\n self.sess = tf.InteractiveSession()\n self.sess.run(tf.global_variables_initializer())\n\n facenet.load_model(model_path)\n\n self.images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n self.tf_embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n\n def get_embedding(self, images):\n feed_dict = {self.images_placeholder: images, self.phase_train_placeholder: False}\n embedding = self.sess.run(self.tf_embeddings, feed_dict=feed_dict)\n return embedding\n\n def free(self):\n self.sess.close()\n\n\ndef prewhiten(x):\n if x.ndim == 4:\n axis = (1, 2, 3)\n size = x[0].size\n elif x.ndim == 3:\n axis = (0, 1, 2)\n size = x.size\n else:\n raise ValueError('Dimension should be 3 or 4')\n\n mean = np.mean(x, 
axis=axis, keepdims=True)\n std = np.std(x, axis=axis, keepdims=True)\n std_adj = np.maximum(std, 1.0 / np.sqrt(size))\n y = (x - mean) / std_adj\n return y\n\n\ndef l2_normalize(x, axis=-1, epsilon=1e-10):\n output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))\n return output\n\n\ndef calc_embs(imgs, batch_size):\n aligned_images = prewhiten(np.array(imgs))\n\n pd = []\n for start in range(0, len(aligned_images), batch_size):\n pd.append(facenet.predict_on_batch(aligned_images[start:start + batch_size]))\n\n embs = l2_normalize(np.concatenate(pd))\n return embs\n\n\ndef nowTime():\n return int(round(time.time() * 1000))\n\n\nimage_size = 160\n\n# 创建MTCNN网络\nface_detect = Facedetection()\n\n# 初始化facenetEmbedding\nmodel_path = \"data/20180402-114759\"\nface_net = facenetEmbedding(model_path)\n\n\"\"\"\ngroup_id link to index_user_map\n\"\"\"\ngroup_user_map = dict()\n\n\"\"\"\ngroup_id link to face feature index\n\"\"\"\ngroup_index_map = dict()\n\n\ndef clear_ann_index():\n group_index_map.clear()\n group_user_map.clear()\n\n\ndef delete_group_ann_index(group_id):\n group_index_map.pop(group_id, None)\n group_user_map.pop(group_id, None)\n\n\ndef build_group_index(group_id, user_ids, feature_list):\n \"\"\"\n build group feature index\n \"\"\"\n\n print(\"build face index of group_id:\", group_id)\n\n f = 512\n index = AnnoyIndex(f)\n\n for n in range(len(feature_list)):\n features = feature_list[n]\n index.add_item(n, features)\n\n index.build(4)\n\n old_index = group_index_map.get(group_id)\n\n if old_index is not None:\n print(\"old_index:\", old_index)\n\n group_index_map[group_id] = index\n\n index_user_map = dict()\n\n for m in range(len(user_ids)):\n index_user_map[m] = user_ids[m]\n\n old_index_user_map = group_user_map.get(group_id)\n\n if old_index_user_map is not None:\n print(\"old_index_user_map:\", old_index_user_map)\n\n group_user_map[group_id] = index_user_map\n\n\ndef local_load_image(image, log_id):\n tmp_image_path = \"tmp/\" + log_id + \".jpg\"\n # print(\"tmp_image_name:\", tmp_image_path)\n\n start_time = nowTime()\n\n tmp_image_file = open(tmp_image_path, mode='bw+')\n tmp_image_file.write(image)\n tmp_image_file.flush()\n tmp_image_file.close()\n\n loaded_img = cv2.imread(tmp_image_path, cv2.IMREAD_REDUCED_COLOR_2)\n os.remove(tmp_image_path)\n\n end_time = nowTime()\n print(\"save/reload image in \", end_time - start_time, \" ms\")\n\n return loaded_img\n\n\ndef local_get_faces(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n img = to_rgb(gray)\n # img = cv2.cvtColor(loaded_img, cv2.COLOR_BGR2RGB)\n\n start_time = nowTime()\n\n bounding_boxes, points = face_detect.detect_face(img)\n\n end_time = nowTime()\n print(\"face_detect in \", end_time - start_time, \" ms\")\n\n return bounding_boxes, points\n\n\ndef local_get_descriptors(faces):\n start_time = nowTime()\n\n embeddings = face_net.get_embedding(faces)\n\n end_time = nowTime()\n print(\"calc_embs in \", end_time - start_time, \" ms\")\n\n print(\"embeddings.shape:\", embeddings.shape)\n\n return embeddings\n\n\ndef local_ann_search(descriptors, result_n, group_id):\n closest = list()\n\n # group_id link to face feature index\n index = group_index_map[group_id]\n if index is None:\n return closest\n\n start_time = nowTime()\n\n # search face from annoy index tree\n search_k = -1\n\n tmp_closest, distances = index.get_nns_by_vector(descriptors, result_n, search_k, include_distances=True)\n\n print(\"ann_search--closest:\", tmp_closest, \"distances:\", 
distances)\n\n end_time = nowTime()\n print(\"face search with group_id:\", group_id, \" in \", end_time - start_time, \" ms\")\n\n for i in range(len(tmp_closest)):\n _id = tmp_closest[i]\n distance = distances[i]\n # print(\"distance:\", distance)\n\n # set the max distance\n if distance > 1:\n continue\n\n closest.append(_id)\n\n print(\"search result -- id:\", _id, \" distance:\", distance)\n\n return closest\n\n\ndef user_search(descriptors, result_n, group_ids):\n user_infos = list()\n\n for group_id in group_ids:\n\n user_map = group_user_map.get(group_id)\n\n if user_map is None:\n continue\n\n # print(\"user_map:\", user_map)\n\n closest = local_ann_search(descriptors, result_n, group_id)\n\n for id in closest:\n user_info = dict()\n user_info['group_id'] = group_id\n user_info['user_id'] = user_map.get(id)\n user_info['score'] = 0.0\n\n user_infos.append(user_info)\n\n result_n = result_n - len(closest)\n\n if result_n == 0:\n break\n\n return user_infos\n\n\ndef detect(image, max_face_num):\n\n face_infos = list()\n\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n bounding_boxes, points = local_get_faces(img)\n\n nrof_faces = bounding_boxes.shape[0] # number of faces\n\n if nrof_faces > 0:\n print('找到人脸数目为:{}'.format(nrof_faces))\n\n faces = []\n\n for face_position in bounding_boxes:\n face_position = face_position.astype(int)\n print(\"face_position:\", face_position)\n\n left = max(face_position[0], 0)\n right = max(face_position[2], 0)\n top = max(face_position[1], 0)\n bottom = max(face_position[3], 0)\n\n crop = resize(img[top:bottom, left:right, :], (160, 160), mode='reflect')\n\n faces.append(crop)\n\n embeddings = local_get_descriptors(faces)\n\n for i in range(len(embeddings)):\n\n if i > max_face_num:\n break\n\n face_position = bounding_boxes[i]\n face_position = face_position.astype(int)\n\n left = max(face_position[0], 0)\n right = max(face_position[2], 0)\n top = max(face_position[1], 0)\n bottom = max(face_position[3], 0)\n\n # cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 1)\n # cv2.imwrite(\"tmp/face.jpg\", image)\n\n location = dict()\n location['left'] = float(left)\n location['top'] = float(top)\n location['width'] = float(right - left)\n location['height'] = float(bottom - top)\n location['rotation'] = 0\n\n landmarks = list()\n\n embedding = embeddings[i]\n embedding = embedding.astype(float)\n\n # print(\"embedding type:\", type(embedding))\n\n descriptors = list()\n for n in embedding:\n descriptors.append(n)\n\n face_info = dict()\n face_info['face_token'] = ObjectId().__str__()\n face_info['face_probability'] = 1.0\n face_info['label'] = ''\n face_info['location'] = location\n face_info['landmark'] = landmarks\n face_info['descriptors'] = descriptors\n\n face_infos.append(face_info)\n\n return face_infos\n","sub_path":"face2/face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"565221726","text":"import torch\nimport random\nimport time\nfrom network import ClassificationNetwork, ClassificationNetworkArrow, RegressionNetworkArrow\nfrom load_imitations import load_imitations\nfrom logger import Logger\nimport numpy as np\nimport os\n\nmodel_dict = {\"action_classes\": ClassificationNetwork,\n \"arrow_classes\": ClassificationNetworkArrow,\n \"arrow_regression\": RegressionNetworkArrow}\n\n\ndef cross_entropy_loss(batch_out, batch_gt):\n \"\"\"\n Calculates the cross entropy loss between 
\n\n\nloss_dict = {\"action_classes\": cross_entropy_loss,\n \"arrow_classes\": torch.nn.BCELoss(),\n \"arrow_regression\": torch.nn.MSELoss()}\n\nnum_classes_dict = {\"action_classes\": 9,\n \"arrow_classes\": 4,\n \"arrow_regression\": 3}\n\noptimizer_dict = {\"adam\": torch.optim.Adam,\n \"sgd\": torch.optim.SGD}\n\n\ndef train(model_dir, model_type, loader_params, num_epochs, device, batch_size, gamma, lr_milestone, save_network_frequency, optimizer, optimizer_params,\n model_params, img_size, img_channels, train_file_suffix):\n \"\"\"\n Function for training the network.\n \"\"\"\n Logger.configure(model_dir, \"tensorboard\")\n logger = Logger(scope='main')\n device = torch.device(device)\n\n train_file = os.path.join(model_dir, train_file_suffix)\n number_of_classes = num_classes_dict[model_type]\n loss_fn = loss_dict[model_type]\n infer_action = model_dict[model_type](number_of_classes=number_of_classes, **model_params)\n infer_action = infer_action.to(device=device)\n optimizer = optimizer_dict[optimizer](infer_action.parameters(), **optimizer_params)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_milestone, gamma=gamma)\n observations, actions = load_imitations(**loader_params)\n observations = [torch.Tensor(observation) for observation in observations]\n actions = [torch.Tensor(action) for action in actions]\n action_classes = infer_action.actions_to_classes(actions)\n\n action_distribution, _ = infer_action.get_action_distribution(action_classes)\n print(action_distribution)\n\n batches = [batch for batch in zip(observations, action_classes)]\n\n\n start_time = time.time()\n\n for epoch in range(num_epochs):\n random.shuffle(batches)\n\n total_loss = 0\n batch_in = []\n batch_gt = []\n batch_out_all = []\n num_batches = 0\n for batch_idx, batch in enumerate(batches):\n batch_in.append(batch[0].to(device))\n batch_gt.append(batch[1].to(device))\n\n if (batch_idx + 1) % batch_size == 0 or batch_idx == len(batches) - 1:\n num_batches += 1\n batch_in = torch.reshape(torch.cat(batch_in, dim=0),\n (-1, img_size, img_size, img_channels))\n batch_gt = torch.reshape(torch.cat(batch_gt, dim=0),\n (-1, number_of_classes))\n\n batch_out = infer_action(batch_in)\n batch_out_all += [output for output in batch_out]\n loss = loss_fn(batch_out, batch_gt)\n logger.log(loss, 'loss')\n # print(batch_out -batch_gt)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_loss += loss.item() # .item() keeps the computation graph from being retained\n\n batch_in = []\n batch_gt = []\n total_loss /= num_batches\n scheduler.step()\n\n\n time_per_epoch = (time.time() - start_time) / (epoch + 1)\n time_left = (1.0 * time_per_epoch) * (num_epochs - 1 - epoch)\n print(\"Epoch %5d\\t[Train]\\tloss: %.6f \\tETA: +%fs\" % (\n epoch + 1, total_loss, time_left))\n if (epoch + 1) % save_network_frequency == 0:\n torch.save(infer_action, train_file)\n\n torch.save(infer_action, train_file)\n return total_loss
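\n\n# Hypothetical invocation sketch (all names and values below are illustrative\n# assumptions, not taken from this repository):\n# train(model_dir=\"runs/exp1\", model_type=\"action_classes\", loader_params={},\n#       num_epochs=100, device=\"cuda\", batch_size=64, gamma=0.5, lr_milestone=50,\n#       save_network_frequency=10, optimizer=\"adam\", optimizer_params={\"lr\": 1e-3},\n#       model_params={}, img_size=96, img_channels=3, train_file_suffix=\"model.pt\")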
\n\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"375022267","text":"from Vertice import Vertice\nfrom Arista import Arista\nfrom Grafo import Grafo\nfrom Solucion import Solucion\nfrom Tabu import Tabu\nimport random \nimport sys\nimport re\nimport math \nimport copy\nfrom clsTxt import clsTxt\nfrom time import time\n\nclass TSP:\n def __init__(self, M: list, nombreArchivo, solInicial, nroIntercambios, opt, tenureADD, tenureDROP, tiempoEjec, optimo):\n self._G = Grafo(M,0) #Original graph\n print(\"File loaded\")\n self.__soluciones = [] #List of Grafo objects corresponding to the solutions\n self.__nroIntercambios=nroIntercambios*2 #number of vertices involved in the swaps; 1 swap => 2 vertices\n self.__opt=opt\n self.__optimo = optimo\n self.__tenureADD = tenureADD\n self.__tenureMaxADD = int(tenureADD*1.7)\n self.__tenureDROP = tenureDROP\n self.__tenureMaxDROP = int(tenureDROP*1.7)\n self.__txt = clsTxt(str(nombreArchivo))\n self.__tiempoMaxEjec = float(tiempoEjec)\n self.__frecMatriz = []\n for i in range(0, len(self._G.getMatriz())):\n fila = []\n for j in range(0, len(self._G.getMatriz())):\n fila.append(0)\n self.__frecMatriz.append(fila)\n self.tabuSearch(solInicial)\n\n def vecinoMasCercano(self, matrizDist: list, pos: int, visitados: list):\n masCercano = matrizDist[pos][pos]\n indMasCercano = 0\n \n for i in range(0, len(matrizDist)):\n costo = matrizDist[pos][i]\n if(costo=0):\n mayFrecuencia = -1\n \n #Iterate over the edges of the last optimal solution obtained\n for a in aristasSol:\n vert_Origen = a.getOrigen()\n vert_Destino = a.getDestino()\n frec_Actual = self.__frecMatriz[vert_Origen.getValue()-1][vert_Destino.getValue()-1]\n pertenece = self.pertenListaTabu_TSF(vert_Origen, vert_Destino, lista_tabu)\n \n if(frec_Actual > mayFrecuencia and not pertenece):\n mayFrecuencia = frec_Actual\n vertADD = vert_Origen\n vertDROP = vert_Destino \n #Load the most frequented ones with a Tenure of -1 so they are not removed\n if(vertADD != None and vertDROP != None):\n lista_Frecuentados = self.frecuentados(vertADD, vertDROP, lista_tabu)\n #print(\"vertADD: \"+str(vertADD)+\" vertDROP: \"+str(vertDROP)+ \" Max Frequency: \"+str(mayFrecuencia))\n #print(\"Tabu list now: \"+str(lista_Frecuentados))\n return lista_Frecuentados\n \n #If that does not hold, the tabu list is \"full\"\n #Remove enough entries from the tabu list to allow the swaps to proceed\n lista_Frecuentados = self.borraFrecuentados(lista_tabu)\n \n #print(\"Tabu list now: \"+str(lista_Frecuentados))\n \n return lista_Frecuentados\n\n #Returns the frequented elements\n def frecuentados(self, vert_ADD, vert_DROP, lista_tabu):\n lista_Frecuentados = []\n for x in lista_tabu:\n valor = x.getElemento().getValue()\n if(valor != vert_ADD.getValue() and valor != vert_DROP.getValue()):\n lista_Frecuentados.append(x)\n\n if(vert_ADD.getValue()!= 1):\n Tabu_ADD = Tabu(vert_ADD, -1)\n lista_Frecuentados.append(Tabu_ADD)\n \n if(vert_DROP.getValue()!= 1):\n Tabu_DROP = Tabu(vert_DROP, -1)\n lista_Frecuentados.append(Tabu_DROP)\n\n return lista_Frecuentados\n\n #Whether or not it belongs to the tabu list\n def pertenListaTabu_TSF(self, v1, v2, lista_tabu):\n lista_ElementosTabu = []\n e1 = v1.getValue()\n e2 = v2.getValue()\n \n for x in lista_tabu:\n elem = int(x.getElemento().getValue())\n lista_ElementosTabu.append(elem)\n \n return (e1 in lista_ElementosTabu) or (e2 in lista_ElementosTabu)
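\n\n # Added example (illustrative, not original code): if lista_tabu currently\n # holds vertices 3 and 7, an edge (3, 5) is treated as tabu because one of\n # its endpoints is tabu-active, while an edge (2, 5) is not.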
\n\n #Remove as many entries as needed for the upcoming swaps\n def borraFrecuentados(self, lista_tabu):\n #Remove at random\n indices_azar = random.sample(range(0,len(lista_tabu)), 4)\n \n ADD = None\n DROP = None\n print(\"Frequented list full. Removing some entries\")\n for ind in indices_azar:\n lista_tabu[ind].setTenure(1)\n if(ADD == None):\n ADD = lista_tabu[ind].getElemento().getValue() -1\n elif(DROP == None):\n DROP = lista_tabu[ind].getElemento().getValue() -1\n else:\n self.__frecMatriz[int(ADD)][int(DROP)] = 0\n ADD = None\n DROP = None\n self.decrementaTenure(lista_tabu)\n\n return lista_tabu\n\n ####### Starting Tabu Search #########\n def tabuSearch(self, strSolInicial):\n lista_tabu = [] #Holds objects of the Tabu class\n lista_permit = [] #Holds vertex-type objects \n g1 = self._G.copyVacio() #The first solution corresponds to g1\n \n if(strSolInicial==\"Vecino mas cercano\"): #option string kept as-is; callers pass it as an identifier\n ########Start from the nearest neighbour###########\n print(\"Initial solution via nearest neighbour\")\n vecinosCercanos = self.solucionVecinosCercanos() #Gets a vector of vertices\n g1.cargarDesdeSecuenciaDeVertices(vecinosCercanos) #Loads the tour into the solution\n else:\n ########Start from a random solution#############\n print(\"Random initial solution\")\n solucionAzar = self.solucionAlAzar()\n g1.cargarDesdeSecuenciaDeVertices(solucionAzar)\n\n self.__soluciones.append(g1) #Add the initial solution\n self.incrementaFrecuencia(g1)\n \n print(\"Starting Tabu Search\")\n self.__txt.escribir(\"############### GRAPH LOADED #################\")\n self.__txt.escribir(str(self._G))\n self.__txt.escribir(\"################ INITIAL SOLUTION #################\")\n self.__txt.escribir(\"Vertices: \" + str(g1.getV()))\n self.__txt.escribir(\"Edges: \" + str(g1.getA()))\n self.__txt.escribir(\"Associated cost: \" + str(g1.getCostoAsociado()))\n \n ############## Attributes ################\n #Solutions to be used\n Sol_Actual = self._G.copyVacio()\n Sol_Actual = self.__soluciones[len(self.__soluciones)-1] #The current one is the first solution\n Sol_Optima = copy.deepcopy(Sol_Actual) #Last optimal solution obtained; initially the first solution\n \n #Flag attributes used\n condOptim = False #If a solution better than the optimum is found, it is saved to the txt file\n condTS_Frecuencia = False #Start using the most frequented edges\n cond_3opt = False\n cond_4opt = False\n\n if(self.__opt == \"3-opt\"):\n cond_3opt = True\n print(\"Move: 3-opt\")\n\n #Time attributes and others\n tiempoIni = time()\n tiempoIniEstancamiento = tiempoIni \n tiempoIniNoMejora = tiempoIni\n tiempoMax = float(self.__tiempoMaxEjec*60)\n tiempoEjecuc = 0\n iterac = 1\n \n #Allow 2 min without improvement or, if that is too long, a quarter of the total time\n tiempoMaxNoMejora = 2*60\n if(tiempoMaxNoMejora > tiempoMax/4):\n tiempoMaxNoMejora = float(tiempoMax/4) #A quarter of the total time, in case 2 min is too long\n\n print(\"Maximum time: \"+str(int(tiempoMax/60))+\"min \"+str(int(tiempoMax%60))+\"sec\")\n print(\"Maximum stagnation time: \"+str(int(tiempoMaxNoMejora/60))+\"min \"+str(int(tiempoMaxNoMejora%60))+\"sec\")\n print(\"Real optimum: \"+str(self.__optimo))\n print(\"Initial solution: \"+str(Sol_Optima.getCostoAsociado()))\n\n nroIntercambios = 2 #Start with 2 initially\n while(tiempoEjecuc <= tiempoMax):\n lista_permit = self.pertenListaTabu(lista_tabu) #List of elements that are not tabu
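\n # Added note (illustrative): each iteration below picks ADD/DROP vertex\n # pairs from the non-tabu candidates; the swapped vertices then stay tabu\n # for tenureADD/tenureDROP iterations before becoming candidates again.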
\n ADD = []\n DROP = []\n \n #Check whether enough vertices are available for the swap\n if(len(lista_permit)>=4):\n #Ensure the number of swaps does not exceed the number of allowed elements\n if(len(lista_permit) tiempoMaxNoMejora): \n tiempoTotal = time()-tiempoIniEstancamiento\n print(\"\\nNo improvement for \" + str(int(tiempoTotal/60))+\"min \"+str(int(tiempoTotal%60))+\"sec\")\n print(\"Time remaining: \"+str(int(tiempoRestante/60))+\"min \"+str(int(tiempoRestante%60))+ \"sec\")\n \n print(\"\\nApplying most-visited edge frequency\")\n lista_tabu = self.TS_Frecuencia(Sol_Optima, lista_tabu, nroIntercambios) \n lista_permit = self.pertenListaTabu(lista_tabu)\n condTS_Frecuencia = not condTS_Frecuencia\n \n #Alternate moves among 2-opt, 3-opt and 4-opt\n if(not cond_3opt and not cond_4opt):\n print(\"Applying 3-opt moves\")\n cond_3opt = True\n else:\n cond_3opt = False\n if(not cond_4opt):\n print(\"Applying 4-opt v2 moves\")\n cond_4opt = True\n elif(nroIntercambios < self.__nroIntercambios):\n nroIntercambios += 2\n cond_4opt = False\n print(\"Applying 4-opt v1 moves\")\n else:\n cond_4opt = False\n nroIntercambios = 2\n print(\"Applying 2-opt moves\")\n\n #Take 2/3 of the remaining time so the next stagnation triggers sooner,\n #but only when the remaining time is below tiempoMaxNoMejora, since\n #otherwise there will be no further stagnation window\n if(tiempoRestante < tiempoMaxNoMejora and not cond_3opt):\n tiempoMaxNoMejora = tiempoRestante*2/3\n elif(tiempoMaxNoMejora > 20 and not cond_3opt): #Greater than 20 sec\n tiempoMaxNoMejora = tiempoMaxNoMejora*0.75\n\n tiempoIniEstancamiento=time() #Reset the no-improvement timer\n \n ######### Granular Tabu Search ########## \n if(cond_3opt):\n #3-opt\n ind_random = random.sample(range(0,len(lista_permit)),1)\n ind_random = self.vecinosMasCercanosTSG(ind_random, lista_permit, Sol_Optima.getV())\n ind_aux = self.vecinosMasCercanosTSG(ind_random, lista_permit, Sol_Optima.getV())\n ind_random.append(ind_aux[-1])\n elif(cond_4opt):\n #4-opt\n ind_random = random.sample(range(0,len(lista_permit)),2)\n ind_random = self.vecinosMasCercanosTSG(ind_random, lista_permit, Sol_Optima.getV())\n else:\n #2-opt \n ind_random = random.sample(range(0,len(lista_permit)),int(nroIntercambios/2))\n ind_random = self.vecinosMasCercanosTSG(ind_random, lista_permit, Sol_Optima.getV())\n \n #Create the ADD and DROP elements\n for i in range(0,len(ind_random)):\n if(i%2==0): #Even indices go to ADD, odd ones to DROP\n ADD.append(Tabu(lista_permit[ind_random[i]], self.__tenureADD))\n else:\n DROP.append(Tabu(lista_permit[ind_random[i]], self.__tenureDROP))\n\n #Perform the swap of the selected vertices\n if(cond_3opt):\n #3-opt\n Sol_Actual = Sol_Actual.swap_3opt(ADD[0].getElemento(), DROP[0].getElemento(), ADD[1].getElemento())\n elif(cond_4opt):\n #4-opt v2\n Sol_Actual = Sol_Actual.swap_4opt(ADD[0].getElemento(), DROP[0].getElemento(), ADD[1].getElemento(), DROP[1].getElemento())\n else:\n #2-opt and 4-opt v1\n for i in range(0,len(ADD)):\n Sol_Actual = Sol_Actual.swapp(ADD[i].getElemento(), DROP[i].getElemento())
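\n # Added illustration (not original code): a single swapp() call exchanges\n # the tour positions of one ADD/DROP pair, e.g. swapping vertices 4 and 9\n # turns the tour 1-4-6-9-1 into 1-9-6-4-1; the 3-opt/4-opt branches move\n # three or four vertices at once.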
\n \n #If a new optimal solution is obtained\n if(Sol_Actual < Sol_Optima):\n Sol_Optima = Sol_Actual #Update the optimal solution\n self.incrementaFrecuencia(Sol_Optima) #Increase the frequency of visited edges\n \n condOptim = True\n \n tiempoTotal = time() - tiempoIniNoMejora\n print(\"The previous solution lasted \" + str(int(tiempoTotal/60))+\"min \"+str(int(tiempoTotal%60))+\"sec -------> New optimum found. Cost: \"+str(Sol_Optima.getCostoAsociado()))\n \n self.__soluciones.append(Sol_Actual) #Store the optimal solutions\n tiempoIniEstancamiento=time()\n tiempoIniNoMejora = time()\n\n #Update the tenure with the tenureMax of ADD and DROP\n for i in range(0,len(ADD)):\n if(i> ' '+'🦠 '+ flag + ' ' + f'Covid-19 {country} Report\n>> {date_str}\n')\n\t\tprint(R + '>> Today:' + W)\n\t\tprint(f'>> Cases today in {country}: ' + R + f'{my_info[0] [\"todayCases\"]}' + W)\n\t\tprint(f'>> Deaths today in {country}: ' + R + f'{my_info[0] [\"todayDeaths\"]}\\n' + W)\n\n\t\tprint(R + '>> Totals:' + W)\n\t\tprint(f'>> Cases so far in {country}: ' + R + f'{my_info[0] [\"cases\"]}' + W)\n\t\tprint(f'>> Deaths so far in {country}: ' + R + f'{my_info[0] [\"deaths\"]}\\n' + W)\n\t\t\n\t\tprint(f'>> Active in {country}: ' + R + f'{my_info[0] [\"active\"]}' + W)\n\t\tprint(f'>> Critical in {country}: ' + R + f'{my_info[0] [\"critical\"]}' + W)\n\t\tprint(f'>> Recovered so far in {country}: ' + R + f'{my_info[0] [\"recovered\"]}\\n' + W)\n\n\t\tprint(f'>> Tested in {country}: ' + R + f'{my_info[0] [\"tests\"]}\\n' + W)\n\n\t\tprint(f'>> Cases per million in {country}: ' + R + f'{my_info[0] [\"casesPerOneMillion\"]}' + W)\n\t\tprint(f'>> Deaths per million in {country}: ' + R + f'{my_info[0] [\"deathsPerOneMillion\"]}' + W)\n\t\tprint(f'>> Tests per million in {country}: ' + R + f'{my_info[0] [\"testsPerOneMillion\"]}\\n' + W)\n\t\t\n\t\tpercent = (int(my_info[0] [\"deaths\"]) / int(my_info[0] [\"cases\"])) * 100\n\n\t\tprint(R + '>> Percent: ' + str(\"%.2f\" % percent) + '%\\n' + W)\n\telse:\n\t\tprint(f'No data for {country}')\n\t\n\tgo_again = input('Wanna go again? (y/n)').title()\n\tif go_again != 'N':\n\t\tanother = True\n\telse:\n\t\tanother = False\n\t\tbanner()\n\t\tprint('See you soon.\nStay Safe.\n\n#stayTheFuckHome\n'+'🦠 🇺🇳 🏳️‍🌈\n\n')","sub_path":"covid.py","file_name":"covid.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"193299226","text":"import os\r\nimport io\r\nimport boto3\r\nimport json\r\nimport csv\r\nimport botocore\r\nimport sys\r\nimport uuid\r\nfrom urllib.parse import unquote_plus\r\n\r\n# model endpoint and result file location are stored in environment variables\r\nENDPOINT_NAME = os.environ['ENDPOINT_NAME']\r\nRESULT_BUCKET = os.environ['RESULT_BUCKET']\r\nRESULT_FILE_KEY = os.environ['RESULT_FILE_KEY']\r\n\r\nruntime = boto3.client('runtime.sagemaker')\r\ns3_client = boto3.client('s3')\r\n\r\ndef lambda_handler(event, context):\r\n print(\"Received event: \" + json.dumps(event, indent=2))\r\n \r\n # for the purposes of proving the idea, the classifications \r\n # are logged in a file on S3 itself. This is not efficient \r\n # with respect to S3 because every time a bird is detected I \r\n # am doing a pull and put to read and update, but it is convenient\r\n # for seeing the results and for a few hundred images it is acceptable\r\n \r\n # download the results file from S3 or create it if it can't be downloaded. 
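\r\n # (Added illustrative sketch, not part of the original handler: a cheaper\r\n # variant would buffer rows in memory and write once per invocation, e.g.\r\n # rows = [classify(rec) for rec in event['Records']]  # classify() is hypothetical\r\n # s3_client.put_object(Bucket=RESULT_BUCKET, Key=RESULT_FILE_KEY, Body='\\n'.join(rows))\r\n # which performs a single PUT instead of a GET and a PUT per batch of images.)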
\r\n # The bucket name is defined by the trigger that calls this (the trigger \r\n # is on a specific bucket)\r\n result_filename = '/tmp/bird-classification-results.txt'\r\n try:\r\n s3_client.download_file(RESULT_BUCKET, RESULT_FILE_KEY, result_filename)\r\n except botocore.exceptions.ClientError as e:\r\n if e.response['Error']['Code'] == \"404\":\r\n print(\"Result file not found in result bucket.\")\r\n else:\r\n raise \r\n \r\n result_file = open(result_filename, 'at') \r\n \r\n # for every file reported by the event, download the file, pass\r\n # the contents to the model, and append the results to the log file\r\n for record in event['Records']:\r\n bucket = record['s3']['bucket']['name']\r\n key = unquote_plus(record['s3']['object']['key'])\r\n print(bucket, key)\r\n \r\n download_path = '/tmp/{}{}'.format(uuid.uuid4(), key)\r\n s3_client.download_file(bucket, key, download_path)\r\n \r\n # open the downloaded file and pass contents to model\r\n with open(download_path, 'rb') as f:\r\n payload = f.read()\r\n payload = bytearray(payload)\r\n \r\n print(payload)\r\n \r\n response = runtime.invoke_endpoint(EndpointName=ENDPOINT_NAME,\r\n ContentType='application/x-image',\r\n Body = payload)\r\n \r\n result = json.loads(response['Body'].read().decode())\r\n print(result)\r\n \r\n w = csv.writer(result_file)\r\n w.writerow(result)\r\n \r\n result_file.close()\r\n # upload under RESULT_FILE_KEY so the same object is read back next time\r\n s3_client.upload_file(result_filename, RESULT_BUCKET, \r\n RESULT_FILE_KEY)","sub_path":"File_put_Lambda.py","file_name":"File_put_Lambda.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"82416726","text":"\"\"\" ConfD/CSP services \"\"\"\n\n__author__ = \"Dave Wapstra \"\n\n\nimport re\nimport collections\nfrom time import sleep\n\nfrom unicon.bases.routers.services import BaseService\nfrom unicon.core.errors import SubCommandFailure, StateMachineError\nfrom unicon.eal.dialogs import Dialog, Statement\n\nfrom unicon.plugins.generic.statements import GenericStatements, \\\n authentication_statement_list\nfrom unicon.plugins.confd.patterns import ConfdPatterns\nfrom unicon.plugins.generic import GenericUtils\n\nfrom .service_statements import reload_statement_list, \\\n reload_continue_statement_list\n\n\nutils = GenericUtils()\nstatements = GenericStatements()\n\n\nclass Reload(BaseService):\n \"\"\"Service to reload the device.\n\n Arguments:\n reload_command: reload command to be issued. Default is\n \"system reboot\".\n dialog: Dialog which includes a list of Statements for\n additional dialogs prompted by the reload command, in case\n they are not in the current list.\n timeout: Timeout value in sec. Default value is {} sec\n\n Returns:\n Console log output if connected via serial console;\n if connected via SSH, returns the connect log.\n Raises SubCommandFailure on failure.\n\n Example ::\n .. 
code-block:: python\n\n csp.reload()\n\n \"\"\"\n\n def __init__(self, connection, context, **kwargs):\n super().__init__(connection, context, **kwargs)\n self.start_state = 'cisco_exec'\n self.end_state = 'cisco_exec'\n self.timeout = connection.settings.RELOAD_TIMEOUT\n self.__doc__ = self.__doc__.format(connection.settings.RELOAD_TIMEOUT)\n\n def call_service(self,\n reload_command='system reboot',\n dialog=Dialog([]),\n timeout=None,\n *args, **kwargs):\n con = self.connection\n timeout = timeout or self.timeout\n\n fmt_msg = \"+++ reloading %s \" \\\n \" with reload_command '%s' \" \\\n \"and timeout %s +++\"\n con.log.info(fmt_msg % (self.connection.hostname,\n reload_command,\n timeout))\n\n if not isinstance(dialog, Dialog):\n raise SubCommandFailure(\n \"dialog passed must be an instance of Dialog\")\n\n if self.context.get('console'):\n dialog = self.service_dialog(service_dialog=dialog)\n dialog += Dialog(authentication_statement_list)\n dialog += Dialog(reload_continue_statement_list)\n con.spawn.sendline(reload_command)\n try:\n self.result = dialog.process(con.spawn,\n timeout=timeout,\n prompt_recovery=self.prompt_recovery,\n context=self.context)\n except Exception as err:\n raise SubCommandFailure(\"Reload failed %s\" % err)\n\n if self.result:\n self.result = utils.remove_ansi_escape_codes(self.result.match_output)\n else:\n con.log.warning('Did not detect a console session, will try to reconnect...')\n dialog = Dialog(reload_statement_list)\n con.spawn.sendline(reload_command)\n dialog.process(con.spawn,\n timeout=timeout,\n prompt_recovery=self.prompt_recovery,\n context=self.context)\n con.expect('.+')\n con.log.warning('Disconnecting...')\n con.disconnect()\n for x in range(3):\n con.log.warning('Waiting for {} seconds'.format(con.settings.RELOAD_WAIT))\n sleep(con.settings.RELOAD_WAIT)\n con.log.warning('Trying to connect... 
attempt #{}'.format(x+1))\n try:\n output = con.connect()\n self.result = output\n except:\n con.log.warning('Connection failed')\n if con.connected:\n break\n\n if not con.connected:\n raise SubCommandFailure('Reload failed - could not reconnect')\n","sub_path":"src/unicon/plugins/confd/csp/service_implementation.py","file_name":"service_implementation.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"371771080","text":"#!/usr/bin/env python\n\"\"\"The file finder client action.\"\"\"\n\nimport abc\nimport collections\nimport errno\nimport fnmatch\nimport itertools\nimport logging\nimport os\nimport platform\nimport re\n\nimport psutil\n\nfrom grr.client import actions\nfrom grr.client import client_utils\nfrom grr.client import client_utils_common\nfrom grr.client import streaming\nfrom grr.client.vfs_handlers import files\n\nfrom grr.lib import utils\nfrom grr.lib.rdfvalues import client as rdf_client\nfrom grr.lib.rdfvalues import file_finder as rdf_file_finder\nfrom grr.lib.rdfvalues import paths as rdf_paths\n\n\nclass _SkipFileException(Exception):\n pass\n\n\nclass Component(object):\n \"\"\"A component of a path.\"\"\"\n\n def __hash__(self):\n return hash(self.__str__())\n\n def Generate(self, base_path):\n raise NotImplementedError()\n\n\nclass RecursiveComponent(Component):\n \"\"\"A recursive component.\"\"\"\n\n def __init__(self, depth, follow_links=False, mountpoints_blacklist=None):\n self.depth = depth\n self.follow_links = follow_links\n self.mountpoints_blacklist = mountpoints_blacklist\n\n def Generate(self, base_path):\n for f in self._Generate(base_path, []):\n yield f\n\n def _Generate(self, base_path, relative_components):\n \"\"\"Generates the relative filenames.\"\"\"\n\n new_base = os.path.join(base_path, *relative_components)\n if not relative_components:\n yield new_base\n try:\n filenames = os.listdir(new_base)\n except OSError as e:\n if e.errno == errno.EACCES: # permission denied.\n logging.info(e)\n return\n\n for f in filenames:\n new_components = relative_components + [f]\n relative_name = os.path.join(*new_components)\n yield relative_name\n\n if len(new_components) >= self.depth:\n continue\n\n filename = os.path.join(base_path, relative_name)\n try:\n stat = utils.Stat(filename)\n if not stat.IsDirectory():\n continue\n if filename in self.mountpoints_blacklist:\n continue\n if (not self.follow_links and\n utils.Stat(filename, follow_symlink=False).IsSymlink()):\n continue\n for res in self._Generate(base_path, new_components):\n yield res\n except OSError as e:\n if e.errno not in [errno.ENOENT, errno.ENOTDIR, errno.EINVAL]:\n logging.info(e)\n\n def __str__(self):\n return \"%s:%s\" % (self.__class__, self.depth)\n\n\nclass RegexComponent(Component):\n \"\"\"A component matching the file name against a regex.\"\"\"\n\n def __init__(self, regex):\n self.regex = re.compile(regex, flags=re.I)\n\n def Generate(self, base_path):\n try:\n for f in os.listdir(base_path):\n if self.regex.match(f):\n yield f\n except OSError as e:\n if e.errno == errno.EACCES: # permission denied.\n logging.error(e)\n\n def __str__(self):\n return \"%s:%s\" % (self.__class__, self.regex)\n\n\nclass LiteralComponent(Component):\n \"\"\"A component matching literal names.\"\"\"\n\n def __init__(self, literal):\n self.literal = literal\n\n def Generate(self, base_path):\n corrected_matches = []\n literal_lower = self.literal.lower()\n\n try:\n for c in os.listdir(base_path):\n # 
Perfect match.\n if c == self.literal:\n yield self.literal\n return\n\n # Case correction.\n if c.lower() == literal_lower:\n corrected_matches.append(c)\n\n for m in corrected_matches:\n yield m\n\n except OSError as e:\n if e.errno == errno.EACCES: # permission denied.\n logging.error(e)\n\n def __str__(self):\n return \"%s:%s\" % (self.__class__, self.literal)\n\n\nclass FileFinderOS(actions.ActionPlugin):\n \"\"\"The file finder implementation using the OS file api.\"\"\"\n\n in_rdfvalue = rdf_file_finder.FileFinderArgs\n out_rdfvalues = [rdf_file_finder.FileFinderResult]\n\n # This regex finds grouping patterns\n # (e.g. {test.exe,foo.doc,bar.txt}) and interpolations\n # (%%users.homedir%%).\n GROUPING_PATTERN = re.compile(\"({([^}]+,[^}]+)}|%%([^%]+?)%%)\")\n\n # This regex finds recursions (C:\\**, /usr/bin/**2).\n RECURSION_REGEX = re.compile(r\"\\*\\*(\\d*)\")\n\n # A regex indicating if there are shell globs in this path.\n GLOB_MAGIC_CHECK = re.compile(\"[*?[]\")\n\n def Run(self, args):\n self.follow_links = args.follow_links\n self.process_non_regular_files = args.process_non_regular_files\n\n self.stat_cache = utils.StatCache()\n\n # Generate a list of mount points where we stop recursive searches.\n if args.xdev == args.XDev.NEVER:\n # Never cross device boundaries, stop at all mount points.\n self.mountpoints_blacklist = set(\n [p.mountpoint for p in psutil.disk_partitions(all=True)])\n elif args.xdev == args.XDev.LOCAL:\n # Descend into file systems on physical devices only.\n self.mountpoints_blacklist = (\n set([p.mountpoint for p in psutil.disk_partitions(all=True)]) -\n set([p.mountpoint for p in psutil.disk_partitions(all=False)]))\n elif args.xdev == args.XDev.ALWAYS:\n # Never stop at any device boundary.\n self.mountpoints_blacklist = set()\n\n for fname in self.CollectGlobs(args.paths):\n self.Progress()\n try:\n matches = self._Validate(args, fname)\n result = self._ProcessFile(args, fname)\n result.matches = matches\n self.SendReply(result)\n except _SkipFileException:\n pass\n\n def _GetStat(self, filepath, follow_symlink=True):\n try:\n return self.stat_cache.Get(filepath, follow_symlink=follow_symlink)\n except OSError:\n raise _SkipFileException()\n\n def _Validate(self, args, filepath):\n matches = []\n self._ValidateRegularity(args, filepath)\n self._ValidateMetadata(args, filepath)\n self._ValidateContent(args, filepath, matches)\n return matches\n\n def _ValidateRegularity(self, args, filepath):\n stat = self._GetStat(filepath, follow_symlink=False)\n\n is_regular = stat.IsRegular() or stat.IsDirectory()\n if not is_regular and not args.process_non_regular_files:\n raise _SkipFileException()\n\n def _ValidateMetadata(self, args, filepath):\n stat = self._GetStat(filepath, follow_symlink=False)\n\n for metadata_condition in MetadataCondition.Parse(args.conditions):\n if not metadata_condition.Check(stat):\n raise _SkipFileException()\n\n def _ValidateContent(self, args, filepath, matches):\n for content_condition in ContentCondition.Parse(args.conditions):\n result = list(content_condition.Search(filepath))\n if not result:\n raise _SkipFileException()\n matches.extend(result)\n\n def _ProcessFile(self, args, fname):\n if args.action.action_type == args.action.Action.STAT:\n return self._ExecuteStat(fname, args)\n\n # For directories, only Stat makes sense.\n if self._GetStat(fname, follow_symlink=True).IsDirectory():\n raise _SkipFileException()\n\n if args.action.action_type == args.action.Action.DOWNLOAD:\n return self._ExecuteDownload(fname, 
args)\n\n if args.action.action_type == args.action.Action.HASH:\n return self._ExecuteHash(fname, args)\n\n raise ValueError(\"incorrect action type: %s\" % args.action.action_type)\n\n def _ExecuteStat(self, fname, args):\n stat_entry = self.Stat(fname, args.action.stat)\n return rdf_file_finder.FileFinderResult(stat_entry=stat_entry)\n\n def _ExecuteDownload(self, fname, args):\n stat_opts = rdf_file_finder.FileFinderStatActionOptions(\n resolve_links=True,\n collect_ext_attrs=args.action.download.collect_ext_attrs)\n\n stat_entry = self.Stat(fname, stat_opts)\n uploaded_file = self.Upload(fname, args.action.download)\n if uploaded_file:\n uploaded_file.stat_entry = stat_entry\n\n return rdf_file_finder.FileFinderResult(\n stat_entry=stat_entry, uploaded_file=uploaded_file)\n\n def _ExecuteHash(self, fname, args):\n stat_opts = rdf_file_finder.FileFinderStatActionOptions(\n resolve_links=True,\n collect_ext_attrs=args.action.hash.collect_ext_attrs)\n\n stat_entry = self.Stat(fname, stat_opts)\n hash_entry = self.Hash(fname, args.action.hash)\n return rdf_file_finder.FileFinderResult(\n stat_entry=stat_entry, hash_entry=hash_entry)\n\n def Stat(self, fname, opts):\n stat = self._GetStat(fname, follow_symlink=opts.resolve_links)\n pathspec = rdf_paths.PathSpec(\n pathtype=rdf_paths.PathSpec.PathType.OS,\n path=client_utils.LocalPathToCanonicalPath(fname),\n path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)\n return files.MakeStatResponse(\n stat, pathspec=pathspec, ext_attrs=opts.collect_ext_attrs)\n\n def Hash(self, fname, opts):\n file_size = self._GetStat(fname, follow_symlink=True).GetSize()\n if file_size <= opts.max_size:\n max_hash_size = file_size\n else:\n policy = rdf_file_finder.FileFinderHashActionOptions.OversizedFilePolicy\n if opts.oversized_file_policy == policy.SKIP:\n return None\n elif opts.oversized_file_policy == policy.HASH_TRUNCATED:\n max_hash_size = opts.max_size\n\n hasher = client_utils_common.MultiHasher(progress=self.Progress)\n try:\n hasher.HashFilePath(fname, max_hash_size)\n except IOError:\n return None\n return hasher.GetHashObject()\n\n def Upload(self, fname, opts):\n file_size = self._GetStat(fname, follow_symlink=True).GetSize()\n max_bytes = None\n if file_size > opts.max_size:\n policy = opts.oversized_file_policy\n policy_enum = opts.OversizedFilePolicy\n if policy == policy_enum.DOWNLOAD_TRUNCATED:\n max_bytes = opts.max_size\n elif policy == policy_enum.SKIP:\n return None\n else:\n raise ValueError(\"Unknown oversized file policy %s.\" % int(policy))\n\n uploaded_file = self.grr_worker.UploadFile(\n open(fname, \"rb\"),\n opts.upload_token,\n max_bytes=max_bytes,\n network_bytes_limit=self.network_bytes_limit,\n session_id=self.session_id,\n progress_callback=self.Progress)\n return uploaded_file\n\n def CollectGlobs(self, globs):\n expanded_globs = {}\n for glob in globs:\n initial_component, path = self._SplitInitialPathComponent(\n utils.SmartUnicode(glob))\n expanded_globs.setdefault(initial_component, []).extend(\n self._InterpolateGrouping(path))\n\n component_tree = {}\n for initial_component, glob_list in expanded_globs.iteritems():\n for glob in glob_list:\n node = component_tree.setdefault(initial_component, {})\n for component in self._ConvertGlobIntoPathComponents(glob):\n node = node.setdefault(component, {})\n\n for initial_component in component_tree:\n for f in self._TraverseComponentTree(component_tree[initial_component],\n initial_component):\n yield f\n\n def _SplitInitialPathComponent(self, path):\n r\"\"\"Splits off the 
initial component of the given path.\n\n This function is needed since on Windows, the first component of a\n path (usually indicating a drive) needs to be treated\n specially. Even though there are many ways of specifying paths on\n Windows, we only support the syntax c:\\file.\n\n Args:\n path: The path to split.\n Returns:\n A tuple, first component and remainder.\n Raises:\n ValueError: The path format was not understood.\n \"\"\"\n\n if platform.system() != \"Windows\":\n return u\"/\", path\n\n # In case the path start with: C:\n if len(path) >= 2 and path[1] == u\":\":\n # A backslash is needed after the drive letter to make this an\n # absolute path. Also, we always capitalize the drive letter.\n return path[:2].upper() + u\"\\\\\", path[2:].lstrip(u\"\\\\\")\n\n raise ValueError(\"Can't handle path: %s\" % path)\n\n def _TraverseComponentTree(self, component_tree, base_path):\n\n for component, subtree in component_tree.iteritems():\n for f in component.Generate(base_path):\n if subtree:\n for res in self._TraverseComponentTree(subtree,\n os.path.join(base_path, f)):\n yield res\n else:\n yield os.path.join(base_path, f)\n\n def _InterpolateGrouping(self, pattern):\n \"\"\"Takes the pattern and splits it into components.\n\n Each grouping pattern is expanded into a set:\n /foo{a,b}/bar -> [\"/foo\", set([\"a\", \"b\"]), \"/bar\"]\n\n Raises:\n ValueError: Unknown pattern or interpolation.\n NotImplementedError: The pattern is using knowledgebase interpolations,\n they are not implemented client side yet.\n Args:\n pattern: list of patterns.\n Returns:\n A list of interpolated patterns.\n \"\"\"\n result = []\n components = []\n offset = 0\n for match in self.GROUPING_PATTERN.finditer(pattern):\n match_str = match.group(0)\n # Alternatives.\n if match_str.startswith(u\"{\"):\n components.append([pattern[offset:match.start()]])\n\n # Expand the attribute into the set of possibilities:\n alternatives = match.group(2).split(u\",\")\n components.append(set(alternatives))\n offset = match.end()\n\n # KnowledgeBase interpolation.\n elif match_str.startswith(u\"%\"):\n raise NotImplementedError(\"Client side knowledgebase not available.\")\n\n else:\n raise ValueError(\"Unknown interpolation %s\" % match.group(0))\n\n components.append([pattern[offset:]])\n # Now calculate the cartesian products of all these sets to form all\n # strings.\n for vector in itertools.product(*components):\n result.append(u\"\".join(vector))\n\n # These should be all possible patterns.\n # e.g. /fooa/bar , /foob/bar\n return result\n\n def _ConvertGlobIntoPathComponents(self, pattern):\n r\"\"\"Converts a glob pattern into a list of components.\n\n Wildcards are also converted to regular expressions. The\n components do not span directories, and are marked as a regex or a\n literal component.\n We also support recursion into directories using the ** notation. For\n example, /home/**2/foo.txt will find all files named foo.txt recursed 2\n directories deep. 
If the directory depth is omitted, it defaults to 3.\n Example:\n /home/**/*.exe -> [{type: \"LITERAL\", path: \"home\"},\n {type: \"RECURSIVE\"},\n {type: \"REGEX\", path: \".*\\\\.exe\\\\Z(?ms)\"}]]\n Args:\n pattern: A glob expression with wildcards.\n Returns:\n A list of Components.\n Raises:\n ValueError: If the glob is invalid.\n \"\"\"\n components = []\n recursion_count = 0\n for path_component in pattern.split(os.path.sep):\n if not path_component:\n continue\n\n m = self.RECURSION_REGEX.search(path_component)\n if m:\n recursion_count += 1\n if recursion_count > 1:\n raise ValueError(\"Pattern cannot have more than one recursion.\")\n\n if m.group(0) != path_component:\n raise ValueError(\"Can't have combined recursive search and regex.\")\n\n depth = 3\n\n # Allow the user to override the recursion depth.\n if m.group(1):\n depth = int(m.group(1))\n\n component = RecursiveComponent(\n depth=depth,\n follow_links=self.follow_links,\n mountpoints_blacklist=self.mountpoints_blacklist)\n\n elif self.GLOB_MAGIC_CHECK.search(path_component):\n component = RegexComponent(fnmatch.translate(path_component))\n\n else:\n component = LiteralComponent(path_component)\n\n components.append(component)\n\n return components\n\n\nclass MetadataCondition(object):\n \"\"\"An abstract class representing conditions on the file metadata.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def Check(self, stat):\n \"\"\"Checks whether condition is met.\n\n Args:\n stat: An `utils.Stat` object.\n\n Returns:\n True if the condition is met.\n \"\"\"\n pass\n\n @staticmethod\n def Parse(conditions):\n \"\"\"Parses the file finder condition types into the condition objects.\n\n Args:\n conditions: An iterator over `FileFinderCondition` objects.\n\n Yields:\n `MetadataCondition` objects that correspond to the file-finder conditions.\n \"\"\"\n kind = rdf_file_finder.FileFinderCondition.Type\n classes = {\n kind.MODIFICATION_TIME: ModificationTimeCondition,\n kind.ACCESS_TIME: AccessTimeCondition,\n kind.INODE_CHANGE_TIME: InodeChangeTimeCondition,\n kind.SIZE: SizeCondition,\n kind.EXT_FLAGS: ExtFlagsCondition,\n }\n\n for condition in conditions:\n try:\n yield classes[condition.condition_type](condition)\n except KeyError:\n pass\n\n\nclass ModificationTimeCondition(MetadataCondition):\n \"\"\"A condition checking modification time of a file.\"\"\"\n\n def __init__(self, params):\n super(ModificationTimeCondition, self).__init__()\n self.params = params.modification_time\n\n def Check(self, stat):\n min_mtime = self.params.min_last_modified_time.AsSecondsFromEpoch()\n max_mtime = self.params.max_last_modified_time.AsSecondsFromEpoch()\n return min_mtime <= stat.GetModificationTime() <= max_mtime\n\n\nclass AccessTimeCondition(MetadataCondition):\n \"\"\"A condition checking access time of a file.\"\"\"\n\n def __init__(self, params):\n super(AccessTimeCondition, self).__init__()\n self.params = params.access_time\n\n def Check(self, stat):\n min_atime = self.params.min_last_access_time.AsSecondsFromEpoch()\n max_atime = self.params.max_last_access_time.AsSecondsFromEpoch()\n return min_atime <= stat.GetAccessTime() <= max_atime\n\n\nclass InodeChangeTimeCondition(MetadataCondition):\n \"\"\"A condition checking change time of inode of a file.\"\"\"\n\n def __init__(self, params):\n super(InodeChangeTimeCondition, self).__init__()\n self.params = params.inode_change_time\n\n def Check(self, stat):\n min_ctime = self.params.min_last_inode_change_time.AsSecondsFromEpoch()\n max_ctime = 
self.params.max_last_inode_change_time.AsSecondsFromEpoch()\n return min_ctime <= stat.GetChangeTime() <= max_ctime\n\n\nclass SizeCondition(MetadataCondition):\n \"\"\"A condition checking size of a file.\"\"\"\n\n def __init__(self, params):\n super(SizeCondition, self).__init__()\n self.params = params.size\n\n def Check(self, stat):\n min_fsize = self.params.min_file_size\n max_fsize = self.params.max_file_size\n return min_fsize <= stat.GetSize() <= max_fsize\n\n\nclass ExtFlagsCondition(MetadataCondition):\n \"\"\"A condition checking extended flags of a file.\n\n Args:\n params: A `FileFinderCondition` instance.\n \"\"\"\n\n def __init__(self, params):\n super(ExtFlagsCondition, self).__init__()\n self.params = params.ext_flags\n\n def Check(self, stat):\n return self.CheckOsx(stat) and self.CheckLinux(stat)\n\n def CheckLinux(self, stat):\n flags = stat.GetLinuxFlags()\n bits_set = self.params.linux_bits_set\n bits_unset = self.params.linux_bits_unset\n return (bits_set & flags) == bits_set and (bits_unset & flags) == 0\n\n def CheckOsx(self, stat):\n flags = stat.GetOsxFlags()\n bits_set = self.params.osx_bits_set\n bits_unset = self.params.osx_bits_unset\n return (bits_set & flags) == bits_set and (bits_unset & flags) == 0\n\n\nclass ContentCondition(object):\n \"\"\"An abstract class representing conditions on the file contents.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def Search(self, path):\n \"\"\"Searches specified file for particular content.\n\n Args:\n path: A path to the file that is going to be searched.\n\n Yields:\n `BufferReference` objects pointing to file parts with matching content.\n \"\"\"\n pass\n\n @staticmethod\n def Parse(conditions):\n \"\"\"Parses the file finder condition types into the condition objects.\n\n Args:\n conditions: An iterator over `FileFinderCondition` objects.\n\n Yields:\n `ContentCondition` objects that correspond to the file-finder conditions.\n \"\"\"\n kind = rdf_file_finder.FileFinderCondition.Type\n classes = {\n kind.CONTENTS_LITERAL_MATCH: LiteralMatchCondition,\n kind.CONTENTS_REGEX_MATCH: RegexMatchCondition,\n }\n\n for condition in conditions:\n try:\n yield classes[condition.condition_type](condition)\n except KeyError:\n pass\n\n OVERLAP_SIZE = 1024 * 1024\n CHUNK_SIZE = 10 * 1024 * 1024\n\n def Scan(self, path, matcher):\n \"\"\"Scans given file searching for occurrences of given pattern.\n\n Args:\n path: A path to the file that needs to be searched.\n matcher: A matcher object specifying a pattern to search for.\n\n Yields:\n `BufferReference` objects pointing to file parts with matching content.\n \"\"\"\n streamer = streaming.FileStreamer(\n chunk_size=self.CHUNK_SIZE, overlap_size=self.OVERLAP_SIZE)\n\n offset = self.params.start_offset\n amount = self.params.length\n for chunk in streamer.StreamFilePath(path, offset=offset, amount=amount):\n for span in chunk.Scan(matcher):\n ctx_begin = max(span.begin - self.params.bytes_before, 0)\n ctx_end = min(span.end + self.params.bytes_after, len(chunk.data))\n ctx_data = chunk.data[ctx_begin:ctx_end]\n\n yield rdf_client.BufferReference(\n offset=chunk.offset + ctx_begin,\n length=len(ctx_data),\n data=ctx_data)\n\n if self.params.mode == self.params.Mode.FIRST_HIT:\n return\n\n\nclass LiteralMatchCondition(ContentCondition):\n \"\"\"A content condition that lookups a literal pattern.\"\"\"\n\n def __init__(self, params):\n super(LiteralMatchCondition, self).__init__()\n self.params = params.contents_literal_match\n\n def Search(self, path):\n matcher = 
LiteralMatcher(utils.SmartStr(self.params.literal))\n for match in self.Scan(path, matcher):\n yield match\n\n\nclass RegexMatchCondition(ContentCondition):\n \"\"\"A content condition that lookups regular expressions.\"\"\"\n\n def __init__(self, params):\n super(RegexMatchCondition, self).__init__()\n self.params = params.contents_regex_match\n\n def Search(self, path):\n matcher = RegexMatcher(self.params.regex)\n for match in self.Scan(path, matcher):\n yield match\n\n\nclass Matcher(object):\n \"\"\"An abstract class for objects able to lookup byte strings.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n Span = collections.namedtuple(\"Span\", [\"begin\", \"end\"]) # pylint: disable=invalid-name\n\n @abc.abstractmethod\n def Match(self, data, position):\n \"\"\"Matches the given data object starting at specified position.\n\n Args:\n data: A byte string to pattern match on.\n position: First position at which the search is started on.\n\n Returns:\n A `Span` object if the matcher finds something in the data.\n \"\"\"\n pass\n\n\nclass RegexMatcher(Matcher):\n \"\"\"A regex wrapper that conforms to the `Matcher` interface.\n\n Args:\n regex: An RDF regular expression that the matcher represents.\n \"\"\"\n\n # TODO(hanuszczak): This class should operate on normal Python regexes, not on\n # RDF values.\n\n def __init__(self, regex):\n super(RegexMatcher, self).__init__()\n self.regex = regex\n\n def Match(self, data, position):\n match = self.regex.Search(data[position:])\n if not match:\n return None\n\n begin, end = match.span()\n return Matcher.Span(begin=position + begin, end=position + end)\n\n\nclass LiteralMatcher(Matcher):\n \"\"\"An exact string matcher that conforms to the `Matcher` interface.\n\n Args:\n literal: A byte string pattern that the matcher matches.\n \"\"\"\n\n def __init__(self, literal):\n super(LiteralMatcher, self).__init__()\n self.literal = literal\n\n def Match(self, data, position):\n offset = data.find(self.literal, position)\n if offset == -1:\n return None\n\n return Matcher.Span(begin=offset, end=offset + len(self.literal))\n","sub_path":"grr/client/client_actions/file_finder.py","file_name":"file_finder.py","file_ext":"py","file_size_in_byte":23619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"238665415","text":"import pandas as pd\n\nimport pytest\n\nfrom resite.resite import Resite\n\n\ndef test_init():\n resite = Resite([\"BENELUX\"], [\"wind_onshore\"], ['2015-01-01T00:00', '2015-01-01T23:00'], 0.5)\n for attr in [\"technologies\", \"regions\", \"timestamps\", \"spatial_res\"]:\n assert hasattr(resite, attr)\n\n\ndef build_data_test(resite, technologies, regions, timestamps, nb_sites):\n for attr in [\"use_ex_cap\", \"tech_points_tuples\", \"tech_points_dict\",\n \"initial_sites_ds\", \"tech_points_regions_ds\", \"data_dict\"]:\n assert hasattr(resite, attr)\n assert resite.use_ex_cap\n for i, tech in enumerate(technologies):\n assert tech in resite.tech_points_dict\n assert len(resite.tech_points_dict[tech]) == nb_sites[i]\n assert sum([t[0] == tech for t in resite.tech_points_tuples]) == nb_sites[i]\n assert sum([t[0] == tech for t in resite.tech_points_regions_ds.index]) == nb_sites[i]\n assert all([region in regions for region in set(resite.tech_points_regions_ds.values)])\n for key in [\"load\", \"cap_potential_ds\", \"existing_cap_ds\", \"cap_factor_df\"]:\n assert key in resite.data_dict\n assert not set(resite.data_dict[\"load\"].index).symmetric_difference(set(timestamps))\n assert 
all([region in resite.data_dict[\"load\"].columns for region in regions])\n assert not set(resite.data_dict[\"cap_factor_df\"].index).symmetric_difference(set(timestamps))\n for tech_point in resite.tech_points_tuples:\n assert tech_point in resite.data_dict[\"cap_potential_ds\"].index\n assert tech_point in resite.data_dict[\"existing_cap_ds\"].index\n assert tech_point in resite.data_dict[\"cap_factor_df\"].columns\n\n\ndef test_build_data_wrong_cap_pot_thresholds_len():\n technologies = [\"pv_utility\", \"wind_offshore\"]\n regions = [\"BENELUX\"]\n timeslice = ['2015-01-01T00:00', '2015-01-01T23:00']\n resite = Resite(regions, technologies, timeslice, 0.5)\n with pytest.raises(AssertionError):\n resite.build_data(True, [0.01])\n\n\ndef test_build_data_one_region():\n technologies = [\"pv_utility\", \"wind_offshore\"]\n regions = [\"BENELUX\"]\n timeslice = ['2015-01-01T00:00', '2015-01-01T23:00']\n timestamps = pd.date_range(timeslice[0], timeslice[1], freq='1H')\n nb_sites = [35, 35]\n resite = Resite(regions, technologies, timeslice, 0.5)\n resite.build_data(True, [0.01, 0.01])\n build_data_test(resite, technologies, regions, timestamps, nb_sites)\n\n\ndef test_build_data_two_regions():\n technologies = [\"pv_utility\", \"wind_offshore\"]\n regions = [\"BENELUX\", \"PT\"]\n timeslice = ['2015-01-01T00:00', '2015-01-01T23:00']\n timestamps = pd.date_range(timeslice[0], timeslice[1], freq='1H')\n nb_sites = [72, 49]\n resite = Resite(regions, technologies, timeslice, 0.5)\n resite.build_data(True)\n build_data_test(resite, technologies, regions, timestamps, nb_sites)\n\n\ndef test_build_model():\n technologies = [\"pv_utility\", \"wind_offshore\"]\n regions = [\"BENELUX\"]\n timeslice = ['2015-01-01T00:00', '2015-01-01T23:00']\n resite = Resite(regions, technologies, timeslice, 0.5)\n resite.build_data(True)\n resite.build_model(\"pyomo\", \"max_generation\", {\"nb_sites_per_region\": [5]}, False)\n for attr in [\"instance\", \"modelling\", \"formulation\", \"formulation_params\"]:\n assert hasattr(resite, attr)\n\n\ndef test_solve_model():\n technologies = [\"pv_utility\", \"wind_offshore\"]\n regions = [\"BENELUX\"]\n timeslice = ['2015-01-01T00:00', '2015-01-01T23:00']\n resite = Resite(regions, technologies, timeslice, 0.5)\n resite.build_data(True)\n resite.build_model(\"pyomo\", \"max_generation\", {\"nb_sites_per_region\": [5]}, False)\n resite.solve_model()\n for attr in [\"objective\", \"y_ds\", \"sel_tech_points_dict\"]:\n assert hasattr(resite, attr)\n for tech_point in resite.y_ds.index:\n assert tech_point in resite.tech_points_tuples.tolist()\n for tech in resite.sel_tech_points_dict:\n assert tech in technologies\n points = resite.sel_tech_points_dict[tech]\n for point in points:\n assert (tech, point[0], point[1]) in resite.tech_points_tuples.tolist()\n","sub_path":"tests/test_resite.py","file_name":"test_resite.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"376927040","text":"# Sequential search implementation\ndef sequential_search(n, target, array):\n # Check each element one by one\n for i in range(n):\n # If the current element equals the target\n if array[i] == target:\n return i + 1 # return the position (indices start at 0, so add 1)
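\n\n# Worked example (added): sequential_search(3, \"b\", [\"a\", \"b\", \"c\"]) compares\n# \"a\", then \"b\", and returns 2, the 1-based position of the match; if the\n# target is absent, the loop falls through and the function returns None.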
\n \nprint(\"Enter the number of elements, then a space, then the string to search for.\")\ninput_data = input().split()\n\nn = int(input_data[0])\ntarget = input_data[1]\n\nprint(\"Enter as many strings as the number given above, separated by single spaces.\")\narray = input().split()\n\n# Print the result of the sequential search\nprint(sequential_search(n, target, array))","sub_path":"search/sequential_search.py","file_name":"sequential_search.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"110893616","text":"\n# spisok = [] - simpler and more common way to create a list\n# spisok = list() - unusual way via constructor\n# List Comprehension!!! WATCH SOCRATICA!!\n\nlist1 = ['History', 'Anatomy']\n#list2 = list1 + 'Art' # would raise TypeError: can only concatenate list to list\n#print(list2)\n\nlist2 = [' of England', 'human']\nfor a, b in zip(list2, list1):\n\tprint(a, b)\n\t\n","sub_path":"Begin/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"115719102","text":"import os\r\nimport sys\r\nimport random\r\nimport math\r\nimport time\r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\n\r\nfrom mrcnn import utils\r\nimport mrcnn.model as modellib\r\nfrom mrcnn import visualize\r\nfrom mrcnn.model import log\r\n\r\nimport rocks\r\n\r\nconfig = rocks.RocksConfig()\r\nconfig.display()\r\n\r\nvalid_dataset = rocks.RocksDataset()\r\nvalid_dataset.load_rocks('C:\\\\Users\\\\nemad\\\\PycharmProjects\\\\test\\\\Mask_RCNN\\\\dataset', 'infer')\r\n\r\nvalid_dataset.prepare()\r\n\r\nROOT_DIR = os.path.abspath(\"../.././\")\r\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\r\nclass InferenceConfig(rocks.RocksConfig):\r\n GPU_COUNT = 1\r\n IMAGES_PER_GPU = 1\r\n\r\ninference_config = InferenceConfig()\r\n\r\n# Recreate the model in inference mode\r\nmodel_pred = modellib.MaskRCNN(mode=\"inference\",\r\n config=inference_config,\r\n model_dir=MODEL_DIR)\r\n\r\n# Get path to saved weights\r\n# Either set a specific path or find last trained weights\r\nmodel_path = \"C:\\\\Users\\\\nemad\\\\PycharmProjects\\\\test\\\\craters\\\\logs\\\\crater20200420T2028\\\\mask_rcnn_crater_0008.h5\"#\"C:\\\\Users\\\\nemad\\\\PycharmProjects\\\\test\\\\Mask_RCNN\\\\logs\\\\crater20200421T1235\\\\mask_rcnn_crater_0100.h5\" #os.path.join(ROOT_DIR, \"logs\\\\crater20200327T1511\\\\mask_rcnn_crater_0005.h5\")\r\n# model_path = model_pred.find_last()\r\n\r\n# Load trained weights\r\nprint(\"Loading weights from \", model_path)\r\nmodel_pred.load_weights(model_path, by_name=True)\r\n\r\n\r\ndef get_ax(rows=1, cols=1, size=8):\r\n \"\"\"Return a Matplotlib Axes array to be used in\r\n all visualizations in the notebook. 
Provide a\r\n central point to control graph sizes.\r\n\r\n Change the default size attribute to control the size\r\n of rendered images\r\n \"\"\"\r\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\r\n return ax\r\n\r\n\r\ninstances = list()\r\n\r\nfor info in valid_dataset.image_info:\r\n image_id = info['id']\r\n # print(\"---------------------------------------------\", image_id)\r\n image = cv2.imread(info['path'])\r\n image, image_meta, gt_class_id, gt_bbox, gt_mask = \\\r\n modellib.load_image_gt(valid_dataset, inference_config,\r\n image_id, use_mini_mask=False)\r\n\r\n results = model_pred.detect([image], verbose=1)\r\n r = results[0]\r\n print(r['masks'].shape)\r\n\r\n ### display original image\r\n # original_image = visualize.display_instances(image, gt_bbox, gt_mask, gt_class_id, valid_dataset.class_names, figsize=(8, 8))\r\n ### display inference image\r\n # image = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], valid_dataset.class_names, r['scores'], ax=get_ax())\r\n name = 'masked_' + info['path'].split('/')[-1]\r\n ### save inference image\r\n # cv2.imwrite(\"../../dataset/zhiang_c3/infer/\"+name, image)\r\n\r\n ### get all instances\r\n image_name = info['path'].split('/')[-1]\r\n coord_str = image_name.split('.')[0]\r\n coord = coord_str.split('_')\r\n coord = [int(coord[0]), int(coord[1])]\r\n\r\n for i, bb in enumerate(r['rois']):\r\n instance = dict()\r\n instance['coord'] = coord\r\n instance['bb'] = bb\r\n mask = r['masks'][:, :, i].astype(float)\r\n mask = cv2.resize(mask, (400, 400)).astype(bool)\r\n instance['mask'] = mask\r\n instances.append(instance)\r\n print(len(instances))\r\n print('*' * 20)\r\n\r\ni = np.random.randint(len(instances))\r\nprint(instances[i]['bb'])\r\nmask = instances[i]['mask']\r\nplt.imshow(mask)\r\nplt.show()\r\n\r\n\r\nl = len(instances)\r\nfor i in range(0,l,10000):\r\n if l-i>10000:\r\n s = instances[i:i+10000]\r\n else:\r\n s = instances[i:]\r\n with open('instances_'+str(int(i/10000))+'.pickle', 'wb') as f:\r\n pickle.dump(s, f, protocol=pickle.HIGHEST_PROTOCOL)\r\n","sub_path":"samples/rocks/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"447134262","text":"def floor(a, b):\r\n return ((-a)//b)*-1\r\n\r\n\r\nN = int(input())\r\nratio = []\r\nfor _ in range(N):\r\n ratio.append(list(map(int, input().split())))\r\n\r\nv_T = ratio[0][0]\r\nv_A = ratio[0][1]\r\n\r\nfor t, a in ratio[1:]:\r\n diff_t = t - v_T\r\n diff_a = a - v_A\r\n\r\n if diff_t < 0 and diff_a < 0:\r\n d = max(floor(v_T, t), floor(v_A, a))\r\n v_T = d*t\r\n v_A = d*a\r\n\r\n elif diff_t < 0:\r\n d = floor(v_T, t)\r\n v_T = d*t\r\n v_A = d*a\r\n\r\n elif diff_a < 0:\r\n d = floor(v_A, a)\r\n v_T = d*t\r\n v_A = d*a\r\n\r\n elif diff_t >= 0 and diff_a >= 0:\r\n m = max(diff_t, diff_a)\r\n v_T = t\r\n v_A = a\r\n\r\nprint(v_T+v_A)","sub_path":"Source Codes/AtCoder/arc062/A/4114914.py","file_name":"4114914.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"374898327","text":"# Guided Backprop\n# Modified from: https://github.com/utkuozbulak/pytorch-cnn-visualizations/blob/master/src/guided_backprop.py\n\nimport torch\nfrom torch.nn import ReLU\n\n\nclass GuidedBackprop():\n \"\"\"\n Produces gradients generated with guided back propagation from the given image\n \"\"\"\n def __init__(self, 
model):\n self.model = model\n self.gradients = None\n self.forward_relu_outputs = []\n # Put model in evaluation mode\n self.model.eval()\n self.update_relus()\n self.hook_layers()\n\n def hook_layers(self):\n def hook_function(module, grad_in, grad_out):\n self.gradients = grad_in[0]\n # Register hook to the first module; index 0 of modules() is the model\n # itself, whose grad_in corresponds to the gradient w.r.t. its input\n first_layer = list(self.model.modules())[0] # [1]\n first_layer.register_backward_hook(hook_function)\n\n def update_relus(self):\n \"\"\"\n Updates relu activation functions so that\n 1- stores output in forward pass\n 2- imputes zero for gradient values that are less than zero\n \"\"\"\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for module in self.model.modules():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)\n\n def generate_gradients(self, inputs, target_class, mc):\n # Forward pass\n inputs.requires_grad_()\n output = self.model(inputs)\n output = output[:, mc]\n # Zero gradients\n self.model.zero_grad()\n # Target for backprop\n # one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()\n # one_hot_output[0][target_class] = 1\n # Backward pass\n # model_output.backward(gradient=one_hot_output)\n output.backward(gradient=target_class)\n # Convert Pytorch variable to numpy array\n # [0] to get rid of the first channel (1,3,224,224)\n # gradients_as_arr = self.gradients.data.numpy()[0]\n # gradients_as_arr = self.gradients.data\n gradients_as_arr = inputs.grad.data\n return gradients_as_arr, output\n","sub_path":"guided_backprop.py","file_name":"guided_backprop.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"298655703","text":"import numpy as np\nimport matplotlib.pylab as plt\n\n\ndef step_function(x):\n # (A naive scalar version would only accept real numbers, not NumPy arrays)\n\n #Applying a comparison operator to a NumPy array yields a boolean per element\n y = x > 0\n \n #The astype() method converts to any desired type (np.int here)\n return y.astype(np.int)\n\ndef sigmoid_function(x):\n\n #np.exp(-x) computes e^-x\n #Operations between a scalar and a NumPy array are broadcast element-wise\n return 1 / (1 + np.exp(-x))
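\n\n# Worked example (added): sigmoid_function(0) returns 0.5, while\n# step_function(np.array([0])) returns array([0]) because 0 > 0 is False;\n# for large positive x both functions approach 1.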
torch.nn.init import xavier_uniform_, constant_\n\nfrom sklearn.metrics import average_precision_score\n\nfrom torch.utils.data import DataLoader\n\nfrom dataloader import TestDataset\n\nclass ResNetBlock(nn.Module):\n def __init__(self, channels, kernel_size, conv_layers, dropout=0.2):\n super(ResNetBlock, self).__init__()\n self.conv_layers = conv_layers\n self.channels = channels\n self.kernel_size = kernel_size\n # assume kernel size is odd\n self.pad = kernel_size // 2\n self.drop = dropout\n\n for i in range(self.conv_layers):\n name = 'conv{}'.format(i)\n conv = nn.Conv1d(channels, channels, kernel_size, padding=self.pad)\n setattr(self, name, conv)\n name = 'norm{}'.format(i)\n norm = nn.BatchNorm1d(channels)\n setattr(self, name, norm)\n name = 'drop{}'.format(i)\n drop = nn.Dropout(dropout)\n setattr(self, name, drop)\n name = 'actv{}'.format(i)\n actv = nn.ReLU()\n setattr(self, name, actv)\n\n def reset_parameters(self):\n for i in range(self.conv_layers):\n name = 'conv{}'.format(i)\n xavier_uniform_(getattr(self, name).weight.data)\n name = 'norm{}'.format(i)\n constant_(getattr(self, name).weight, 1)\n constant_(getattr(self, name).bias, 1)\n\n def forward(self, x):\n x_res = x\n for i in range(self.conv_layers):\n name = 'conv{}'.format(i)\n x = getattr(self, name)(x)\n name = 'norm{}'.format(i)\n x = getattr(self, name)(x)\n name = 'drop{}'.format(i)\n x = getattr(self, name)(x)\n name = 'actv{}'.format(i)\n x = getattr(self, name)(x)\n x = x + x_res\n return x\n\nclass MapResNet(nn.Module):\n def __init__(self,nodedim, edgedim, hidden_size, res_blocks, channels, kernel_size, in_drop, dropout):\n super(MapResNet, self).__init__()\n self.ndim = nodedim\n self.edim = edgedim\n self.blocks = res_blocks\n self.channels = channels\n self.kernel_size = kernel_size\n self.pad = kernel_size // 2\n self.in_drop = in_drop\n self.drop = dropout\n self.hidden = hidden_size\n\n self.in_drop_h = nn.Dropout(in_drop)\n self.in_drop_t = nn.Dropout(2*in_drop)\n\n self.in_layers = [nn.Conv1d(1, channels, kernel_size, padding=self.pad),\n nn.BatchNorm1d(channels),\n nn.ReLU(),\n nn.Dropout(dropout)\n ]\n\n for i in range(self.blocks):\n name = 'block{}'.format(i)\n block = ResNetBlock(channels, kernel_size, 2, dropout)\n setattr(self, name, block)\n\n self.lin_h_h = nn.Linear(2*nodedim+edgedim, hidden_size)\n self.lin_c_h = nn.Linear(channels*hidden_size, nodedim*edgedim)\n self.lin_h_t = nn.Linear(2*nodedim+edgedim, hidden_size)\n self.lin_c_t = nn.Linear(channels*hidden_size, nodedim*edgedim)\n\n def reset_parameters(self):\n xavier_uniform_(self.lin_h_h)\n xavier_uniform_(self.lin_c_h)\n xavier_uniform_(self.lin_c_t)\n xavier_uniform_(self.lin_h_t)\n for i in range(self.blocks):\n name = 'block{}'.format(i)\n getattr(self, name).reset_parameters()\n\n def forward(self, head, rel, tail):\n h = self.in_drop_h(head)\n t = self.in_drop_t(tail)\n\n x = torch.cat([h, rel, t], dim=-1)\n\n for layer in self.in_layers:\n x = layer(x)\n\n for i in range(self.blocks):\n name = 'block{}'.format(i)\n x = getattr(self, name)(x)\n\n ## final linear layers to produce maps\n x_h = self.lin_h_h(x)\n x_h = torch.flatten(x_h, 1)\n x_h = self.lin_c_h(x_h)\n map_h = torch.reshape(x_h, (-1, self.edim, self.ndim))\n\n x_t = self.lin_h_t(x)\n x_t = torch.flatten(x_t, 1)\n x_t = self.lin_c_t(x_t)\n map_t = torch.reshape(x_t, (-1, self.edim, self.ndim))\n\n ## apply maps to get score\n return map_h, map_t\n\nclass SheafNetwork(nn.Module):\n def __init__(self,device,nodedim, edgedim, hidden_size, res_blocks, 
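# Note that torch.nn.init.xavier_uniform_ operates on tensors, so initialising a
# linear layer (as reset_parameters above attempts) means passing its weight
# tensor, not the module object itself; a minimal sketch of that pattern (the
# layer sizes here are illustrative):
import torch.nn as nn
from torch.nn.init import xavier_uniform_, constant_

layer = nn.Linear(16, 8)
xavier_uniform_(layer.weight)   # initialise the 2-D weight matrix in place
constant_(layer.bias, 0.0)      # give the 1-D bias a fixed starting value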
channels, kernel_size, in_drop, dropout):\n super(SheafNetwork, self).__init__()\n self.map_generator = MapResNet(nodedim, edgedim, hidden_size, res_blocks, channels, kernel_size, in_drop, dropout)\n self.ndim = nodedim\n self.edim = edgedim\n self.valid_map = False\n self.map_h = None\n self.map_t = None\n self.dev = device\n\n def reset_parameters(self):\n self.map_generator.reset_parameters()\n\n def forward(self, head, relation, tail, mode):\n if mode == 'tail-batch':\n ## matching the negative samples to (h,r) pairs\n head = head.expand_as(tail)\n relation = relation.expand_as(tail)\n head = head.reshape((-1, 1, head.shape[2]))\n relation = relation.reshape((-1, 1, relation.shape[2]))\n tail = tail.reshape((-1, 1, tail.shape[2]))\n else:\n ## matching the negative samples to (r,t) pairs\n tail = tail.expand_as(head)\n relation = relation.expand_as(head)\n head = head.reshape((-1, 1, head.shape[2]))\n relation = relation.reshape((-1, 1, relation.shape[2]))\n tail = tail.reshape((-1, 1, tail.shape[2]))\n\n self.map_h, self.map_t = self.map_generator(head, relation, tail)\n self.valid_map = True\n score = torch.matmul(self.map_h, head.transpose(1,2)) - torch.matmul(self.map_t, tail.transpose(1,2))\n return score\n\n def regularise(self, rel):\n s = torch.zeros((torch.max(rel)+1,rel.shape[0]), device=self.dev)\n s = s.scatter_(0,rel,1)\n s = s[s.sum(1) != 0, :]\n s_norm = s.sum(1)\n s = s.to_sparse()\n sdelta = torch.mm(s, self.map_h) + torch.mm(s, self.map_t)\n sdelta = sdelta / s_norm.unsqueeze(1)\n delta_diff = sdelta.unsqueeze(1).expand(-1, delta_.shape[0], -1) - \\\n delta_.unsqueeze(0).expand(sdelta.shape[0], -1, -1)\n sdelta_diff = s.unsqueeze(-1).expand(-1,-1,delta_.shape[1])*delta_diff\n reg = ((sdelta_diff).sum(dim=0)**2).sum(dim=-1)\n\n self.valid_map=False\n return reg.sum()\n\n def clear_map(self):\n self.valid_map = False\n self.map_h = None\n self.map_t = None\n\n\nclass KGEModel(nn.Module):\n def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma, args,\n double_entity_embedding=False, double_relation_embedding=False):\n super(KGEModel, self).__init__()\n self.model_name = model_name\n self.nentity = nentity\n self.nrelation = nrelation\n self.hidden_dim = hidden_dim\n self.epsilon = 2.0\n \n self.gamma = nn.Parameter(\n torch.Tensor([gamma]), \n requires_grad=False\n )\n \n self.embedding_range = nn.Parameter(\n torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]), \n requires_grad=False\n )\n \n self.entity_dim = hidden_dim*2 if double_entity_embedding else hidden_dim\n self.relation_dim = hidden_dim*2 if double_relation_embedding else hidden_dim\n \n self.entity_embedding = nn.Parameter(torch.zeros(nentity, self.entity_dim))\n nn.init.uniform_(\n tensor=self.entity_embedding, \n a=-self.embedding_range.item(), \n b=self.embedding_range.item()\n )\n \n self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))\n nn.init.uniform_(\n tensor=self.relation_embedding, \n a=-self.embedding_range.item(), \n b=self.embedding_range.item()\n )\n \n if model_name == 'pRotatE':\n self.modulus = nn.Parameter(torch.Tensor([[0.5 * self.embedding_range.item()]]))\n \n #Do not forget to modify this line when you add a new model in the \"forward\" function\n if model_name not in ['TransE', 'DistMult', 'ComplEx', 'RotatE', 'pRotatE', 'Sheaf']:\n raise ValueError('model %s not supported' % model_name)\n \n if model_name == 'RotatE' and (not double_entity_embedding or double_relation_embedding):\n raise ValueError('RotatE should use 
--double_entity_embedding')\n\n if model_name == 'ComplEx' and (not double_entity_embedding or not double_relation_embedding):\n raise ValueError('ComplEx should use --double_entity_embedding and --double_relation_embedding')\n\n if model_name == 'Sheaf':\n device = torch.device(\"cuda\") if args.cuda else torch.device(\"cpu\")\n self.sheaf_network = SheafNetwork(device,\n args.ndim,\n args.edim,\n args.hid,\n args.resblocks,\n args.channels,\n args.kernel,\n args.indrop,\n args.drop)\n \n def forward(self, sample, mode='single'):\n '''\n Forward function that calculate the score of a batch of triples.\n In the 'single' mode, sample is a batch of triple.\n In the 'head-batch' or 'tail-batch' mode, sample consists two part.\n The first part is usually the positive sample.\n And the second part is the entities in the negative samples.\n Because negative samples and positive samples usually share two elements \n in their triple ((head, relation) or (relation, tail)).\n '''\n\n if mode == 'single':\n batch_size, negative_sample_size = sample.size(0), 1\n \n head = torch.index_select(\n self.entity_embedding, \n dim=0, \n index=sample[:,0]\n ).unsqueeze(1)\n \n relation = torch.index_select(\n self.relation_embedding, \n dim=0, \n index=sample[:,1]\n ).unsqueeze(1)\n \n tail = torch.index_select(\n self.entity_embedding, \n dim=0, \n index=sample[:,2]\n ).unsqueeze(1)\n \n elif mode == 'head-batch':\n tail_part, head_part = sample\n batch_size, negative_sample_size = head_part.size(0), head_part.size(1)\n \n head = torch.index_select(\n self.entity_embedding, \n dim=0, \n index=head_part.view(-1)\n ).view(batch_size, negative_sample_size, -1)\n \n relation = torch.index_select(\n self.relation_embedding, \n dim=0, \n index=tail_part[:, 1]\n ).unsqueeze(1)\n \n tail = torch.index_select(\n self.entity_embedding, \n dim=0, \n index=tail_part[:, 2]\n ).unsqueeze(1)\n \n elif mode == 'tail-batch':\n head_part, tail_part = sample\n batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)\n \n head = torch.index_select(\n self.entity_embedding, \n dim=0, \n index=head_part[:, 0]\n ).unsqueeze(1)\n \n relation = torch.index_select(\n self.relation_embedding,\n dim=0,\n index=head_part[:, 1]\n ).unsqueeze(1)\n \n tail = torch.index_select(\n self.entity_embedding, \n dim=0, \n index=tail_part.view(-1)\n ).view(batch_size, negative_sample_size, -1)\n \n else:\n raise ValueError('mode %s not supported' % mode)\n \n model_func = {\n 'TransE': self.TransE,\n 'DistMult': self.DistMult,\n 'ComplEx': self.ComplEx,\n 'RotatE': self.RotatE,\n 'pRotatE': self.pRotatE,\n 'Sheaf': self.sheaf\n }\n \n if self.model_name in model_func:\n score = model_func[self.model_name](head, relation, tail, mode)\n else:\n raise ValueError('model %s not supported' % self.model_name)\n \n return score\n \n def TransE(self, head, relation, tail, mode):\n if mode == 'head-batch':\n score = head + (relation - tail)\n else:\n score = (head + relation) - tail\n\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n\n def DistMult(self, head, relation, tail, mode):\n if mode == 'head-batch':\n score = head * (relation * tail)\n else:\n score = (head * relation) * tail\n\n score = score.sum(dim = 2)\n return score\n\n def ComplEx(self, head, relation, tail, mode):\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_relation, im_relation = torch.chunk(relation, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n if mode == 'head-batch':\n re_score = re_relation * re_tail + im_relation * 
im_tail\n im_score = re_relation * im_tail - im_relation * re_tail\n score = re_head * re_score + im_head * im_score\n else:\n re_score = re_head * re_relation - im_head * im_relation\n im_score = re_head * im_relation + im_head * re_relation\n score = re_score * re_tail + im_score * im_tail\n\n score = score.sum(dim = 2)\n return score\n\n def sheaf(self, head, relation, tail, mode):\n return self.sheaf_network(head, relation, tail, mode)\n\n def RotatE(self, head, relation, tail, mode):\n pi = 3.14159265358979323846\n \n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n #Make phases of relations uniformly distributed in [-pi, pi]\n\n phase_relation = relation/(self.embedding_range.item()/pi)\n\n re_relation = torch.cos(phase_relation)\n im_relation = torch.sin(phase_relation)\n\n if mode == 'head-batch':\n re_score = re_relation * re_tail + im_relation * im_tail\n im_score = re_relation * im_tail - im_relation * re_tail\n re_score = re_score - re_head\n im_score = im_score - im_head\n else:\n re_score = re_head * re_relation - im_head * im_relation\n im_score = re_head * im_relation + im_head * re_relation\n re_score = re_score - re_tail\n im_score = im_score - im_tail\n\n score = torch.stack([re_score, im_score], dim = 0)\n score = score.norm(dim = 0)\n\n score = self.gamma.item() - score.sum(dim = 2)\n return score\n\n def pRotatE(self, head, relation, tail, mode):\n pi = 3.14159262358979323846\n \n #Make phases of entities and relations uniformly distributed in [-pi, pi]\n\n phase_head = head/(self.embedding_range.item()/pi)\n phase_relation = relation/(self.embedding_range.item()/pi)\n phase_tail = tail/(self.embedding_range.item()/pi)\n\n if mode == 'head-batch':\n score = phase_head + (phase_relation - phase_tail)\n else:\n score = (phase_head + phase_relation) - phase_tail\n\n score = torch.sin(score) \n score = torch.abs(score)\n\n score = self.gamma.item() - score.sum(dim = 2) * self.modulus\n return score\n \n @staticmethod\n def train_step(model, optimizer, train_iterator, args):\n '''\n A single train step. 
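# The ComplEx score above is Re(<h, r, conj(t)>) written out with real/imaginary
# chunks; a minimal standalone sketch checking that reading against torch's
# complex dtype (the embedding size of 4 is illustrative):
import torch

h, r, t = (torch.randn(4, dtype=torch.cfloat) for _ in range(3))
score_complex = torch.sum(h * r * t.conj()).real
score_chunks = (
    (h.real * r.real - h.imag * r.imag) * t.real
    + (h.real * r.imag + h.imag * r.real) * t.imag
).sum()
torch.testing.assert_close(score_complex, score_chunks)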
Apply back-propagation and return the loss\n        '''\n\n        model.train()\n\n        optimizer.zero_grad()\n\n        positive_sample, negative_sample, subsampling_weight, mode = next(train_iterator)\n\n        if args.cuda:\n            positive_sample = positive_sample.cuda()\n            negative_sample = negative_sample.cuda()\n            subsampling_weight = subsampling_weight.cuda()\n\n        negative_score = model((positive_sample, negative_sample), mode=mode)\n\n        if args.regularization and model.model_name == 'Sheaf':\n            regularization = model.sheaf_network.regularise(positive_sample[:,1])\n            model.sheaf_network.clear_map()\n\n        if args.negative_adversarial_sampling:\n            #In self-adversarial sampling, we do not apply back-propagation on the sampling weight\n            negative_score = (F.softmax(negative_score * args.adversarial_temperature, dim = 1).detach() \n                              * F.logsigmoid(-negative_score)).sum(dim = 1)\n        else:\n            negative_score = F.logsigmoid(-negative_score).mean(dim = 1)\n\n        positive_score = model(positive_sample)\n\n        positive_score = F.logsigmoid(positive_score).squeeze(dim = 1)\n\n        if args.uni_weight:\n            positive_sample_loss = - positive_score.mean()\n            negative_sample_loss = - negative_score.mean()\n        else:\n            positive_sample_loss = - (subsampling_weight * positive_score).sum()/subsampling_weight.sum()\n            negative_sample_loss = - (subsampling_weight * negative_score).sum()/subsampling_weight.sum()\n\n        loss = (positive_sample_loss + negative_sample_loss)/2\n\n        if args.regularization and model.model_name == 'Sheaf':\n            regularization += model.sheaf_network.regularise(positive_sample[:,1])\n            loss = loss + regularization\n            model.sheaf_network.clear_map()\n\n        \n        if args.regularization != 0.0:\n            #Use L3 regularization for ComplEx and DistMult\n            regularization = args.regularization * (\n                model.entity_embedding.norm(p = 3)**3 + \n                model.relation_embedding.norm(p = 3)**3\n            )\n            loss = loss + regularization\n            regularization_log = {'regularization': regularization.item()}\n        else:\n            regularization_log = {}\n        \n        loss.backward()\n\n        optimizer.step()\n\n        log = {\n            **regularization_log,\n            'positive_sample_loss': positive_sample_loss.item(),\n            'negative_sample_loss': negative_sample_loss.item(),\n            'loss': loss.item()\n        }\n\n        return log\n    \n    @staticmethod\n    def test_step(model, test_triples, all_true_triples, args):\n        '''\n        Evaluate the model on test or valid datasets\n        '''\n        \n        model.eval()\n        \n        if args.countries:\n            #Countries S* datasets are evaluated on AUC-PR\n            #Process test data for AUC-PR evaluation\n            sample = list()\n            y_true = list()\n            for head, relation, tail in test_triples:\n                for candidate_region in args.regions:\n                    y_true.append(1 if candidate_region == tail else 0)\n                    sample.append((head, relation, candidate_region))\n\n            sample = torch.LongTensor(sample)\n            if args.cuda:\n                sample = sample.cuda()\n\n            with torch.no_grad():\n                y_score = model(sample).squeeze(1).cpu().numpy()\n\n            y_true = np.array(y_true)\n\n            #average_precision_score is the same as auc_pr\n            auc_pr = average_precision_score(y_true, y_score)\n\n            metrics = {'auc_pr': auc_pr}\n            \n        else:\n            #Otherwise use standard (filtered) MRR, MR, HITS@1, HITS@3, and HITS@10 metrics\n            #Prepare dataloader for evaluation\n            test_dataloader_head = DataLoader(\n                TestDataset(\n                    test_triples, \n                    all_true_triples, \n                    args.nentity, \n                    args.nrelation, \n                    'head-batch'\n                ), \n                batch_size=args.test_batch_size,\n                num_workers=max(1, args.cpu_num//2), \n                collate_fn=TestDataset.collate_fn\n            )\n\n            test_dataloader_tail = DataLoader(\n                TestDataset(\n                    test_triples, \n                    all_true_triples, \n                    args.nentity, \n                    args.nrelation, \n                    'tail-batch'\n            
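# In the self-adversarial branch of train_step above, each negative sample is
# weighted by a softmax over its own score, with the gradient detached so the
# weights act as constants; a minimal standalone sketch of that weighting (the
# batch shape and temperature are illustrative):
import torch
import torch.nn.functional as F

neg_scores = torch.randn(2, 5)          # batch of 2 triples, 5 negatives each
temperature = 1.0
weights = F.softmax(neg_scores * temperature, dim=1).detach()
neg_loss = -(weights * F.logsigmoid(-neg_scores)).sum(dim=1)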
), \n batch_size=args.test_batch_size,\n num_workers=max(1, args.cpu_num//2), \n collate_fn=TestDataset.collate_fn\n )\n \n test_dataset_list = [test_dataloader_head, test_dataloader_tail]\n \n logs = []\n\n step = 0\n total_steps = sum([len(dataset) for dataset in test_dataset_list])\n\n with torch.no_grad():\n for test_dataset in test_dataset_list:\n for positive_sample, negative_sample, filter_bias, mode in test_dataset:\n if args.cuda:\n positive_sample = positive_sample.cuda()\n negative_sample = negative_sample.cuda()\n filter_bias = filter_bias.cuda()\n\n batch_size = positive_sample.size(0)\n\n score = model((positive_sample, negative_sample), mode)\n score += filter_bias\n\n #Explicitly sort all the entities to ensure that there is no test exposure bias\n argsort = torch.argsort(score, dim = 1, descending=True)\n\n if mode == 'head-batch':\n positive_arg = positive_sample[:, 0]\n elif mode == 'tail-batch':\n positive_arg = positive_sample[:, 2]\n else:\n raise ValueError('mode %s not supported' % mode)\n\n for i in range(batch_size):\n #Notice that argsort is not ranking\n ranking = (argsort[i, :] == positive_arg[i]).nonzero()\n assert ranking.size(0) == 1\n\n #ranking + 1 is the true ranking used in evaluation metrics\n ranking = 1 + ranking.item()\n logs.append({\n 'MRR': 1.0/ranking,\n 'MR': float(ranking),\n 'HITS@1': 1.0 if ranking <= 1 else 0.0,\n 'HITS@3': 1.0 if ranking <= 3 else 0.0,\n 'HITS@10': 1.0 if ranking <= 10 else 0.0,\n })\n\n if step % args.test_log_steps == 0:\n logging.info('Evaluating the model... (%d/%d)' % (step, total_steps))\n\n step += 1\n\n metrics = {}\n for metric in logs[0].keys():\n metrics[metric] = sum([log[metric] for log in logs])/len(logs)\n\n return metrics\n","sub_path":"codes/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":23439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"103093558","text":"from _Helper import *\n\n# \n# Suppose you have N integers from 1 to N. 
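# The per-triple logs collected in test_step above reduce to corpus-level
# metrics by simple averaging; a minimal sketch of MRR and Hits@k over a list
# of 1-based ranks (the example ranks are illustrative):
ranks = [1, 3, 12, 2]
mrr = sum(1.0 / r for r in ranks) / len(ranks)
hits_at_10 = sum(1.0 for r in ranks if r <= 10) / len(ranks)
print(mrr, hits_at_10)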
We define a beautiful arrangement as an array that is constructed by these N numbers successfully if one of the following is true for the ith position (1 ≤ i ≤ N) in this array:\n\n# The number at the ith position is divisible by i.\n# i is divisible by the number at the ith position.\n# Now given N, how many beautiful arrangements can you construct?\n\n# Example 1:\n# Input: 2\n# Output: 2\n# Explanation: \n\n# The first beautiful arrangement is [1, 2]:\n\n# Number at the 1st position (i=1) is 1, and 1 is divisible by i (i=1).\n\n# Number at the 2nd position (i=2) is 2, and 2 is divisible by i (i=2).\n\n# The second beautiful arrangement is [2, 1]:\n\n# Number at the 1st position (i=1) is 2, and 2 is divisible by i (i=1).\n\n# Number at the 2nd position (i=2) is 1, and i (i=2) is divisible by 1.\n# Note:\n# N is a positive integer and will not exceed 15.\n# \n\n# \n# TIME: O()\n# SPACE: O()\n# \n\n# \n# COMMENT\n# \n\n# \n# Medium\n# \n\n\n# \nclass Solution(object):\n def GenerateList(self, used, value, index):\n if value % index != 0 and index % value != 0:\n return\n if index == len(used) - 1:\n self.res += 1\n for i in range(1, len(used)):\n if used[i] == False:\n used[i] = True\n self.GenerateList(used, i, index + 1)\n used[i] = False\n\n def countArrangement(self, N):\n \"\"\"\n :type N: int\n :rtype: int\n \"\"\"\n if N <= 0:\n return 0\n if N == 1:\n return 1\n self.res = 0\n used = [False] * (N + 1)\n for i in range(1, len(used)):\n used[i] = True\n self.GenerateList(used, i, 1)\n used[i] = False\n return self.res\n\n\n# \n\n# \nsolution = Solution()\nInput = 1\nexpected = 1\nactual = solution.countArrangement(Input)\nAreEqual(expected, actual, Input)\n# another pair\nInput = 2\nexpected = 2\nactual = solution.countArrangement(Input)\nAreEqual(expected, actual, Input)\n# another pair\nInput = 3\nexpected = 3\nactual = solution.countArrangement(Input)\nAreEqual(expected, actual, Input)\n# ","sub_path":"526_Beautiful_Arrangement.py","file_name":"526_Beautiful_Arrangement.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"521417059","text":"import cloudkeeper.logging\nimport threading\nfrom cloudkeeper.baseplugin import BasePlugin\nfrom cloudkeeper.baseresources import BaseVolume\nfrom cloudkeeper.args import ArgumentParser\nfrom cloudkeeper.utils import parse_delta\nfrom cloudkeeper.event import (\n Event,\n EventType,\n add_event_listener,\n remove_event_listener,\n)\n\nlog = cloudkeeper.logging.getLogger(\"cloudkeeper.\" + __name__)\n\n\nclass CleanupVolumesPlugin(BasePlugin):\n def __init__(self):\n super().__init__()\n self.name = \"cleanup_volumes\"\n self.exit = threading.Event()\n if ArgumentParser.args.cleanup_volumes:\n try:\n self.age = parse_delta(ArgumentParser.args.cleanup_volumes_age)\n log.debug(f\"Volume Cleanup Plugin Age {self.age}\")\n add_event_listener(EventType.SHUTDOWN, self.shutdown)\n add_event_listener(\n EventType.CLEANUP_PLAN, self.volumes_cleanup, blocking=True\n )\n except ValueError:\n log.exception(\n f\"Error while parsing Volume Cleanup Age {ArgumentParser.args.volclean_age}\"\n )\n else:\n self.exit.set()\n\n def __del__(self):\n remove_event_listener(EventType.CLEANUP_PLAN, self.volumes_cleanup)\n remove_event_listener(EventType.SHUTDOWN, self.shutdown)\n\n def go(self):\n self.exit.wait()\n\n def volumes_cleanup(self, event: Event):\n graph = event.data\n log.info(\"Volume Cleanup called\")\n with graph.lock.read_access:\n for node in graph.nodes:\n 
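# The backtracking count above can be memoised on a visited-bitmask; a minimal
# alternative sketch of countArrangement (not the submission's method, just a
# common speed-up that caches states keyed by position and used-number mask):
from functools import lru_cache

def count_arrangement(n):
    @lru_cache(maxsize=None)
    def go(pos, used):
        if pos > n:
            return 1
        return sum(go(pos + 1, used | (1 << v))
                   for v in range(1, n + 1)
                   if not used & (1 << v) and (v % pos == 0 or pos % v == 0))
    return go(1, 0)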
if (\n isinstance(node, BaseVolume)\n and node.volume_status == \"available\"\n and node.age > self.age\n and node.last_access > self.age\n and node.last_update > self.age\n ):\n cloud = node.cloud(graph)\n account = node.account(graph)\n region = node.region(graph)\n log.debug(\n (\n f\"Found available volume {node.dname} in cloud {cloud.name} account {account.dname} \"\n f\"region {region.name} with age {node.age}. Last update was {node.last_update} ago \"\n f\"and last access {node.last_access} ago both of which is longer than {self.age} \"\n f\"- setting to be cleaned\"\n )\n )\n node.clean = True\n\n @staticmethod\n def add_args(arg_parser: ArgumentParser) -> None:\n arg_parser.add_argument(\n \"--cleanup-volumes\",\n help=\"Cleanup unused Volumes (default: False)\",\n dest=\"cleanup_volumes\",\n action=\"store_true\",\n default=False,\n )\n arg_parser.add_argument(\n \"--cleanup-volumes-age\",\n help=\"Cleanup unused Volumes Age (default: 14 days)\",\n default=\"14 days\",\n dest=\"cleanup_volumes_age\",\n )\n\n def shutdown(self, event: Event):\n log.debug(\n f\"Received event {event.event_type} - shutting down Volume Cleanup plugin\"\n )\n self.exit.set()\n","sub_path":"plugins/cleanup_volumes/cloudkeeper_plugin_cleanup_volumes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"634283758","text":"import io\nfrom PIL import Image\nfrom django.template.context_processors import media\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom model_bakery import baker\nfrom posts.models import Post, Comment\nfrom users.models import User\n\n\nclass PostTestCase(APITestCase):\n PHOTO_FILE_EXTENSION = 'png'\n\n def photo_file(self):\n file = io.BytesIO()\n image = Image.new('RGBA', size=(1, 1), color=(0, 0, 0))\n image.save(file, 'png')\n file.name = f'test_test.{self.PHOTO_FILE_EXTENSION}'\n file.seek(0)\n return file\n\n def setUp(self) -> None:\n self.user = User(username='testUser', password='1111')\n self.user.set_password(self.user.password)\n self.user.save()\n\n self.data = {\n 'image': self.photo_file(),\n 'text': 'hello mina',\n }\n\n self.multi_data = {\n 'image': [self.photo_file(), self.photo_file()],\n 'text': 'hello mina',\n }\n\n self.posts = baker.make(Post, _quantity=3)\n\n def test_post_create(self):\n self.client.force_authenticate(user=self.user)\n response = self.client.post('/posts/', self.data, format='multipart')\n print('rrrr:', response)\n\n # self.assertEqual(response.data['image'], self.data['image'])\n self.assertTrue(response.data['image'].startswith('http') and\n response.data['image'].endswith(self.PHOTO_FILE_EXTENSION))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_post_list(self):\n user = self.user\n self.client.force_authenticate(user=user)\n response = self.client.get('/posts/')\n print('list:', response)\n\n for response_data, posts_data in zip(response.data, self.posts):\n self.assertEqual(response_data['text'], posts_data.text)\n # self.assertEqual(response_data['image'], posts_data.image)\n\n def test_post_retrieve(self):\n user = self.user\n self.client.force_authenticate(user=user)\n make_post = self.posts[0]\n response = self.client.get(f'/posts/{make_post.pk}/')\n print('eee;', response)\n\n # self.assertEqual(make_post.image, response.data['image'])\n self.assertEqual(make_post.text, response.data['text'])\n\n def test_post_update(self):\n user = 
self.user\n self.client.force_authenticate(user=user)\n make_post = self.posts[0]\n data = {\n 'text': 'kkkkkkkk'\n }\n response = self.client.patch(f'/posts/{make_post.pk}/', data=data)\n self.assertEqual(response.data['text'], data['text'])\n\n def test_post_destroy(self):\n user = self.user\n self.client.force_authenticate(user=user)\n make_post = self.posts[0]\n\n response = self.client.delete(f'/posts/{make_post.pk}/')\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertFalse(response.data)\n pk = Post.objects.count()\n print('pkpkpk:', pk)\n self.assertEqual(pk, 2)\n\n def test_like(self):\n user = self.user\n self.client.force_authenticate(user=user)\n make_post = self.posts[0]\n response = self.client.post(f'/posts/{make_post.pk}/like_toggle/')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = self.client.post(f'/posts/{make_post.pk}/like_toggle/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\nclass CommentTestCode(APITestCase):\n def setUp(self) -> None:\n self.user = User(username='mina', password='1234')\n self.user.set_password(self.user.password)\n self.user.save()\n\n self.posts = baker.make(Post, _quantity=3)\n self.post = Post(image='kekeke', text='222222', owner=self.user)\n self.post.save()\n\n self.comments = baker.make(Comment, _quantity=3)\n self.comment = Comment(text='7777', post=self.post, author=self.user)\n self.comment.save()\n\n self.queryset = Comment.objects.all()\n\n def test_comment_crate(self):\n user = self.user\n self.client.force_authenticate(user=user)\n post = self.post\n data = {\n\n \"text\": \"tttttttt\"\n }\n response = self.client.post(f'/posts/{post.pk}/comments/', data=data)\n\n print('tttttttt', response.data)\n\n # response = self.client.post(f'/comments/{self.comment.pk}/reply/', data=data)\n\n self.assertEqual(response.data['text'], data['text'])\n\n def test_comment_list(self):\n user = self.user\n self.client.force_authenticate(user=user)\n\n response = self.client.get(f'/posts/{self.post.pk}/comments/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for a, b in zip(response.data, self.queryset):\n self.assertEqual(a['text'], b.text)\n self.assertEqual(a['author'], b.author_id)\n\n def test_comment_destroy(self):\n user = self.user\n self.client.force_authenticate(user=user)\n\n response = self.client.delete(f'/posts/{self.post.pk}/comments/{self.comment.pk}/')\n print('ㅇㅇㅇㅇㅇㅇㅇ', response)\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertFalse(response.data)\n\n\nclass ReplyTestCode(APITestCase):\n\n def setUp(self):\n self.user = User(username='mina', password='1234',)\n self.user.set_password(self.user.password)\n self.user.save()\n\n self.post = Post(image='kekeke', text='222222', owner=self.user)\n self.post.save()\n\n self.comment = Comment(text='7777', post=self.post, author=self.user)\n self.comment.save()\n\n def test_reply_create(self):\n self.client.force_authenticate(user=self.user)\n data = {\n # 'parent': self.comment.pk,\n # 'author': self.user.author_id,\n 'text': 'RRRRRRRR',\n }\n\n response = self.client.post(f'/comments/{self.comment.pk}/reply/', data=data)\n","sub_path":"minastagram/posts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"519023494","text":"import pickle\nimport time\nimport gc\nimport hashlib\nimport os\n\nimport numpy as np\nimport pandas 
as pd\nimport matplotlib.pyplot as plt\n\nimport xgboost as xgb\nimport lightgbm as lgb\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\n\ndef evalue_RMSLE_score(y_true, y_pred):\n return np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2)))\n\n# self-defined eval metric\n# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool\n# Root Mean Squared Logarithmic Error (RMSLE)\ndef rmsle(y_true, y_pred):\n return 'RMSLE', np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False\n\ndef convert_2_md5(value):\n return hashlib.md5(str(value).encode('utf-8')).hexdigest()\n\ndef write_to_log(*param):\n param_list = [str(s) for s in param]\n log = ' '.join(param_list)\n with open('./outcome/log_file.txt', 'a') as file:\n file.write(log+'\\n')\n file.flush() #立即写入磁盘\n os.fsync(file) #立即写入磁盘\n \n\nprint('santander_value_model prog starting...')\n\nstart_t = time.time()\ntrain_df = pd.read_csv('C:/D_Disk/data_competition/Santander_Value_Prediction/data/train.csv', \n index_col=0, header=0)\ntest_df = pd.read_csv('C:/D_Disk/data_competition/Santander_Value_Prediction/data/test.csv',\n index_col=0, header=0)\nprint('train_df.shape is {}, test_df.shape is {} load data cost time:{}'.format(\n train_df.shape, test_df.shape, time.time()-start_t))\n\ntrain_y = train_df['target']\ntrain_X = train_df.drop(['target'], axis=1)\nmerged_X = train_X.append(test_df)\n\n#merged_X = (merged_X - merged_X.min()) / (merged_X.max() - merged_X.min())\n#merged_X = (merged_X - merged_X.mean()) / (merged_X.std())\n\ntrain_X = merged_X.iloc[:len(train_y)]\ntest_X = merged_X.iloc[len(train_y):]\n\nX_train_new, X_val_new, y_train_new, y_val_new = train_test_split(train_X, \n train_y, test_size=0.2, random_state=42)\nprint('X_train_new.shape is {}, X_val_new.shape is {}'.format(X_train_new.shape,\n X_val_new.shape))\n\ndef test_param(lgbm_param):\n start_t = time.time()\n \n lgbm = lgb.LGBMRegressor(**lgbm_param)\n lgbm.fit(X_train_new, y_train_new, eval_set=[(X_train_new, y_train_new), \n (X_val_new, y_val_new)], eval_metric=rmsle, \n verbose=2000, early_stopping_rounds=2000)\n \n best_iteration = lgbm.best_iteration_\n print('best_iteration: {} partial fit cost time: {} '.format(\n best_iteration, time.time()-start_t))\n \n #lgbm.fit(train_X, train_y, eval_set=[(X_train_new, y_train_new), \n # (X_val_new, y_val_new)], verbose=2000, early_stopping_rounds=2000)\n #lgbm.fit(X_train_new, y_train_new)\n \n y_predictions_whole = lgbm.predict(train_X)\n RMSLE_score_lgb_whole = round(rmsle(train_y, y_predictions_whole)[1], 5)\n \n y_predictions_train = lgbm.predict(X_train_new)\n RMSLE_score_lgb_train = round(rmsle(y_train_new, y_predictions_train)[1], 5)\n \n y_predictions_val = lgbm.predict(X_val_new)\n RMSLE_score_lgb_val = round(rmsle(y_val_new, y_predictions_val)[1], 5)\n \n len_to_get = int(0.20*len(y_val_new))\n RMSLE_score_lgb_val_20_percent = round(rmsle(y_val_new[:len_to_get],\n y_predictions_val[:len_to_get])[1], 5)\n \n print('partial data whole_score: {} train score: {} test score: {}, '\n 'RMSLE_score_lgb_val_20_percent: {} '.format(\n RMSLE_score_lgb_whole, RMSLE_score_lgb_train, \n RMSLE_score_lgb_val, RMSLE_score_lgb_val_20_percent))\n \n lgbm_param['n_estimators'] = best_iteration\n param_md5_str = convert_2_md5(lgbm_param)\n store_path = 'C:/D_Disk/data_competition/Santander_Value_Prediction/outcome/'\n partial_file_name = '_'.join(['submission_partial', 
str(RMSLE_score_lgb_val), param_md5_str]) + '.csv'\n full_file_name = '_'.join(['submission_full', str(RMSLE_score_lgb_val), param_md5_str]) + '.csv'\n \n start_t = time.time()\n test_df['target'] = lgbm.predict(test_X)\n test_df['target'].to_csv(store_path+partial_file_name, header=['target'])\n print('get partial predict outcome cost time: ', time.time()-start_t)\n \n start_t = time.time()\n lgbm = lgb.LGBMRegressor(**lgbm_param)\n lgbm.fit(train_X, train_y)\n print('full fit cost time: ', time.time()-start_t)\n \n start_t = time.time()\n test_df['target'] = lgbm.predict(test_X)\n test_df['target'].to_csv(store_path+full_file_name, header=['target'])\n print('get full predict outcome cost time: ', time.time()-start_t)\n \n write_to_log('-'*25, ' md5 value: ', param_md5_str, '-'*25)\n write_to_log('param: ', lgbm_param)\n write_to_log('best_iteration: ', best_iteration)\n write_to_log('valid rmsle: ', RMSLE_score_lgb_val)\n write_to_log('-'*80+'\\n')\n\n\n#lgbm_param = {'n_estimators':50000, 'n_jobs':-1, 'learning_rate':0.1, \n# 'random_state':42, 'max_depth':20, 'min_child_samples':23,\n# 'num_leaves':91, 'subsample':0.8, 'colsample_bytree':0.5,\n# 'silent':-1, 'verbose':-1}\n\n#test_param(lgbm_param)\n\nlgbm_param = {'n_estimators':90000, 'n_jobs':-1, 'learning_rate':0.15, \n 'random_state':42, 'max_depth':20, 'min_child_samples':17,\n 'num_leaves':131, 'subsample':0.8, 'colsample_bytree':0.5,\n 'silent':-1, 'verbose':-1}\n\ntest_param(lgbm_param)\n\n#lgbm = lgb.LGBMRegressor(n_estimators=50000, n_jobs=-1, learning_rate=0.1, \n# random_state=42, max_depth=20, min_child_samples=23,\n# num_leaves=91, subsample=0.8, colsample_bytree=0.5,\n# silent=-1, verbose=-1)\n\n\n","sub_path":"santander_value_prediction/santander_value_model.py","file_name":"santander_value_model.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"93474799","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /Users/jorgeramos/Projects/uphold/aiopype/tests/sources/test_websocket.py\n# Compiled at: 2016-07-05 10:43:47\n# Size of source mod 2**32: 3822 bytes\n__doc__ = '\\nTest websocket source.\\n'\nimport asyncio\nfrom unittest import mock\nfrom unittest import TestCase\nfrom aiopype import SyncProtocol\nfrom aiopype.sources import WebsocketSource\n\nclass TestWebsocketSource(TestCase):\n\n @mock.patch('aiopype.sources.WebsocketSource.on_connect')\n @mock.patch('aiopype.sources.WebsocketSource.on_lost_connection')\n def test_init(self, lost_connection_mock, connected_mock):\n source = WebsocketSource('test', SyncProtocol())\n done_future = asyncio.Future()\n done_future.set_result(None)\n connected_mock.return_value = done_future\n lost_connection_mock.return_value = done_future\n\n async def test_events():\n await source.emit_async('connected')\n await source.emit_async('disconnected')\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(test_events())\n connected_mock.assert_called_with()\n lost_connection_mock.assert_called_with()\n self.assertEqual(source.done, None)\n self.assertEqual(source.failures, 0)\n self.assertEqual(source.max_failures, 10)\n self.assertEqual(source.heartbeat_timeout, 30)\n\n def test_lost_connection(self):\n source = WebsocketSource('test', SyncProtocol())\n source.max_failures = 0\n loop = asyncio.get_event_loop()\n 
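# Hashing the hyper-parameter dict, as convert_2_md5 does above, gives a stable
# file tag only if the dict serialises deterministically; a minimal sketch using
# sorted JSON instead of str(), assuming JSON-serialisable parameter values
# (json and hashlib are standard library):
import hashlib
import json

def params_md5(params):
    canonical = json.dumps(params, sort_keys=True)
    return hashlib.md5(canonical.encode('utf-8')).hexdigest()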
loop.run_until_complete(source.on_lost_connection())\n self.assertFalse(source.running)\n\n def test_start(self):\n source = WebsocketSource('test', SyncProtocol())\n source.connect = mock.Mock(return_value='test')\n watch_future = asyncio.Future()\n watch_future.set_result(None)\n source.watchdog = mock.Mock(return_value=watch_future)\n WebsocketSource.running = mock.PropertyMock(side_effect=[True, True, False])\n loop = asyncio.get_event_loop()\n exception = None\n try:\n loop.run_until_complete(source.start())\n except Exception as error:\n exception = error\n\n self.assertEqual(str(exception), 'Disconnected too many times, stopped')\n self.assertTrue(source.done)\n source.watchdog.assert_called_with('test')\n WebsocketSource.running = True\n\n def test_parse(self):\n parsed = WebsocketSource.parse('[1, 2, 3]')\n self.assertEqual(parsed, [1, 2, 3])\n\n\nclass TestWebsocketSourceWatchdog(TestCase):\n\n def setUp(self):\n self.source = WebsocketSource('test', SyncProtocol())\n self.loop = asyncio.get_event_loop()\n done_future = asyncio.Future()\n done_future.set_result(None)\n self.source.emit_async = mock.Mock(return_value=done_future)\n\n async def sleeper(self, time):\n await asyncio.sleep(time)\n\n def test_finish_before_websocket(self):\n self.loop.run_until_complete(self.source.watchdog(self.sleeper(1)))\n self.source.emit_async.assert_called_with('disconnected', 'Service failed')\n\n def test_timeout_before_websocket(self):\n self.source.heartbeat_timeout = 0.01\n self.loop.run_until_complete(self.source.watchdog(self.sleeper(1)))\n self.source.emit_async.assert_called_with('disconnected', 'Unable to connect')\n\n def test_coroutine_done(self):\n self.source.websocket = mock.Mock()\n pong_future = asyncio.Future()\n pong_future.set_result(asyncio.ensure_future(self.sleeper(1)))\n self.source.websocket.ping = mock.Mock(return_value=pong_future)\n self.loop.run_until_complete(self.source.watchdog(self.sleeper(1)))\n self.source.emit_async.assert_called_with('disconnected', 'Service failed')\n\n def test_lost_heartbeat(self):\n self.source.websocket = mock.Mock()\n self.source.heartbeat_timeout = 0.01\n pong_future = asyncio.Future()\n pong_future.set_result(asyncio.ensure_future(self.sleeper(10)))\n self.source.websocket.ping = mock.Mock(return_value=pong_future)\n self.loop.run_until_complete(self.source.watchdog(self.sleeper(1)))\n self.source.emit_async.assert_called_with('disconnected', 'Lost heartbeat')","sub_path":"pycfiles/aiopyrestful-1.4.1-py3-none-any/test_websocket.cpython-35.py","file_name":"test_websocket.cpython-35.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"336924","text":"import psycopg2\nimport psycopg2.extras\n\nimport glob\nfrom os.path import join\nimport os\n\nfrom flask import current_app, g\nimport click\nfrom flask.cli import with_appcontext\n\ndef _get_faces():\n path = os.environ[\"FOLDER_IMGS\"]\n faces = []\n for f in glob.glob(join(path, \"*/*.jpg\"), recursive=True):\n faces.append((f, 'unlabeled', False, False))\n return faces\n\ndef init_app(app):\n app.teardown_appcontext(close_db)\n app.cli.add_command(init_db_command)\n\ndef get_db():\n if 'db' not in g:\n g.db = psycopg2.connect(\n database = os.environ[\"POSTGRES_DB\"],\n user = os.environ[\"POSTGRES_USER\"],\n password = os.environ[\"POSTGRES_PW\"],\n host = os.environ[\"POSTGRES_HOST\"],\n )\n return g.db\n\ndef close_db(exception=None):\n db = g.pop('db', None)\n if db is not None:\n 
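# The watchdog behaviour these tests exercise (ping, then treat a missing pong
# within heartbeat_timeout as a lost connection) can also be expressed with
# asyncio.wait_for; a minimal sketch, assuming the caller supplies a `ping()`
# coroutine (names here are illustrative, not the tested class's API):
import asyncio

async def heartbeat(ping, timeout=30):
    try:
        await asyncio.wait_for(ping(), timeout)
        return True             # pong arrived in time
    except asyncio.TimeoutError:
        return False            # lost heartbeat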
db.close()\n\ndef init_db():\n sql = \"INSERT INTO dataset (ref, label, busy, marked) VALUES %s\"\n db = get_db()\n cursor = db.cursor()\n with current_app.open_resource('schema.sql') as f:\n faces = _get_faces()\n # cria a tabela no banco de dados\n cursor.execute(f.read().decode('utf8'))\n # insere dados no banco de dados\n psycopg2.extras.execute_values(cursor, sql, faces, template=None)\n db.commit()\n cursor.close()\n\n@click.command('init-db')\n@with_appcontext\ndef init_db_command():\n \"\"\"Apaga os dados existentes e cria uma nova tabela.\"\"\"\n init_db()\n click.echo('Initialized the database.')\n","sub_path":"tool/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"299238903","text":"#Given a matrix mat where every row is sorted in strictly increasing order, return the smallest common element in all rows.\n\n#If there is no common element, return -1.\n\n\n\n#Example 1:\n\n#Input: mat = [[1,2,3,4,5],[2,4,5,8,10],[3,5,7,9,11],[1,3,5,7,9]]\n#Output: 5\n\n\n#Constraints:\n\n#1 <= mat.length, mat[i].length <= 500\n#1 <= mat[i][j] <= 10^4\n#mat[i] is sorted in strictly increasing order.\n\nfrom typing import List\nclass Solution:\n def smallestCommonElement(self, mat: List[List[int]]) -> int:\n if not mat or len(mat) == 0:\n return 0\n tmp = {}\n m = len(mat)\n n = len(mat[0])\n for i in range(m):\n for j in range(n):\n if mat[i][j] in tmp:\n tmp[mat[i][j]] += 1\n else:\n tmp[mat[i][j]] = 1\n res = []\n for k, v in tmp.items():\n if v == m:\n res.append(k)\n\n if len(res) == 0:\n return -1\n res = sorted(res)\n return res[0]","sub_path":"python_code/1198_Find_Smallest_Common_Element_in_All_Rows.py","file_name":"1198_Find_Smallest_Common_Element_in_All_Rows.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"440777891","text":"from datetime import timedelta, datetime\nimport base64\nfrom xlrd import open_workbook\nfrom dateutil.relativedelta import relativedelta\nfrom odoo.exceptions import UserError\nfrom odoo.exceptions import ValidationError\nfrom odoo import api, fields, models, _\n\nclass crm_leads(models.Model):\n _inherit = \"crm.lead\"\n\n planned_revenue = fields.Float('Expected Premium in Company Currency', track_visibility='always')\n c_type = fields.Many2one('res.currency', string='Expected Premium in Currency')\n ammount = fields.Float(string='Ammount')\n # user_id = fields.Many2one('res.users', string='Lead Operator', index=True, track_visibility='onchange',\n # default=lambda self: self.env.user )\n create_uid = fields.Many2one('res.users', string='Lead Generator')\n policy_number = fields.Char( string='Policy Number')\n\n insurance_type = fields.Selection([('Life', 'Life'),\n ('P&C', 'P&C'),\n ('Health', 'Health'), ],\n 'Insurance Type', track_visibility='onchange',required=True)\n ins_type = fields.Selection([('Individual', 'Individual'),\n ('Group', 'Group'),],\n 'insured type', track_visibility='onchange')\n risks_method = fields.Selection([('count', 'Count'),\n ('members', 'Members'), ],\n 'Risk Method', track_visibility='onchange', copy=True)\n duration_no = fields.Integer('Policy Duration Number')\n duration_type =fields.Selection([('day', 'Day'),\n ('month', 'Month'),\n ('year', 'Year'),],\n 'Policy Duration Type',track_visibility='onchange')\n term=fields.Char(string='Term',compute='_compute_term',force_save=True)\n\n validate_basic_mark_opp = 
fields.Boolean(copy=False, default=True)\n validate_risk_mark_opp = fields.Boolean(copy=False)\n validate_prop = fields.Boolean(copy=False)\n validate_prop_line = fields.Boolean(copy=False)\n validate_underwr=fields.Boolean(copy=False)\n validate_contact = fields.Boolean(copy=False)\n\n @api.multi\n def validate_basic_opp(self):\n self.validate_basic_mark_opp = True\n self.validate_risk_mark_opp = False\n self.validate_prop = False\n self.validate_prop_line = False\n self.validate_underwr = False\n self.validate_contact = False\n print(self.validate_basic_mark_opp)\n\n return True\n\n @api.multi\n def validate_risk_opp(self):\n if self.LOB:\n self.validate_basic_mark_opp = False\n self.validate_risk_mark_opp = True\n self.validate_prop = False\n self.validate_prop_line = False\n self.validate_underwr = False\n self.validate_contact = False\n print(self.validate_basic_mark_opp)\n return True\n\n @api.multi\n def validate_proposal(self):\n if self.objectrisks:\n self.validate_basic_mark_opp = False\n self.validate_risk_mark_opp = False\n self.validate_prop = True\n self.validate_prop_line = False\n self.validate_underwr = False\n self.validate_contact = False\n return True\n @api.multi\n def validate_proposal_line(self):\n if self.objectrisks and self.proposal_opp:\n self.validate_basic_mark_opp = False\n self.validate_risk_mark_opp = False\n self.validate_prop = False\n self.validate_prop_line = True\n self.validate_underwr = False\n self.validate_contact = False\n return True\n\n @api.multi\n def validate_underwritting(self):\n self.validate_basic_mark_opp = False\n self.validate_risk_mark_opp = False\n self.validate_prop = False\n self.validate_prop_line = False\n self.validate_underwr=True\n self.validate_contact = False\n\n\n @api.multi\n def validate_continfo(self):\n self.validate_basic_mark_opp = False\n self.validate_risk_mark_opp = False\n self.validate_prop = False\n self.validate_prop_line = False\n self.validate_underwr = False\n self.validate_contact = True\n\n\n @api.one\n def _compute_term(self):\n if self.duration_no and self.duration_type:\n self.term = str(self.duration_no) + '-' + str(self.duration_type)\n\n LOB = fields.Many2one('insurance.line.business', string='Line of business', domain=\"[('insurance_type','=',insurance_type)]\",required=True)\n\n oppor_type = fields.Char(\n string='Opportunity type',\n compute='_changeopp',\n store=False,\n compute_sudo=True,\n )\n\n\n\n\n\n\n\n #pol=fields.Many2one(related='Policy_type.insured_type' , string='insured type')\n test=fields.Char('')\n group=fields.Boolean('Groups')\n individual = fields.Boolean('Item by Item')\n test1=fields.Boolean(readonly=True)\n\n objectrisks = fields.One2many('policy.risk', 'risks_crm', string='car',copy=True) # where you are using this fiedl ? 
in xml\n\n # objectgroup = fields.One2many('group.group.opp', 'object_group_crm', string='Group')\n\n proposal_opp = fields.One2many('proposal.opp.bb', 'proposal_crm', string='proposla')\n\n coverage_line = fields.One2many('coverage.line', 'covers_crm', 'Coverage lines')\n\n selected_proposal = fields.One2many('proposal.opp.bb', 'select_crm', compute='proposalselected')\n prop_id = fields.Integer('', readonly=True)\n my_notes = fields.Text('Under writting')\n\n # covers=fields.One2many(related='selected_proposal.proposals_covers')\n\n # policy_opp=fields.Many2one('policy.broker')\n selected_coverage = fields.Many2one('proposal.opp.bb', domain=\"[('id','in',proposal_opp)]\",string='Final Proposal')\n set_covers = fields.Boolean('')\n test_computed = fields.Char('', compute='testcom')\n @api.depends('ins_type')\n def testcom(self):\n self.test_computed='Islam'\n\n\n\n\n\n\n\n\n def proposalselected(self):\n print('5555555')\n ids = self.env['proposal.opp.bb'].search([('id', '=',self.prop_id)]).ids\n self.selected_proposal = [(6, 0, ids)]\n\n @api.multi\n def covers_button(self):\n self.set_covers=True\n # self.coverage_line.covers_crm=self.id\n return True\n # form_view = self.env.ref('insurance_broker_system_blackbelts.Risks_form')\n\n # return {\n # 'name': ('Risk Details'),\n # 'view_type': 'form',\n # 'view_mode': 'form',\n # 'views': [(form_view.id, 'form')],\n # 'res_model': 'risks.opp',\n # 'target': 'inline',\n # 'type': 'ir.actions.act_window',\n # 'context': {'default_risks_crm': self.id},\n # 'flags': {'form': {'action_buttons': True}}\n\n\n @api.multi\n def proposal_button(self):\n form_view = self.env.ref('insurance_broker_system_blackbelts.form_proposal_opp')\n\n return {\n 'name': ('Proposals'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'views': [(form_view.id, 'form')],\n 'res_model': 'proposal.opp.bb',\n 'target': 'current',\n 'type': 'ir.actions.act_window',\n 'context': {'default_proposal_crm':self.id},\n }\n\n\n # objectcar_selected = fields.Many2one('car.object', string='car')\n # objectperson_selected = fields.Many2one('person.object', string='Person')\n # objectcargo_selected = fields.Many2one('cargo.object', string='cargo')\n # objectgroup_selected = fields.Many2one('group.group', string='Group')\n\n\n\n\n\n # @api.onchange('user_id')\n # def get_car_proposal_crm(self):\n # for lead in self:\n # proposal_ids = []\n # for car in self.objectcar:\n # if car.btn1:\n # proposal_ids = proposal_ids+car.proposal_car.ids\n # lead.prop_car = [(6,0, proposal_ids)]\n\n\n\n # @api.multi\n # def button_action(self):\n # return {\n # 'type': 'ir.actions.act_url',\n # 'url': 'http://167.99.243.240/moodle/login/index.php?username=%{0}&password=Admin%40123&Submit=Login' .format(self.env.user.name),\n # 'target': 'self',\n # 'res_id': self.id,\n # }\n\n\n\n #prop_car=fields.One2many(related='objectcar')\n # prop_person = fields.One2many(related='objectperson_selected.proposal_person')\n # prop_cargo = fields.One2many(related='objectcargo_selected.proposal_cargo')\n # prop_group = fields.One2many(related='objectgroup_selected.proposal_group')\n\n @api.multi\n def create_policy(self):\n form_view = self.env.ref('smart_policy.policy_form_view')\n\n recordrisks = self.env['policy.risk'].search([('id', 'in', self.objectrisks.ids)])\n records_risks = []\n for rec in recordrisks:\n records_risks.append(rec.id)\n print(records_risks)\n\n recordproposal = self.env['proposal.opp.bb'].search([('id', '=', self.selected_coverage.id)])\n print(recordproposal.id)\n recordcovers = 
self.env['coverage.line'].search([('proposal_id', '=', recordproposal.id)])\n\n records_covers = []\n for rec in recordcovers:\n coversline = (\n 0, 0,\n {'riskk': rec.risk_id_cover.id ,'insurerd': rec.insurer.id,\n 'prod_product': rec.product.id, 'name1': rec.covers.id, 'sum_insure': rec.sum_insured,\n 'deductible' : rec.deductible, 'limitone' :rec.limitone ,'limittotal': rec.limittotal ,\n 'net_perimum': rec.net_premium, 'rate': rec.rate})\n print(coversline)\n records_covers.append(coversline)\n print(records_covers)\n\n if self.policy_number and self.selected_coverage:\n return {\n 'name': ('Policy'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'views': [(form_view.id, 'form')],\n 'res_model': 'policy.broker',\n 'target': 'current',\n 'type': 'ir.actions.act_window',\n 'context': {\n 'default_std_id': self.policy_number,\n 'default_company': self.selected_coverage.Company.id,\n 'default_ins_type': self.ins_type,\n 'default_line_of_bussines': self.LOB.id,\n 'default_product_policy': self.selected_coverage.product_pol.id,\n 'default_insurance_type': self.insurance_type,\n 'default_customer': self.partner_id.id,\n 'default_salesperson': self.user_id.partner_id.id,\n 'default_new_risk_ids': [(6, 0, records_risks)],\n 'default_name_cover_rel_ids': records_covers,\n\n },\n }\n else:\n raise ValidationError(\n ('You Must Enter the Policy Number '\n 'OR select final proposal .'))\n\n # , 'default_objectperson':records_person ,'default_objectcar':records_car},\n\n # # tree and form view id here.\n # proposal_car_tree form_proposal\n # view = self.env.ref('crm__black_belts.proposal_car_tree')\n # form_view = self.env.ref('insurance_broker_blackbelts.my_view_for_policy_form_kmlo1')\n # print(self.objectperson.ids)\n #\n # #self.policy_opp.test=self.test\n # return {\n # 'name': 'Policy',\n # 'type': 'ir.actions.act_window',\n # 'view_type': 'form',\n # 'view_mode': ' form',\n # 'views': [(form_view.id, 'form')],\n # 'res_model': 'policy.broker',\n # 'target': 'current',\n # 'context': {'default_policy_opp':self,'default_insurance_type': self.insurance_type, 'default_line_of_bussines': self.LOB.id,\n # 'default_ins_type': self.ins_type\n # , 'default_objectvehicle': self.objectcar.ids, 'default_objectperson': self.objectperson.ids},\n #\n #\n #\n #\n # }\n\n\n\n\n\n @api.onchange('LOB','ins_type')\n def _compute_comment(self):\n if self.ins_type =='Group':\n self.test = 'Group'\n else:\n self.test = self.LOB.object\n\n\n\n\n\n #@api.onchange('user_id')\n #def onchange_user_id(self):\n # if self.user_id and self.env.uid != 1 :\n # return {'domain':{'user_id': [('id','in',[self.env.uid,1])]}}\n\n\n @api.onchange('user_id', 'create_uid')\n def _changeopp(self):\n for record in self:\n if record.create_uid:\n if record.create_uid == record.user_id:\n record['oppor_type'] = 'Own'\n\n else:\n record['oppor_type'] = 'Network'\n else :\n record.create_uid=self.env.uid\n\n @api.multi\n def print_opp(self):\n return self.env.ref('Smart_crm.crm_report').report_action(self)\n\n @api.multi\n def send_mail_template(self):\n # Find the e-mail template\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n template_id = self.env.ref('Smart_crm.opp_email_template')\n try:\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n ctx = {\n 'default_model': 'crm.lead',\n 'default_res_id': self.ids[0],\n 'default_use_template': bool(template_id.id),\n 'default_template_id': template_id.id,\n 
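# The `default_*` keys in these context dicts are how Odoo pre-fills fields on
# the form that the returned action opens; a minimal sketch of the pattern as a
# method on some model (the model and field names are illustrative):
def open_prefilled_form(self):
    return {
        'type': 'ir.actions.act_window',
        'res_model': 'policy.broker',
        'view_mode': 'form',
        'target': 'current',
        'context': {'default_customer': self.partner_id.id},
    }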
'default_composition_mode': 'comment',\n 'mark_so_as_sent': True,\n # 'custom_layout': \"sale.mail_template_data_notification_email_sale_order\",\n 'proforma': self.env.context.get('proforma', False),\n 'force_email': True\n }\n\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }\n # You can also find the e-mail template like this:\n # template = self.env['ir.model.data'].get_object('mail_template_demo', 'example_email_template')\n\n # Send out the e-mail template to the user\n template_id.send_mail(self.ids[0], force_send=True)\n # self.env['mail.template'].browse(template.id).send_mail(self.id)\n\n\n\n\n\n @api.onchange('ammount', 'c_type')\n def _change(self):\n if self.c_type.id:\n self.planned_revenue = self.ammount / self.c_type.rate\n print(self.c_type.rate)\n\n attachment =fields.Binary(string='Excel File')\n group_ids = fields.One2many('risk.groups','crm_id',string='Group Members')\n\n @api.multi\n def import_excel(self):\n if self.attachment:\n wb = open_workbook(file_contents=base64.decodestring(self.attachment))\n values = []\n for s in wb.sheets():\n for row in range(1, s.nrows):\n col_value = []\n for col in range(s.ncols):\n value = (s.cell(row, col).value)\n try:\n value = str(int(value))\n except:\n pass\n col_value.append(value)\n values.append(col_value)\n return values\n else:\n raise ValidationError('please import your Excel Sheet !')\n\n\n @api.multi\n def create_risk(self):\n dict = {}\n for elem in self.import_excel():\n if elem[5] not in dict:\n dict[elem[5]] = []\n dict[elem[5]].append(elem[0:])\n\n for key,value in dict.items():\n self.update({\n 'objectrisks': [(0, 0,{'group_category': key,\n 'group_count':len(value)})],})\n\n for item in value:\n self.update({\n 'group_ids': [(0, 0, {'member_id': item[0],\n 'name':item[1],\n 'member_payed':item[2],\n 'sum_insured':item[3],\n 'group_name':item[5],})],})\n\n\n\n\nclass crm_leads_currency(models.Model):\n _inherit = 'res.currency'\n # currency_type=fields.One2many('crm.lead','currency_type ',string='currency')\n c = fields.One2many('crm.lead', 'c_type', string='currency')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Smart_crm/models/qoutation.py","file_name":"qoutation.py","file_ext":"py","file_size_in_byte":16869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"625062584","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport datetime\nimport time\nimport re\nimport glob\nimport subprocess\nimport netCDF4 as nc\nimport numpy as np\nimport pcraster as pcr\nimport VirtualOS as vos\n\n# declare a global variable to hold valid names of time dimension\nvalid_time_dimnames = ['time']\n\nclass OutputNetCDF(object):\n \n def __init__(self, netcdf_attr, model_dimensions, variable_list):\n self.model_dimensions = model_dimensions\n self.variable_list = variable_list\n self.set_netcdf_y_orientation(netcdf_attr)\n self.set_general_netcdf_attributes(netcdf_attr)\n self.set_netcdf_format_options(netcdf_attr) \n\n def set_netcdf_format_options(self, netcdf_attr):\n self.format = 'NETCDF3_CLASSIC'\n self.zlib = False\n if 'formatNetCDF' in netcdf_attr.keys():\n self.format = str(netcdf_attr['formatNetCDF'])\n if 'zlib' in netcdf_attr.keys():\n if netcdf_attr['zlib'] == \"True\":\n self.zlib = True\n \n def set_netcdf_y_orientation(self, netcdf_attr): \n 
self.netcdf_y_orientation_follow_cf_convention = False\n if 'netcdf_y_orientation_follow_cf_convention' in netcdf_attr.keys():\n if netcdf_attr['netcdf_y_orientation_follow_cf_convention'] == \"True\":\n # msg = \"Latitude (y) orientation for output netcdf files start from the bottom to top.\"\n self.netcdf_y_orientation_follow_cf_convention = True \n\n def set_general_netcdf_attributes(self, netcdf_attr):\n \"\"\"Function to set general netCDF attributes\"\"\" \n self.attributeDictionary = {}\n self.attributeDictionary['institution'] = netcdf_attr['institution']\n self.attributeDictionary['title' ] = netcdf_attr['title' ]\n self.attributeDictionary['description'] = netcdf_attr['description']\n \n def add_dimension_time(self, netcdf, dimname, dimvar):\n \"\"\"Function to add a time dimension to a netCDF file\"\"\"\n shortname = self.variable_list.netcdf_short_name[dimname]\n try:\n datatype = self.variable_list.netcdf_datatype[dimname]\n except:\n datatype = 'f4'\n dimensions = self.variable_list.netcdf_dimensions[dimname]\n netcdf.createDimension(shortname, None)\n var = netcdf.createVariable(\n shortname,\n datatype,\n dimensions,\n zlib=self.zlib) \n var.standard_name = self.variable_list.netcdf_standard_name[dimname]\n var.long_name = self.variable_list.netcdf_long_name[dimname]\n var.units = self.variable_list.netcdf_unit[dimname]\n var.calendar = self.variable_list.netcdf_calendar[dimname]\n\n def add_dimension_not_time(self, netcdf, dimname, dimvar):\n \"\"\"Function to add a dimension (not time) to a netCDF \n file\n \"\"\"\n ndim = len(dimvar)\n shortname = self.variable_list.netcdf_short_name[dimname]\n try:\n datatype = self.variable_list.netcdf_datatype[dimname]\n except:\n datatype = 'f4'\n dimensions = self.variable_list.netcdf_dimensions[dimname]\n standard_name = self.variable_list.netcdf_standard_name[dimname]\n if standard_name in ['latitude','longitude']:\n keyword_args = {'zlib' : True, 'least_significant_digit' : 16}\n else:\n keyword_args = {'zlib' : self.zlib}\n\n # if dimension is latitude, check whether the netCDF\n # should follow the CF convention and order latitudes\n # from high -> low (e.g. 60N -> 60S). 
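# Reversing the latitude axis, as done for the dimension variable above and for
# the data itself via np.flip(varField, axis=-2) later on, only relabels the
# grid correctly if values and coordinates are flipped together; a small
# illustrative check with made-up latitudes:
import numpy as np

lats = np.array([60.0, 30.0, 0.0, -30.0])   # high -> low, as the model stores it
field = np.arange(8).reshape(4, 2)          # dims (lat, lon)
flipped = np.flip(field, axis=-2)           # row 0 now pairs with lats[::-1][0]
assert np.array_equal(flipped[0], field[-1])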
If not, reverse\n # dimension variable because the model stores the\n # latitude dimension from high -> low.\n if standard_name in ['latitude']:\n if not self.netcdf_y_orientation_follow_cf_convention:\n if (dimvar[0] - dimvar[1]) > 0:\n dimvar = dimvar[::-1]\n\n # create the dimension\n netcdf.createDimension(shortname, ndim)\n\n # create the variable to store dimension values\n var = netcdf.createVariable(\n shortname,\n datatype,\n dimensions,\n **keyword_args)\n var.standard_name = self.variable_list.netcdf_standard_name[dimname]\n var.long_name = self.variable_list.netcdf_long_name[dimname]\n var.units = self.variable_list.netcdf_unit[dimname]\n var[:] = np.array(dimvar)\n\n def add_dimension(self, netcdf, dimname, dimvar):\n isTimeDim = dimname in ['time']\n if isTimeDim:\n self.add_dimension_time(netcdf, dimname, None)\n else:\n self.add_dimension_not_time(netcdf, dimname, dimvar)\n \n def add_variable(self, netcdf, varname, **kwargs):\n self.repair_variable_dict(varname)\n shortname = self.variable_list.netcdf_short_name[varname]\n try:\n datatype = self.variable_list.netcdf_datatype[varname]\n except:\n datatype = 'f4'\n dimensions = self.variable_list.netcdf_dimensions[varname]\n var = netcdf.createVariable(\n shortname,\n datatype,\n dimensions,\n **kwargs)\n var.standard_name = self.variable_list.netcdf_standard_name[varname]\n var.long_name = self.variable_list.netcdf_long_name[varname]\n var.units = self.variable_list.netcdf_unit[varname] \n\n def repair_variable_dict(self, varname):\n \"\"\"Function to fill in missing values in variable dictionary\"\"\"\n if self.variable_list.netcdf_long_name[varname] is None:\n self.variable_list.netcdf_long_name[varname] = self.variable_list.netcdf_short_name[varname]\n\n def get_variable_dimensions(self, varname):\n \"\"\"Function to get the dimensions of a variable. If \n 'varname' is a list then the function will attempt to \n retrieve all unique dimensions. In either case the\n dimensions will be returned as a tuple.\n \"\"\"\n if isinstance(varname, basestring):\n var_dims = self.variable_list.netcdf_dimensions[varname]\n elif isinstance(varname, list):\n var_dims = []\n for item in varname:\n var_dims += list(self.variable_list.netcdf_dimensions[item])\n var_dims = tuple(set(var_dims)) \n return var_dims\n \n def create_netCDF(self, ncFileName, varname, dimensions=None):\n \"\"\"Function to create netCDF file\"\"\"\n # FIXME: make dimensions a required arg\n netcdf = nc.Dataset(ncFileName, 'w', format=self.format)\n if dimensions is None:\n dimensions = self.get_variable_dimensions(varname)\n for dim in dimensions:\n self.add_dimension(netcdf, dim, self.model_dimensions[dim])\n\n if isinstance(varname, basestring):\n varname = [varname]\n\n for item in varname:\n self.add_variable(netcdf, item, zlib=self.zlib, fill_value=vos.MV)\n \n attributeDictionary = self.attributeDictionary\n for k, v in attributeDictionary.items():\n setattr(netcdf,k,v)\n\n netcdf.sync()\n netcdf.close()\n \n def add_data_to_netcdf(self, ncFileName, varname, varField, timeStamp=None, posCnt=None):\n \"\"\"Function to write data to netCDF. 
It first opens\n the netCDF file specified by 'ncFileName', identifies\n the variable name and dimensions corresponding to\n 'varname', and adds the data using the appropriate\n method depending on whether the variable is\n time-varying or not.\n \"\"\"\n netcdf = nc.Dataset(ncFileName, 'a')\n short_name = self.variable_list.netcdf_short_name[varname]\n dims = self.variable_list.netcdf_dimensions[varname]\n has_time_dim = any([dim in valid_time_dimnames for dim in dims])\n if has_time_dim:\n self.add_data_to_netcdf_with_time(netcdf, short_name, dims, varField, timeStamp, posCnt)\n else:\n self.add_data_to_netcdf_without_time(netcdf, short_name, dims, varField)\n netcdf.sync()\n netcdf.close()\n \n def add_data_to_netcdf_with_time(self, netcdf, shortVarName, var_dims, varField, timeStamp=None, posCnt=None):\n time_dimname = [dim for dim in var_dims if dim in valid_time_dimnames][0]\n date_time = netcdf.variables[time_dimname]\n if posCnt is None:\n posCnt = len(date_time)\n\n date_time[posCnt] = nc.date2num(\n timeStamp,\n date_time.units,\n date_time.calendar)\n\n # The CF convention is for latitudes to go from high\n # to low (which corresponds with numpy). Hence if the\n # config specifies NOT to follow the CF convention then\n # we must flip the latitude dimension of the variable\n # such that latitudes go from low to high.\n if not self.netcdf_y_orientation_follow_cf_convention:\n varField = np.flip(varField, axis=-2)\n \n time_axis = [i for i in range(len(var_dims)) if var_dims[i] == time_dimname][0]\n slc = [slice(None)] * len(var_dims)\n slc[time_axis] = posCnt\n # index with a tuple: indexing with a plain list is deprecated/ambiguous\n netcdf.variables[shortVarName][tuple(slc)] = varField\n \n def add_data_to_netcdf_without_time(self, netcdf, shortVarName, var_dims, varField):\n \"\"\"Function to write data to netCDF without a time dimension\"\"\"\n if not self.netcdf_y_orientation_follow_cf_convention:\n varField = np.flip(varField, axis=-2)\n netcdf.variables[shortVarName][:] = varField\n \n def close(self, ncFileName):\n \"\"\"Function to close a netCDF file\"\"\"\n # open in append mode: mode 'w' (as originally written) would\n # truncate and overwrite the existing file\n rootgrp = nc.Dataset(ncFileName, 'a')\n rootgrp.close()\n","sub_path":"water_balance_model/OutputNetCDF.py","file_name":"OutputNetCDF.py","file_ext":"py","file_size_in_byte":9727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"219039240","text":"import pygame\r\n\r\n\r\nclass Button:\r\n def __init__(self, x, y, w, h, text):\r\n self.border = (0, 0, 0)\r\n self.color = (172, 59, 97)\r\n self.active_color = (255, 40, 0)\r\n self.text_color = (255, 255, 255)\r\n self.text = text\r\n self.x = x\r\n self.y = y\r\n self.width = w\r\n self.height = h\r\n self.activated = False\r\n\r\n def activate(self, mouse, shapes, space, button_list):\r\n x = mouse[0]\r\n y = mouse[1]\r\n if self.x < x < self.x + self.width and self.y < y < self.y + self.height and not self.activated:\r\n for button in button_list:\r\n button.activated = False\r\n shapes.point_list.clear()\r\n self.activated = True\r\n elif self.x < x < self.x + self.width and self.y < y < self.y + self.height and self.activated:\r\n shapes.point_list.clear()\r\n self.activated = False\r\n\r\n def draw_button(self, screen, shapes):\r\n if not self.activated:\r\n pygame.draw.rect(screen.screen, self.color, (self.x, self.y, self.width, self.height), 0)\r\n pygame.draw.rect(screen.screen, self.border, (self.x, self.y, self.width, self.height), 3)\r\n button_text = screen.font.render(self.text, True, self.text_color)\r\n text_rect = button_text.get_rect(center=(self.x + (self.width/2), self.y + 
(self.height/2)))\r\n screen.screen.blit(button_text, text_rect)\r\n else:\r\n pygame.draw.rect(screen.screen, self.active_color, (self.x, self.y, self.width, self.height), 0)\r\n pygame.draw.rect(screen.screen, self.border, (self.x, self.y, self.width, self.height), 3)\r\n button_text = screen.font.render(self.text, True, self.text_color)\r\n text_rect = button_text.get_rect(center=(self.x + (self.width/2), self.y + (self.height/2)))\r\n screen.screen.blit(button_text, text_rect)\r\n","sub_path":"buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"471722006","text":"from PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtMultimedia import *\r\nfrom PyQt5.QtMultimediaWidgets import *\r\n\r\nfrom manager import *\r\nfrom ._thread import *\r\nfrom ._scene import *\r\n\r\nimport imageio\r\nimport sys\r\nfrom os.path import dirname, realpath, join\r\nimport json\r\nimport cv2\r\n\r\n\r\nPROJECT_PATH = realpath(join(dirname(realpath(__file__)), \"../\"))\r\nDATA_PATH = join(PROJECT_PATH, \"../data\")\r\nVIDEO_PATH = join(DATA_PATH, \"golf_swing.mp4\")\r\nICON_PATH =join(PROJECT_PATH, \"icons\")\r\n\r\n\r\nclass E_MainWindow(QMainWindow):\r\n def __init__(self, parent=None):\r\n super(E_MainWindow, self).__init__(parent)\r\n \r\n #Initialize GUI\r\n self.initialize_upper_toolbar()\r\n self.initialize_central_widget()\r\n self.initialize_lower_toolbar()\r\n \r\n #Initialize Data\r\n self.video_data = []\r\n self.set_video_data(VIDEO_PATH)\r\n \r\n #Initialize Manager\r\n E_Manager.add_3d_vertices()\r\n E_Manager.reset_clipping()\r\n E_Manager.redraw()\r\n\r\n\r\n ##Start Server\r\n self.server = E_SocketServer()\r\n self.server.videostreamed.connect(self.on_video_from_client) \r\n self.server.start()\r\n\r\n def initialize_upper_toolbar(self):\r\n #Initialize Toolbar\r\n upper_toolbar = QToolBar()\r\n upper_toolbar.setIconSize(QSize(50,50))\r\n upper_toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)\r\n upper_toolbar.setMovable(False)\r\n self.addToolBar(Qt.TopToolBarArea, upper_toolbar)\r\n\r\n #Import Video\r\n import_action = QAction(QIcon(ICON_PATH + \"/import.png\"), \"Import data\", self)\r\n import_action.triggered.connect(self.on_import_video)\r\n upper_toolbar.addAction(import_action)\r\n\r\n #Add Actions to the toolbar\r\n self.train_action = QAction(QIcon(ICON_PATH + \"/train.png\"),\"Train\", self)\r\n self.train_action.setCheckable(True)\r\n self.train_action.toggled.connect(self.on_training)\r\n upper_toolbar.addAction(self.train_action)\r\n\r\n\r\n\r\n def initialize_central_widget(self): \r\n #Set Central Layout\r\n central_widget = QWidget()\r\n self.central_layout = QHBoxLayout()\r\n central_widget.setLayout(self.central_layout);\r\n\r\n self.setCentralWidget(central_widget)\r\n\r\n #Initialize List Widget\r\n self.tree_widget = QTreeWidget()\r\n self.tree_widget.setAlternatingRowColors(True)\r\n for idx, name in enumerate(E_Manager.m_joint_name):\r\n item = QTreeWidgetItem()\r\n item.setText(0, name)\r\n color = np.asarray(E_Manager.m_3d_colors[idx])*255\r\n item.setBackground(0,QBrush(QColor(color[0], color[1], color[2])))\r\n self.tree_widget.addTopLevelItem( item )\r\n self.central_layout.addWidget(self.tree_widget)\r\n \r\n\r\n #Initialize 2D-video Renderer\r\n self.video_widget, self.video_scene = self.initialize_video_widget()\r\n self.central_layout.addWidget(self.video_widget) \r\n 
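#Illustrative aside (added; not part of the original code): each\r\n #(view, scene) pair built by initialize_video_widget displays a frame\r\n #by converting a numpy array to a QImage and adding it as a pixmap:\r\n # qimg = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)\r\n # scene.addPixmap(QPixmap.fromImage(qimg))\r\n #(update_image and update_heatmap below are the real call sites)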
\r\n\r\n\r\n #Initialize VTK Renderer\r\n vtk_widget = E_Manager.add_vtk_renderer()\r\n self.central_layout.addWidget(vtk_widget)\r\n\r\n #initialize temp heatmap widget - temp\r\n self.heatmap_widget, self.heatmap_scene = self.initialize_video_widget()\r\n self.heatmap_widget.setBackgroundBrush(QBrush(QColor(0.0, 1.0, 0.0)))\r\n self.central_layout.addWidget(self.heatmap_widget)\r\n\r\n\r\n self.central_layout.setStretch(0, 1)\r\n self.central_layout.setStretch(1, 8)\r\n self.central_layout.setStretch(2, 8)\r\n self.central_layout.setStretch(3, 8)\r\n\r\n #Initialize Test 2D Renderer\r\n # self.test_widget = \r\n # self.central_layer.addWidget(self.test_widget)\r\n\r\n\r\n\r\n E_Manager.redraw()\r\n\r\n def initialize_lower_toolbar(self): \r\n #Lower Toolbar\r\n lower_toolbar = QToolBar()\r\n lower_toolbar.setIconSize(QSize(50,50))\r\n lower_toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)\r\n lower_toolbar.setMovable(False)\r\n self.addToolBar(Qt.BottomToolBarArea, lower_toolbar)\r\n\r\n #Load Saved Action\r\n self.play_action = QAction(QIcon(ICON_PATH + \"/play.png\"),\"Play\", self) \r\n self.play_action.setCheckable(True)\r\n self.play_action.toggled.connect(self.on_play_action)\r\n lower_toolbar.addAction(self.play_action)\r\n\r\n\r\n #Slider\r\n self.player_slider = QSlider(Qt.Horizontal) \r\n self.player_slider.setSingleStep(1)\r\n self.player_slider.valueChanged.connect(self.on_slider_change)\r\n lower_toolbar.addWidget(self.player_slider)\r\n\r\n \r\n #Save_action\r\n save_action = QAction(QIcon(ICON_PATH + \"/save.png\"),\"Save\", self)\r\n save_action.triggered.connect(self.on_save_json)\r\n lower_toolbar.addAction(save_action)\r\n\r\n def initialize_video_widget(self):\r\n video_widget = QGraphicsView()\r\n video_scene = E_GraphicsScene()\r\n \r\n #Add Scene adn configure\r\n video_widget.setScene(video_scene)\r\n video_widget.setRenderHint(QPainter.Antialiasing)\r\n video_widget.setBackgroundBrush(QBrush(QColor(0.0, 0.0, 0.0)))\r\n\r\n return video_widget, video_scene\r\n\r\n def on_import_video(self): \r\n path = QFileDialog.getOpenFileName(self, \"Import Video or picture\", \"~/\")\r\n\r\n # print(path[0]) \r\n try:\r\n self.set_video_data(path[0])\r\n except ValueError:\r\n pass\r\n\r\n \r\n \r\n def set_video_data(self, path):\r\n data = imageio.get_reader(path)\r\n\r\n try:\r\n self.video_data = [] \r\n for slide in data:\r\n if slide.shape[2] == 4:\r\n slide = slide[:,:,:-1]\r\n\r\n\r\n self.video_data.append(slide)\r\n\r\n #if completed, reset buffers\r\n E_Manager.reset_buffer()\r\n except RuntimeError: \r\n pass\r\n\r\n \r\n\r\n #Set Slider Range\r\n self.player_slider.setRange(0, len(self.video_data)-1)\r\n self.player_slider.setValue(1)\r\n self.player_slider.setValue(0)\r\n\r\n \r\n\r\n\r\n def on_training(self, run):\r\n if run: \r\n E_Manager.m_save_data_3d = []\r\n E_Manager.m_save_data_2d = []\r\n E_Manager.m_save_croppingbox = []\r\n \r\n self.cam_thread = E_PoseEstimator(self.video_data)\r\n self.cam_thread.calculated.connect(self.on_calculated_data)\r\n self.cam_thread.finished.connect(self.on_finished_training)\r\n self.cam_thread.start()\r\n else:\r\n self.cam_thread.running = False\r\n\r\n def on_save_json(self):\r\n data_3d = []\r\n for i, vertices in enumerate(E_Manager.m_save_data_3d):\r\n framedata = {}\r\n \r\n for j, position in enumerate(vertices):\r\n # print(j)\r\n framedata[ E_Manager.m_joint_name[j] ] = position.tolist()\r\n data_3d.append(framedata)\r\n\r\n data_2d = []\r\n for i, vertices in enumerate(E_Manager.m_save_data_2d):\r\n 
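#Aside (added for clarity; not in the original source): each frame\r\n #serializes to a dict keyed by joint name, e.g. {\"Head\": [x, y], ...}\r\n #(\"Head\" is a hypothetical name), so raw.json ends up shaped as\r\n #{\"3d\": [frame0, ...], \"2d\": [frame0, ...]} -- see the jsondata below.\r\n 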
framedata = {}\r\n \r\n for j, position in enumerate(vertices):\r\n # print(j)\r\n framedata[ E_Manager.m_joint_name[j] ] = position.tolist()\r\n data_2d.append(framedata)\r\n\r\n \r\n jsondata = {'3d':data_3d, '2d':data_2d}\r\n\r\n with open(\"./temp/raw.json\", 'w') as outfile:\r\n json.dump(jsondata, outfile)\r\n\r\n \r\n\r\n def on_play_action(self, run):\r\n if run:\r\n #if slider position >= max, set to 0 \r\n if self.player_slider.value() >= len(self.video_data)-1:\r\n self.player_slider.setValue(0)\r\n #Use Timer\r\n self.player = QTimer(self)\r\n self.player.timeout.connect(self.forward_video)\r\n self.player.setInterval(10)\r\n self.player.start()\r\n\r\n else:\r\n self.player.stop()\r\n\r\n def forward_video(self):\r\n \r\n current_idx = self.player_slider.value()\r\n if current_idx >= len(self.video_data)-1:\r\n self.player.stop()\r\n self.play_action.setChecked(False)\r\n return\r\n\r\n \r\n self.player_slider.setValue(current_idx+1)\r\n\r\n\r\n \r\n\r\n def on_calculated_data(self, idx, vertices3d, vertices2d, cropping_box, heatmap):\r\n #Save data to the memory buffer\r\n E_Manager.save_data(vertices3d, vertices2d, cropping_box, heatmap)\r\n\r\n #Update Renderin\r\n self.player_slider.setValue(idx)\r\n # self.on_play_data(idx, vertices3d, vertices2d, cropping_box)\r\n\r\n def on_finished_training(self):\r\n self.train_action.setChecked(False) \r\n \r\n def on_play_data(self, idx, vertices3d, vertices2d, cropping_box):\r\n\r\n #Update 2d, 3d\r\n self.update_image(self.video_data[idx], vertices2d, cropping_box)\r\n self.update_vertices(vertices3d)\r\n\r\n def update_vertices(self, vertices):\r\n E_Manager.update_vertices(vertices)\r\n E_Manager.redraw()\r\n\r\n def update_heatmap(self, image):\r\n \r\n #Image Preprocessing\r\n preprocessing_image = copy.deepcopy(np.asarray(image))\r\n \r\n #Normalize Image\r\n p_min = np.amin(preprocessing_image)\r\n p_max = np.amax(preprocessing_image)\r\n preprocessing_image -= p_min\r\n preprocessing_image *= 255.0/p_max\r\n\r\n # #Threshold - clip and normalize again\r\n minimum_threshold = 125.0\r\n preprocessing_image = np.clip(preprocessing_image, minimum_threshold, 255.0)\r\n preprocessing_image -= minimum_threshold\r\n preprocessing_image *= 255.0/130.0\r\n\r\n #Convert to integer\r\n preprocessing_image = np.uint8(preprocessing_image)\r\n\r\n #Convert to Qimage\r\n qimg = QImage(preprocessing_image, preprocessing_image.shape[0], preprocessing_image.shape[1],preprocessing_image.strides[0], QImage.Format_Indexed8)\r\n \r\n\r\n #Update To renderer\r\n self.heatmap_scene.clear()\r\n self.heatmap_widget.fitInView(QRectF(qimg.rect()), Qt.KeepAspectRatio)\r\n self.heatmap_scene.addPixmap(QPixmap.fromImage(qimg))\r\n\r\n return\r\n\r\n #Calculated Joint Position before resize!\r\n #Try Connected Components\r\n \r\n cc_output = cv2.connectedComponentsWithStats(preprocessing_image)\r\n num_labels = cc_output[0]\r\n centroids = cc_output[3]\r\n\r\n #Resize Center Position\r\n original_shape = preprocessing_image.shape\r\n centroids[:,0] *= 368/original_shape[0]\r\n centroids[:,1] *= 368/original_shape[1]\r\n centroids = np.uint32(centroids)\r\n \r\n \r\n # print(num_labels, centroids)\r\n\r\n #Resize\r\n #preprocessing_image = cv2.resize(preprocessing_image, (368,368))\r\n\r\n\r\n #Add Message\r\n joint_id = self.tree_widget.indexOfTopLevelItem(self.tree_widget.currentItem())\r\n color = np.asarray(E_Manager.m_3d_colors[joint_id])*255\r\n \r\n\r\n for idx, position in enumerate(centroids):\r\n if idx == 0: continue\r\n # color = [0,0,0]\r\n # 
color[]\r\n # print(position)\r\n item = QGraphicsEllipseItem() \r\n item.setRect(0, 0, 10, 10)\r\n item.setPos(position[0], position[1])\r\n item.setBrush(QBrush(QColor(color[0],color[1],color[2]))) \r\n item.setZValue(1)\r\n self.heatmap_scene.addItem(item)\r\n\r\n\r\n\r\n #Add Text\r\n message = QGraphicsTextItem()\r\n message.setPos(position[0], position[1]+5);\r\n message.setDefaultTextColor(QColor(color[0],color[1],color[2]))\r\n message.setPlainText(E_Manager.m_joint_name[joint_id]);\r\n self.heatmap_scene.addItem(message)\r\n\r\n def update_image(self, image, vertices_2d=None, cropping_box=None):\r\n \r\n output = copy.deepcopy(image)\r\n #OpenCV-based limb drawing - will be deprecated\r\n # try:\r\n # # Draw Cropping Box\r\n # output = E_Manager.draw_croppingbox_2d(output, cropping_box)\r\n # except TypeError:\r\n # pass\r\n\r\n #Draw on the renderer\r\n imageform = QImage.Format_RGB888\r\n if not len(output.shape) == 3:\r\n imageform = QImage.Format_Grayscale8\r\n\r\n qimg = QImage(output, output.shape[1], output.shape[0], imageform)\r\n \r\n #Update QGraphicsScene\r\n self.video_scene.clear()\r\n self.video_widget.fitInView(QRectF(qimg.rect()), Qt.KeepAspectRatio) \r\n self.video_scene.addPixmap(QPixmap.fromImage(qimg))\r\n \r\n\r\n\r\n #QT-based limb drawing here!\r\n try:\r\n #Add Joints\r\n for idx, position in enumerate(vertices_2d):\r\n color = np.asarray(E_Manager.m_3d_colors[idx])*255\r\n\r\n #Make Graphics Item\r\n item = E_GraphicsEllipseItem() \r\n item.setPos(position[1]-5, position[0]-5)\r\n item.setBrush(QBrush(QColor(color[0], color[1], color[2]))) \r\n item.setZValue(1) \r\n\r\n #Add Ellipase\r\n self.video_scene.addItem(item)\r\n\r\n joint_items = [item for item in self.video_scene.items(order=Qt.AscendingOrder) if isinstance(item, E_GraphicsEllipseItem)]\r\n #Add limbs\r\n for idx, connection in enumerate(E_Manager.m_line_connection):\r\n \r\n line_item = E_GraphicsLineItem()\r\n #Add Points\r\n line_item.addPoints(joint_items[connection[0]], joint_items[connection[1]]) \r\n line_item.setZValue(0)\r\n self.video_scene.addItem(line_item)\r\n\r\n\r\n #Draw Cropping Box\r\n self.video_scene.addRect(235,76 , 498, 425, QPen(QBrush(QColor(0, 255, 0)), 3))\r\n # addRect(boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height, QPen(QBrush(QColor(0, 255, 0)), 3));\r\n except TypeError: \r\n pass\r\n\r\n\r\n def on_slider_change(self, idx):\r\n \r\n #Set Value\r\n E_Manager.m_current_frame = idx\r\n\r\n #\r\n data_3d = None\r\n data_2d = None\r\n cropping_box = None\r\n image = self.video_data[idx]\r\n\r\n try:\r\n #Update 3d Vertice\r\n data_3d = E_Manager.m_save_data_3d[idx]\r\n self.update_vertices(data_3d)\r\n\r\n #Update 2D vertices + cropping box\r\n data_2d = E_Manager.m_save_data_2d[idx]\r\n cropping_box = E_Manager.m_save_croppingbox[idx]\r\n\r\n\r\n #Get current tree widget\r\n heatmap_idx = self.tree_widget.indexOfTopLevelItem(self.tree_widget.currentItem())\r\n heatmap_image = E_Manager.m_save_2d_heatmap[idx][:,:,heatmap_idx]\r\n self.update_heatmap(heatmap_image)\r\n\r\n except IndexError:\r\n pass\r\n\r\n\r\n\r\n\r\n self.update_image(image=image, vertices_2d=data_2d, cropping_box=cropping_box)\r\n\r\n def on_video_from_client(self, path):\r\n print(path)\r\n self.set_video_data(path)","sub_path":"python/gui/_mainwindow.py","file_name":"_mainwindow.py","file_ext":"py","file_size_in_byte":15256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"82098152","text":"import matplotlib.pyplot as 
plt\nimport seaborn as sns\nimport numpy as np\n\ndef plot_decision_regions(X_train, X_test, y_train, y_test, clf):\n\n X = np.append(X_train, X_test, axis=0)\n y = np.append(y_train, y_test, axis=0)\n \n n_classes = len(np.unique(y))\n \n xx, yy = make_meshgrid(X, y, h=0.01)\n \n model = clf.fit(X_train, y_train)\n\n fig = plt.figure()\n axs = plt.gca()\n \n plot_contours(axs, model, xx, yy)\n\n # plot training data with 'x's\n plot_data(X_train, y_train, axs, 'o')\n # plot unknown data with 'o's\n plot_data(X_test, y_test, axs, 'x')\n\n plt.show()\n return\n\ndef make_meshgrid(x, y, h=.02):\n\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy\n\n\ndef plot_contours(ax, model, xx, yy, **params):\n \n Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n out = ax.contourf(xx, yy, Z, cmap = plt.cm.Set3, alpha=0.5, **params)\n return out\n\ndef plot_data(X, y, axs, m='x'):\n\n n_classes = len(np.unique(y))\n colors = ['blue', 'red', 'green']\n # plot data with colors according to class labels\n for l, c in zip(range(n_classes), colors):\n xs = []\n for xi, yi in zip(X, y):\n if yi == l:\n xs.append(xi)\n xs = np.array(xs)\n axs.scatter(xs[:, 0], xs[:, 1], color=c, marker=m, alpha=1.0, edgecolor='black')\n return\n\ndef plot_KMeans(X, clf):\n \n plt.figure(figsize=(10,7))\n colormap = np.array(['red', 'lime', 'black'])\n model = clf.fit(X)\n plt.scatter(X[:, 0], X[:, 1], c=colormap[clf.labels_], s=40)\n plt.show()\n\n","sub_path":"plotData.py","file_name":"plotData.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"6835971","text":"import random, copy\ndef Creer_Grille (Liste_Cases, iterations):\n for i in range (iterations):\n Liste_Cases.append([])\n\ndef Creer_Case (iterations_2, biome_case, intensite_case, etat_case, Liste_Cases):\n dictionary = {\"Biome\" : biome_case, \"Intensite du feu\" : intensite_case, \"etat\" : etat_case}\n Liste_Cases[iterations_2].append(dictionary)\n\ndef checkNeighboursExist(coord_i,coord_j, Choixbiome):\n neighboursExist = []\n for i in range(-1,2,1):\n for j in range(-1,2,1):\n while True:\n try:\n assert(coord_i + i < len(Choixbiome))\n assert(coord_i + i >= 0)\n assert(coord_j + j < len(Choixbiome))\n assert(coord_i + i >= 0)\n except AssertionError:\n neighboursExist.append(0)\n break\n neighboursExist.append(1)\n break\n return neighboursExist\n\ndef seedToForest(Choixbiome, iterations, forestSeeds, pourcentage_foret):\n cptForet = 0\n for i in range (forestSeeds):\n Choixbiome[random.randint(0,iterations-1)][random.randint(0,iterations-1)]= 1\n cptForet += 1\n while cptForet < ((pourcentage_foret)*iterations*iterations):\n forestStorage = copy.deepcopy(Choixbiome)\n for i in range(iterations):\n for j in range(iterations):\n if forestStorage[i][j] == 1:\n neighboursExist = checkNeighboursExist(i,j,Choixbiome)\n neighbourPositions = [[i-1,j-1],[i-1,j],[i-1,j+1],[i,j-1],[i,j],[i,j+1],[i+1,j-1],[i+1,j],[i+1,j+1]]\n for k in range(len(neighboursExist)):\n randomNumberChoice = random.randint(1,10)\n if (randomNumberChoice > 6 and neighboursExist[k] == 1):\n if Choixbiome[neighbourPositions[k][0]][neighbourPositions[k][1]] == 0:\n Choixbiome[neighbourPositions[k][0]][neighbourPositions[k][1]] = 1\n cptForet += 1\n return Choixbiome\n\ndef dropToLake(Choixbiome, iterations, waterDroplets, pourcentage_lac):\n 
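\"\"\"Grow lakes the same way seedToForest grows forest. Docstring added\n for clarity; it is not in the original source. 'waterDroplets' seed\n cells (value 2) are scattered on the grid, then every lake cell spreads\n to random empty neighbours (4/10 chance per neighbour and pass) until\n pourcentage_lac of the grid is water.\"\"\"\n 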
cptLac = 0\n for i in range (waterDroplets):\n randomNumber1 = random.randint(0,iterations-1)\n randomNumber2 = random.randint(0,iterations-1)\n try:\n assert(Choixbiome[randomNumber1][randomNumber2] != 1)\n except AssertionError:\n randomNumber1 = random.randint(0,iterations-1)\n randomNumber2 = random.randint(0,iterations-1)\n Choixbiome[randomNumber1][randomNumber2] = 2\n cptLac += 1\n while cptLac < ((pourcentage_lac)*iterations*iterations):\n lakeStorage = copy.deepcopy(Choixbiome)\n for i in range(iterations):\n for j in range(iterations):\n if lakeStorage[i][j] == 2:\n neighboursExist = checkNeighboursExist(i,j,Choixbiome)\n neighbourPositions = [[i-1,j-1],[i-1,j],[i-1,j+1],[i,j-1],[i,j],[i,j+1],[i+1,j-1],[i+1,j],[i+1,j+1]]\n for k in range(len(neighboursExist)):\n randomNumberChoice = random.randint(1,10)\n if (randomNumberChoice > 6) and (neighboursExist[k] == 1):\n if Choixbiome[neighbourPositions[k][0]][neighbourPositions[k][1]] == 0:\n Choixbiome[neighbourPositions[k][0]][neighbourPositions[k][1]] = 2\n cptLac += 1\n return Choixbiome\n\ndef makeSomeHouses(Choixbiome, iterations, howManyHouses):\n for i in range (howManyHouses):\n randomNumber1 = random.randint(0,iterations-1)\n randomNumber2 = random.randint(0,iterations-1)\n try:\n assert(Choixbiome[randomNumber1][randomNumber2] != 2)\n except AssertionError:\n randomNumber1 = random.randint(0,iterations-1)\n randomNumber2 = random.randint(0,iterations-1)\n Choixbiome[randomNumber1][randomNumber2] = 3\n return Choixbiome\n\ndef housesHavePlantations(Choixbiome, iterations, pourcentage_plantation):\n cptPlantation = 0\n while cptPlantation < ((pourcentage_plantation)*iterations*iterations):\n plantationStorage = copy.deepcopy(Choixbiome)\n for i in range(iterations):\n for j in range(iterations):\n if plantationStorage[i][j] == 3 or plantationStorage[i][j] == 4:\n neighboursExist = checkNeighboursExist(i,j,Choixbiome)\n neighbourPositions = [[i-1,j-1],[i-1,j],[i-1,j+1],[i,j-1],[i,j],[i,j+1],[i+1,j-1],[i+1,j],[i+1,j+1]]\n for k in range(len(neighboursExist)):\n randomNumberChoice = random.randint(1,10)\n if (randomNumberChoice > 6 and neighboursExist[k] == 1):\n if Choixbiome[neighbourPositions[k][0]][neighbourPositions[k][1]] != 3:\n Choixbiome[neighbourPositions[k][0]][neighbourPositions[k][1]] = 4\n cptPlantation += 1\n return Choixbiome\n \ndef Choixbiome(iterations, waterDroplets, forestSeeds, pourcentage_foret, pourcentage_lac, howManyHouses, pourcentage_plantation):\n Choixbiome=[] \n for i in range (iterations):\n Choixbiome.append([])\n for j in range (iterations):\n Choixbiome[i].append(0)\n Choixbiome = seedToForest(Choixbiome, iterations, forestSeeds, pourcentage_foret)\n Choixbiome = dropToLake(Choixbiome, iterations, waterDroplets, pourcentage_lac)\n Choixbiome = makeSomeHouses(Choixbiome, iterations, howManyHouses)\n Choixbiome = housesHavePlantations(Choixbiome, iterations, pourcentage_plantation)\n return Choixbiome\n\ndef Remplir_Grille_Initiale(Liste_Cases, iterations, waterDroplets, forestSeeds, pourcentage_foret, pourcentage_lac, howManyHouses, pourcentage_plantation):\n Creer_Grille(Liste_Cases, iterations)\n choix_Biome = Choixbiome(iterations, waterDroplets, forestSeeds, pourcentage_foret, pourcentage_lac, howManyHouses, pourcentage_plantation)\n for j in range(iterations):\n for i in range(iterations):\n Creer_Case (j,choix_Biome[i][j], 0, \"Vierge\", Liste_Cases)\n","sub_path":"NSI/simulateur feu presque 
final/GenerationTerrain.py","file_name":"GenerationTerrain.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"296450146","text":"# -*- coding: utf-8 -*-\n# %%\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsys.path.insert(0, '../../')\nimport mscl.plotting\ncolors = mscl.plotting.set_plotting_style()\n\n# Load the two data sets\nfigS6 = pd.read_csv('../../data/csv/van_den_Berg_2016_figS6B.csv')\nfigS6['error'] = figS6['Max'] - figS6['mean']\nfig5B = pd.read_csv('../../data/csv/poolman_fig5b.csv')\n\nfig, ax = plt.subplots(1, 2, figsize=(6, 3))\n\n# Format the axes\nrotation = (10, 0)\nfor i, a in enumerate(ax):\n a.xaxis.set_tick_params(labelsize=8, rotation=rotation[i])\n a.yaxis.set_tick_params(labelsize=8)\nax[0].set_ylabel('percent survival', fontsize=8)\nax[1].set_xlabel('MscL channels per cell', fontsize=8)\nax[1].set_ylabel('percent survival', fontsize=8)\nax[0].set_title('van den Berg et al. 2016 - Fig. S6B', fontsize=8,\n backgroundcolor=colors['pale_yellow'], y=1.04)\nax[1].set_title('van den Berg et al. 2016 - Fig. 5B', fontsize=8,\n backgroundcolor=colors['pale_yellow'], y=1.04)\n\n# Add the appropriate panel labels\nax[0].text(-0.3, 1.09, '(A)', fontsize=8, transform=ax[0].transAxes)\nax[1].text(-0.25, 1.09, '(B)', fontsize=8, transform=ax[1].transAxes)\n\n_ = ax[0].bar(width=0.5, bottom=0, x=figS6['sample'], height=figS6['mean'], yerr=figS6['error'], color=colors['red'])\n_ = ax[1].errorbar(fig5B['x'], fig5B['y'], xerr=fig5B['xmax'] - fig5B['x'],\n yerr=fig5B['y'] * 0.3, lw=1, fmt='.', color=colors['red'],\n capsize=2)\nplt.tight_layout()\nplt.savefig('../../figs/figRX_vandenberg_comparison.pdf', bbox_inches='tight')\nplt.savefig('../../figs/figRX_vandenberg_comparison.png', bbox_inches='tight', dpi=300)\n","sub_path":"figs/figRX_vandenberg_comparison.py","file_name":"figRX_vandenberg_comparison.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"95153879","text":"import flask\nimport requests\nimport os\n\n# use the genius api\ngenius_access_token = os.getenv(\"genius_access_token\")\n\n# search for the lyrics\ndef genius(name):\n genius_search_url = (\n f\"http://api.genius.com/search?q={name}&access_token={genius_access_token}\"\n )\n try:\n lyrics = requests.get(genius_search_url)\n lyrics_json = lyrics.json()\n lyrics_url = lyrics_json[\"response\"][\"hits\"][0][\"result\"][\"url\"]\n artist_img = lyrics_json[\"response\"][\"hits\"][0][\"result\"][\"primary_artist\"][\n \"image_url\"\n ]\n artist_url = lyrics_json[\"response\"][\"hits\"][0][\"result\"][\"primary_artist\"][\n \"url\"\n ]\n name_genius = lyrics_json[\"response\"][\"hits\"][0][\"result\"][\"primary_artist\"][\n \"name\"\n ]\n except:\n return flask.render_template(\n \"error.html\", error_state=\"Failed to fetch the lyrics page!\"\n )\n return lyrics_url, artist_img, artist_url, name_genius\n","sub_path":"lyrics.py","file_name":"lyrics.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629974246","text":"# https://www.interviewbit.com/problems/merge-overlapping-intervals/\n\n# Definition for an interval.\nclass Interval:\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n\nclass Solution:\n # @param intervals, a list of Intervals\n # @return a 
list of Interval\n def merge(self, intervals):\n intervals.sort(key=lambda x:(x.start, x.end))\n new_interval = None\n result = []\n for index, interval in enumerate(intervals):\n if new_interval is None:\n new_interval = interval\n elif self.isOverlap(new_interval, interval):\n new_interval.end = max(interval.end, new_interval.end)\n else:\n result.append(new_interval)\n new_interval = interval\n if not new_interval is None:\n result.append(new_interval)\n return result\n\n def isOverlap(self, i1, i2):\n if i1.start >= i2.start and i1.start <= i2.end:\n return True\n if i2.start >= i1.start and i2.start <= i1.end:\n return True\n return False\n","sub_path":"interview-bit-merge-overlapping-intervals.py","file_name":"interview-bit-merge-overlapping-intervals.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"578187460","text":"import typer\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pyanchor.link_checker import LinkResults\n\n\napp = typer.Typer()\n\n\ndef print_results(links: dict):\n \"\"\"Simple utility function to print to terminal\"\"\"\n num_of_failed_links = 0\n for k, v in links.items():\n if k == 200:\n for link in v:\n typer.echo(typer.style(f\"[ {k} ] - {link}\", fg=\"green\"))\n elif k == 500:\n for link in v:\n typer.echo(typer.style(f\"[ {k} ] - {link}\", fg=\"red\"))\n num_of_failed_links += 1\n else:\n for link in v:\n typer.echo(typer.style(f\"[ {k} ] - {link}\", fg=\"yellow\"))\n num_of_failed_links += 1\n\n typer.echo(\"========================\")\n typer.echo(f\"TOTAL LINKS CHECKED: {len(links[200]) + num_of_failed_links}\")\n typer.echo(f\"FAILED: {num_of_failed_links}\")\n\n\n@app.command()\ndef main(\n url: str,\n sitemap: bool = typer.Option(\n False, \"--sitemap\", help=\"Use if the URL is a sitemap.xml link\"\n ),\n):\n \"\"\"Check for broken links on any given webpage. 
Pass in a sitemap URL to \n check all link on a given website.\n \"\"\"\n\n if not url.startswith(\"http\"):\n raise ValueError(\"Please provide a URL with a valid HTTP scheme\")\n\n if sitemap is True:\n set_of_urls = set()\n results = dict()\n\n r = requests.get(url)\n if r.status_code == 200:\n soup = BeautifulSoup(r.content, \"lxml\")\n sitemap_links = soup.find_all(\"loc\")\n\n for sitemap_link in sitemap_links:\n set_of_urls.add(sitemap_link.text)\n\n for _url in set_of_urls:\n link_results = LinkResults(_url).results\n print_results(link_results)\n\n else:\n results = LinkResults(url).results\n print_results(results)\n\n\nif __name__ == \"__main__\":\n app()\n","sub_path":"pyanchor/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"43835410","text":"# -*- coding:utf8 -*-\r\n\r\nimport imp\r\nimport os\r\n\r\n\r\nclass JsonRPC(object):\r\n def __init__(self, json_data):\r\n self.json_data = json_data\r\n self.response = Response()\r\n\r\n def call_method(self):\r\n autoload = AutoLoad(self.json_data[\"method\"])\r\n try:\r\n for func in (autoload.is_valid_module, autoload.is_valid_method):\r\n if not func():\r\n return False\r\n return autoload.call_method()\r\n except LoadError as e:\r\n self.response.error = e.args[0]\r\n\r\n def validate_mandatory(self):\r\n mandatory = (\"jsonrpc\", \"id\", \"method\", \"params\")\r\n\r\n for i in mandatory:\r\n if i not in self.json_data:\r\n self.response.error = \"{0} is mandatory\".format(i)\r\n return False\r\n return True\r\n\r\n def validate_null(self):\r\n not_null = (\"jsonrpc\", \"id\", \"method\")\r\n\r\n for i in not_null:\r\n if not self.json_data[i]:\r\n self.response.error = \"{0} cannot be null\".format(i)\r\n return False\r\n return True\r\n\r\n def validate_jsonrpc(self):\r\n if self.json_data[\"jsonrpc\"] == \"2.0\":\r\n return True\r\n else:\r\n self.response.error = \"jsonrpc is not 2.0\"\r\n return False\r\n\r\n def validate_method(self):\r\n if self.json_data['method'].count('.') == 1 and self.json_data['method'].split('.')[0] and \\\r\n self.json_data['method'].split('.')[1]:\r\n return True\r\n else:\r\n self.response.error = \"the format of method is wrong\"\r\n\r\n def validate_id(self):\r\n if isinstance(self.json_data['id'], int):\r\n return True\r\n else:\r\n self.response.error = \"id is not number\"\r\n\r\n def validate_auth(self):\r\n no_login_method = (\"user.login\", \"api.info\")\r\n\r\n if self.json_data[\"method\"] in no_login_method:\r\n pass\r\n elif self.json_data.get(\"auth\", None):\r\n if not self.auth():\r\n return False\r\n else:\r\n self.response.error = \"auth needed\"\r\n return False\r\n return True\r\n\r\n def validate(self):\r\n for func in (self.validate_mandatory, self.validate_null, self.validate_jsonrpc, self.validate_method,\r\n self.validate_id, self.validate_auth):\r\n if not func():\r\n return False\r\n return True\r\n\r\n def auth(self):\r\n auth_string = (\"string1\", \"string2\")\r\n if self.json_data[\"auth\"] in auth_string:\r\n return True\r\n self.response.error = \"auth failed\"\r\n return False\r\n\r\n def execute(self):\r\n if not self.validate():\r\n return False\r\n\r\n self.response.id = self.json_data[\"id\"]\r\n method = self.call_method()\r\n if method:\r\n try:\r\n self.response.result = method(**self.json_data[\"params\"])\r\n except Exception as e:\r\n self.response.error = e.args[0]\r\n\r\n\r\nclass AutoLoad(object):\r\n def __init__(self, 
module_method, path=\"modules\"):\r\n file_dir = os.path.dirname(__file__)\r\n abs_path = os.path.abspath(file_dir)\r\n self.path = [os.path.join(abs_path, path)]\r\n self.module = module_method.split(\".\")[0].lower()\r\n self.method = module_method.split(\".\")[1].lower()\r\n self.load_module = None\r\n\r\n def is_valid_module(self):\r\n try:\r\n fp, path, desc = imp.find_module(self.module, self.path)\r\n except ImportError:\r\n raise LoadError(\"module '{0}' not found\".format(self.module, self.path))\r\n fp.close()\r\n return True\r\n\r\n def is_valid_method(self):\r\n fp, path, desc = imp.find_module(self.module, self.path)\r\n self.load_module = imp.load_module(self.module, fp, path, desc)\r\n if not getattr(self.load_module, self.method, None):\r\n raise LoadError(\"method '{0}' not found\".format(self.method))\r\n elif not callable(getattr(self.load_module, self.method)):\r\n raise LoadError(\"'{0}' not callable\".format(self.method))\r\n else:\r\n pass\r\n fp.close()\r\n return True\r\n\r\n def call_method(self):\r\n return getattr(self.load_module, self.method)\r\n\r\n\r\nclass Response(object):\r\n def __init__(self):\r\n self.id = None\r\n self.result = None\r\n self.error = None\r\n self.json_rpc = \"2.0\"\r\n\r\n def to_dict(self):\r\n _dict = dict()\r\n for x, y in self.__dict__.iteritems():\r\n if callable(y) or not y:\r\n pass\r\n else:\r\n _dict[x] = y\r\n if not _dict.get('id', None):\r\n _dict['id'] = -1\r\n return _dict\r\n\r\n\r\nclass LoadError(Exception):\r\n pass\r\n","sub_path":"app/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"276615605","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport scipy.io as sio\nfrom Bio import SeqIO\nimport collections\nimport sys\n\nimport torch\nimport argparse\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport random\n\n#sys.path.append(\"..\")\nfrom evaluation_metrics import *\nfrom data_processing import *\n\n\n######## File params ########\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--use_cuda', action='store_true', help='IF USE CUDA (Default == False)')\nparser.add_argument('--hidden_size', type=int, default=100, help='Size of hidden layer')\nparser.add_argument('--emb_dim', type=int, default=100, help='Embedding dimensions')\nparser.add_argument('--n_epochs', type=int, default=10, help='Number of single iterations through the data')\nparser.add_argument('--batch_size', type=int, default=80, help='Batch size')\nparser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate (for both, encoder and decoder)')\nparser.add_argument('--n_layers', type=int, default=1, help='Number of layers (for both, encoder and decoder)')\nparser.add_argument('--eval_every', type=int, default=1, help='num iterations before evaluation')\nparser.add_argument('--is_bidirectional', type=bool, default=False, help='Whether or not RNN is bidirectional')\nparser.add_argument('--print_every', type=int, default=1, help='num iterations before printing')\nparser.add_argument('--dropout_dec_p', type=float, default=0.1, help='Dropout (%) in the decoder')\nparser.add_argument('--main_data_dir', type=str, default= \"/scratch/ak6201/Capstone/data/\", help='Directory where data is saved (in folders tain/dev/test)')\nparser.add_argument('--out_dir', type=str, default=\"\", help=\"Directory to save the models state dict (No 
default)\")\nopt = parser.parse_args()\nprint(opt)\n\n#Human Sequences\nfasta = opt.main_data_dir + 'human_sequences.fasta'\ntest_set_file = opt.main_data_dir + 'human_annotations_temporal_holdout.mat'\n\nsequences, names = load_FASTA(fasta)\ntrain_inds, valid_inds, test_inds, y_trainHuman, y_validHuman, y_testHuman, go_termsHuman = load_test_sets(test_set_file)\n\ntrain_seqsHuman = [sequences[i] for i in train_inds]\nprint('Number of training prots: ' + str(len(train_seqsHuman)))\nvalid_seqsHuman = [sequences[i] for i in valid_inds]\nprint('Number of validation prots: ' + str(len(valid_seqsHuman)))\ntest_seqsHuman = [sequences[i] for i in test_inds]\nprint('Number of testing prots: ' + str(len(test_seqsHuman)))\n\n#Yeast sequences\nfasta = opt.main_data_dir + 'yeast_sequences.fasta'\ntest_set_file = opt.main_data_dir + 'yeast_MF_temporal_holdout.mat'\n\nsequences, names = load_FASTA(fasta)\ntrain_inds, valid_inds, test_inds, y_trainYeast, y_validYeast, y_testYeast, go_termsYeast = load_test_sets(test_set_file)\n\ntrain_seqsYeast = [sequences[i] for i in train_inds]\nprint('Number of training prots: ' + str(len(train_seqsYeast)))\nvalid_seqsYeast = [sequences[i] for i in valid_inds]\nprint('Number of validation prots: ' + str(len(valid_seqsYeast)))\ntest_seqsYeast = [sequences[i] for i in test_inds]\nprint('Number of testing prots: ' + str(len(test_seqsYeast)))\n\n\n##########################\n### 1. Data formation ###\n##########################\n\nyTrainYeast = torch.from_numpy(y_trainYeast).type(torch.LongTensor)\nyValidYeast = torch.from_numpy(y_validYeast).type(torch.LongTensor)\nyTestYeast = torch.from_numpy(y_testYeast).type(torch.LongTensor)\n\nyTrainHuman = torch.from_numpy(y_trainHuman).type(torch.LongTensor)\nyValidHuman = torch.from_numpy(y_validHuman).type(torch.LongTensor)\nyTestHuman = torch.from_numpy(y_testHuman).type(torch.LongTensor)\n\nk = 2 #value for kmers\n\ntrain_seqsHuman_length = sequence_lengths_with_kmers(train_seqsHuman, k)\nvalid_seqsHuman_length = sequence_lengths_with_kmers(valid_seqsHuman, k)\ntest_seqsHuman_length = sequence_lengths_with_kmers(test_seqsHuman, k)\ntrain_seqsYeast_length = sequence_lengths_with_kmers(train_seqsYeast, k)\nvalid_seqsYeast_length = sequence_lengths_with_kmers(valid_seqsYeast, k)\ntest_seqsYeast_length = sequence_lengths_with_kmers(test_seqsYeast, k)\n\n####################\n### 2.GET K-MERS ###\n####################\n\nk_mers_human = get_k_mers(train_seqsHuman, valid_seqsHuman, test_seqsHuman, k)\nk_mers_yeast = get_k_mers(train_seqsYeast, valid_seqsYeast, test_seqsYeast, k)\n\n### GET TENSORS FOR THE DATA WITH KMERS\n\nTrainSeqsYeast = TransformAAsToTensor_with_kmers(train_seqsYeast, k, k_mers_yeast)\nValidSeqsYeast = TransformAAsToTensor_with_kmers(valid_seqsYeast, k, k_mers_yeast)\nTestSeqsYeast = TransformAAsToTensor_with_kmers(test_seqsYeast, k, k_mers_yeast)\n\n# This can take a while for k >= 2\nTrainSeqsHuman = TransformAAsToTensor_with_kmers(train_seqsHuman, k, k_mers_human)\nValidSeqsHuman = TransformAAsToTensor_with_kmers(valid_seqsHuman, k, k_mers_human)\nTestSeqsHuman = TransformAAsToTensor_with_kmers(test_seqsHuman, k, k_mers_human)\n\n\n#############################\n### 3. 
GRU IMPLEMENTATION ###\n#############################\n\n\nclass RNN_GRU(nn.Module):\n \"\"\"\n GRU model\n \"\"\"\n\n def __init__(self, vocab_size, emb_dim, num_labels, hidden_size, n_layers=1, dropout=0.1, is_bidirectional = False):\n\n \"\"\"\n @param vocab_size: size of the vocabulary.\n @param emb_dim: size of the word embedding\n \"\"\"\n super(RNN_GRU, self).__init__()\n\n self.num_labels = num_labels\n self.num_directions = 1 # it is 2 if the rnn is bidirectional\n self.hidden_size = hidden_size\n self.is_bidirectional = is_bidirectional\n self.dropout = nn.Dropout(p=dropout)\n self.embed = nn.Embedding(vocab_size+1, emb_dim, padding_idx=0)\n self.gru = nn.GRU(emb_dim, hidden_size, n_layers, dropout, bidirectional=is_bidirectional)\n self.linear = nn.Linear(self.num_directions*hidden_size, num_labels)\n\n\n def forward(self, input_seqs):\n \"\"\"\n @param data: matrix of size (batch_size, max_sentence_length). Each row in data represents a\n review that is represented using n-gram index. Note that they are padded to have same length.\n @param length: an int tensor of size (batch_size), which represents the non-trivial (excludes padding)\n length of each sentences in the data.\n \"\"\"\n\n embedded = self.embed(input_seqs) # size = (max_length, batch_size, embed_size)\n hidden = None\n outputs, hidden = self.gru(embedded, hidden) # outputs are supposed to be probability distribution right?\n if self.is_bidirectional == True:\n outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:] # Sum bidirectional outputs\n\n last_hidden = self.dropout(outputs[-1,:,:].squeeze())\n output_probability = torch.nn.functional.sigmoid(self.linear(last_hidden))\n\n return output_probability # size : (batch_size, num_labels)\n\n\n#########################\n### 4. Early stopping ###\n#########################\n\ndef early_stop(val_acc_history, t=2, required_progress=0.00001):\n \"\"\"\n Stop the training if there is no non-trivial progress in k steps\n @param val_acc_history: a list contains all the historical validation acc\n @param required_progress: the next acc should be higher than the previous by\n at least required_progress amount to be non-trivial\n @param t: number of training steps\n @return: a boolean indicates if the model should earily stop\n \"\"\"\n # TODO: add your code here\n\n cnt = 0 # initialize the count --> to store count of cases where difference in\n # accuracy is less than required progress.\n\n if(len(val_acc_history) > 0): # if list has size > 0\n for i in range(t): # start the loop\n index = len(val_acc_history) - (i+1) # start from the last term in list and move to the left\n if (index >= 1): # to check if index != 0 --> else we can't compare to previous value\n if (abs(val_acc_history[index] - val_acc_history[index-1]) < required_progress):\n cnt += 1 # increase the count value\n else:\n break # break if difference is greater\n\n if(cnt != t): # if count is equal to t, return True\n return False\n else:\n return True\n\n\n#########################\n### 5. 
Training Stage ###\n#########################\n\n\ndef train(valid_sequences, valid_label, num_epochs, optimizer, data_iter, model, training_length, threshold):\n losses = []\n total_batches = int(training_length/ opt.batch_size) #375\n\n validation_losses = []\n F_scores = []\n calculated_f_score = np.zeros(len(threshold))\n max_precision = np.zeros(len(threshold))\n max_recall = np.zeros(len(threshold))\n\n for epoch in range(1, num_epochs+1):\n stop_training = False\n for i, (train_data, train_labels, length_batch) in enumerate(data_iter):\n # train_data size: (26, 34350) ; train_label size: (26, 147)\n # This needs to be modified. Max length is batch specific !!!!!\n model.train(True)\n model.zero_grad()\n outputs = model(train_data.transpose(0,1))\n loss = criterion(outputs, train_labels.float())\n losses.append(loss.data[0])\n loss.backward()\n\n\n clipped = torch.nn.utils.clip_grad_norm(model.parameters(), 0.5)\n # clip gradients because RNN\n for pr in model.parameters():\n pr.data.add_(-clipped, pr.grad.data)\n\n optimizer.step()\n\n if i%opt.eval_every == 0:\n# # Erly stop using validation loss\n model.eval()\n val_outputs = model(Variable((valid_sequences).transpose(0,1).type(torch.LongTensor), volatile=True))\n eval_loss = criterion(val_outputs.data, valid_label.type(torch.FloatTensor))\n print(eval_loss.data[0])\n validation_losses.append(eval_loss.data[0])\n stop_training = early_stop(validation_losses, 3)\n\n # Print statements\n if stop_training:\n print(\"earily stop triggered\")\n break\n if (i+1) % opt.print_every == 0:\n# print('Epoch: [{0}/{1}], Step: [{2}/{3}], Train loss: {4}, F_Score: {5}, Validation loss:{6}'.format(\n# epoch, num_epochs, i+1, total_batches, np.mean(losses)/(total_batches*epoch), np.max(calculated_f_score), np.mean(np.array(validation_acc_history)[:,-1])))\n print('Epoch: [{0}/{1}], Step: [{2}/{3}], Train loss: {4}, Validation loss:{5}'.format(\n epoch, num_epochs, i+1, total_batches, np.mean(losses)/(total_batches*epoch), np.mean(np.array(validation_losses))))\n if stop_training == True:\n break\n\n #return calculated_f_score, max_precision, max_recall\n\n\n#############################\n### 6. 
Learning the model ###\n#############################\n\n\n### HUMAN ###\n\ndata_size = len(train_seqsHuman) #9751\nnum_labels = go_termsHuman.shape[0] #147\nvocab_size = len(acid_dict) + len(k_mers_human)\n\nmodel = RNN_GRU(vocab_size, 50, num_labels, opt.hidden_size, n_layers=opt.n_layers, dropout=opt.dropout_dec_p, is_bidirectional=opt.is_bidirectional)\ncriterion = nn.MultiLabelSoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)\n\ndata_iter = batch_iter(opt.batch_size, TrainSeqsHuman, yTrainHuman, train_seqsHuman_length)\nthreshold = [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 1]\n# Model Training\nValidSeqsHuman_small, yValidHuman_small = reduced_set(ValidSeqsHuman, valid_seqsHuman_length, yValidHuman, 100)\n#f2, p2, r2 = train(ValidSeqsHuman_small, yValidHuman_small, num_epochs, optimizer, data_iter, model, data_size, threshold)\ntrain(ValidSeqsHuman_small, yValidHuman_small, opt.n_epochs, optimizer, data_iter, model, data_size, threshold)\ntorch.save(model.state_dict(), \"{}/saved_model_human_{}.pth\".format(opt.out_dir))\n\n### YEAST ###\n","sub_path":"GRU Implementation/main_model_gru.py","file_name":"main_model_gru.py","file_ext":"py","file_size_in_byte":12057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"498081948","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\nfrom itertools import islice\n\n#use this line for LS period search. # of cols to use depends on how many periods\n#ls_period_array = np.genfromtxt('./LS_periods/LS_periods.data', usecols=(1,4,7,10,13))\n#ls_period = [ f for f in listdir('/Volumes/ext_drive/work/KELT/jon_lcs/raw/ls_periods_month/') if isfile(join('/Volumes/ext_drive/work/KELT/jon_lcs/raw/ls_periods_month/',f))][1:]\n#ls_periodogram = [ f for f in listdir('/Volumes/ext_drive/work/KELT/jon_lcs/raw/period_month/') if isfile(join('/Volumes/ext_drive/work/KELT/jon_lcs/raw/period_month/',f))]\n\ntrial = 'e'\nsubsample = '1.0'\nfinetune = '0.1'\nsigma = '5'\nminperiod = '10.0'\nmaxperiod = '100.0'\nNumbins = '80'\n\naov_periodogram = [ f for f in listdir('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/aov/periodograms/{0}'.format(trial)) if isfile(join('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/aov/periodograms/{0}'.format(trial),f))][1:]\n\naov_period = [ f for f in listdir('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/aov/peaks/{0}'.format(trial)) if isfile(join('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/aov/peaks/{0}'.format(trial),f))][1:]\n\n\n\n#ls_period = ['/ABE-050/best_per_day_{0}.txt'.format(trial), '/ABE-075/best_per_day_{0}.txt'.format(trial)]\n#ls_periodogram = 'w_N03_lc_41046.dat.ls' #ABE-075\n#ls_periodogram = ['w_N11_lc_122551_{0}.dat.ls'.format(trial),'w_N03_lc_41046_{0}.dat.ls'.format(trial)] #ABE-050\n#kelt_id = ['w_N11_lc_122551.dat','w_N03_lc_41046.dat']\n#abenums = ['ABE-050','ABE-075']\n#hd_num = ['HD 345439','HD 23478']\n\nfor (i,f) in enumerate(aov_periodogram):\n#for (i,fname) in enumerate(ls_period[:4]):\n fname = f[:-4]\n data1 = np.genfromtxt('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/aov/peaks/{0}/{1}.txt'.format(trial,fname),usecols=2,skip_header=1) #data for phase folded plot with the best periods\n p_1 = data1[0]\n p_2 = data1[4]\n p_3 = data1[8]\n p_4 = data1[12]\n p_5 = data1[16]\n\n data2 = 
np.genfromtxt('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/aov/periodograms/{0}/{1}.aov'.format(trial,fname),skip_header=1) #data for periodogram\n freq = 1/data2[:,0]\n power = data2[:,1]\n period = data2[:,0]\n\n lc_data_comb = np.genfromtxt('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/combined/{0}'.format(fname)) #data for lightcurve (time and magnitude)\n\n\n t0 = 1500.\n time_ = lc_data_comb[:,0]\n mag = lc_data_comb[:,1]\n# time_e=east_data[:,0]\n# mag_e = east_data[:,1]\n# time_w=west_data[:,0]\n# mag_w = west_data[:,1]\n time = time_ - t0\n ph1 = time / p_1 - np.floor(time/p_1)\n ph2 = time / p_2 - np.floor(time/p_2)\n ph3 = time / p_3 - np.floor(time/p_3)\n ph4 = time / p_4 - np.floor(time/p_4)\n ph5 = time / p_5 - np.floor(time/p_5)\n\n\n#------------------begin averaging stuff--------------------\n #break up the ph1, ph2,... into 50 or so bins, and get the averages to plot on the same figure as the data points\n bin_size = 0.02\n Nbins = 50\n (bin_mag1,bin_mag2,bin_mag3,bin_mag4) = ([],[],[],[]) #initializes lists to hold mag info for each bin\n w_bin_mag = []\n w_med_list = []\n med_list = []\n middle_phase_list = []\n for m in range(1,5):\n exec( '(bin_mag{0},med_list{0},middle_phase_list{0}) = ([],[],[])'.format(m) )\n for j in range(Nbins):\n mid_phase =(j*bin_size + (j+1)*bin_size)/2\n for k in range(len(eval('ph{0}'.format(m)))):\n if (j*bin_size) < eval('ph{0}[k]'.format(m)) < ((j+1)*bin_size):\n eval('bin_mag{0}'.format(m)).append(mag[k])\n if len(eval('bin_mag{0}'.format(m))) > 30: # and len(w_bin_mag) > 30:\n exec('bin_med{0} = np.median(bin_mag{0})'.format(m))\n exec('med_list{0}.append(bin_med{0})'.format(m))\n eval('middle_phase_list{0}'.format(m)).append(mid_phase)\n exec( 'bin_mag{0} = []'.format(m) )\n\n exec('middle_phase_array{0} = np.array(middle_phase_list{0})'.format(m))\n \n#------------------end averaging stuff--------------------\n\n f, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharex=False, sharey=False)\n f.subplots_adjust(top = 0.9)\n f.suptitle(' AoV Periodogram'+' subsample= ' + subsample+ ' finetune = ' + finetune + ' whitened with ' + sigma + ' sigma clipping' \\\n + ' Trial ' + trial + '\\n' + 'Min Period = ' + minperiod + ' Max Period = ' + maxperiod + ' Nbins = ' + Numbins + '\\n')\n\n ax1.scatter(time/1000., mag, marker=',',s=0.05)\n ax1.set_title(fname)\n ax1.grid(True)\n ax1.set_xlabel('HJD / 1000',fontsize = 10)\n ax1.set_ylabel('Approx. 
V-mag')\n plt.xlim([time.min() - 20, time.max() + 20])\n plt.ylim([mag.min() - 0.01, mag.max() + 0.01])\n ax1.invert_yaxis()\n# plt.ylim([mag.max() + 0.01, mag.min() - 0.01])\n\n ax2.vlines(period,0,power,colors = 'k', linestyles = 'solid')\n# ax6.title('LS Periodogram for ' + str( ls_periodogram[i][:-3]) + ' ' + abenums[i] \\\n# + '\\n' + ' subsample = '+subsample + ' whitened with '+sigma+ ' sigma clipping' + ' Trial ' + trial + '\\n')\n\n plt.ylim([0, power.max()])\n ax2.set_xlabel('Period (days)')\n ax2.set_ylabel('AoV Power')\n #ax6.xscale('log')\n plt.xlim([0, period.max()])\n fig = plt.gcf()\n# ax = fig.gca()\n ax2.xaxis.grid(True)\n ax2.grid(True,which='both')\n\n ax3.plot(ph1, mag, 'k,',markersize=1.0)\n ax3.plot(ph1-1, mag, 'k,',markersize=2.0)\n ax3.plot(middle_phase_array1,med_list1,'r-')\n ax3.plot(middle_phase_array1-1,med_list1,'r-')\n ax3.set_title('Phase 1: ' + str(p_1) + ' days')\n ax3.grid(True)\n plt.xlim([-1.,1.])\n# plt.ylim([mag.max() + 0.01, mag.min() - 0.01])\n plt.ylim([mag.min() - 0.01, mag.max() + 0.01])\n ax3.invert_yaxis()\n \n\n ax4.plot(ph2, mag, 'k,',markersize=1.0)\n ax4.plot(ph2-1, mag, 'k,',markersize=2.0)\n ax4.plot(middle_phase_array2,med_list2,'r-')\n ax4.plot(middle_phase_array2-1,med_list2,'r-')\n ax4.set_title('Phase 2: ' + str(p_2) + ' days')\n ax4.grid(True)\n plt.xlim([-1.,1.])\n plt.ylim([mag.min() - 0.01, mag.max() + 0.01])\n ax4.invert_yaxis()\n# plt.ylim([mag.max() + 0.01, mag.min() - 0.01])\n\n ax5.plot(ph3, mag, 'k,',markersize=1.0)\n ax5.plot(ph3-1, mag, 'k,',markersize=2.0)\n ax5.set_title('Phase 3: ' + str(p_3) + ' days')\n ax5.plot(middle_phase_array3,med_list3,'r-')\n ax5.plot(middle_phase_array3-1,med_list3,'r-')\n ax5.grid(True)\n plt.xlim([-1.,1.])\n plt.ylim([mag.min() - 0.01, mag.max() + 0.01])\n ax5.invert_yaxis()\n# plt.ylim([mag.max() + 0.01, mag.min() - 0.01])\n\n ax6.plot(ph4, mag, 'k,',markersize=1.0)\n ax6.plot(ph4-1, mag, 'k,',markersize=2.0)\n ax6.set_title('Phase 4: ' + str(p_4) + ' days')\n ax6.plot(middle_phase_array4,med_list4,'r-')\n ax6.plot(middle_phase_array4-1,med_list4,'r-')\n ax6.grid(True)\n ax6.set_xlabel('Phase')\n plt.xlim([-1.,1.])\n plt.ylim([mag.min() - 0.01, mag.max() + 0.01])\n ax6.invert_yaxis()\n# plt.ylim([mag.max() + 0.01, mag.min() - 0.01])\n\n fig.set_size_inches(10.0,10.0)\n\n\n plt.tight_layout(pad=3.0, w_pad=1.5, h_pad = 1.5)\n\n plt.savefig('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/aov/figs/{0}/{1}.png'.format(trial,fname))\n plt.cla()\n plt.clf()\n\n\n\"\"\"\n\n\n f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, sharex=False, sharey=True, figsize=(8,17))\n\n plt.grid(True)\n f.subplots_adjust(top = 0.82)\n# f.suptitle(str(filename), horizontalalignment='right')\n f.text(.1, .9, str( kelt_id[i]) + ' min per = 0.01d ' + 'max per = 15.0d' + '\\n' \\\n + ' subsample = ' + subsample + ' whitened with ' + sigma + ' sigma clipping' + '\\n' \\\n + 'HD ' + hd_num[i] + ' ' + abenums[i] + 'Trial ' + trial)\n #f.text(.1, .86, 'HD number, RA, Dec (SIMBAD): \\n'+ simbad_hd_coords[i])\n plt.ylabel('Approx V-mag (KELT_mag - 3.9)')\n\n# ax1.plot(time_e, mag_e, 'r.', markersize=2.0)\n# ax1.plot(time_w, mag_w, 'b.',markersize=2.0)\n ax1.scatter(time, mag, s=2.0)\n ax1.set_title(ls_periodogram[i][:-3])\n ax1.grid(True)\n plt.xlim([time.min() - 20, time.max() + 20])\n plt.ylim([mag.max() + 0.1, mag.min() - 0.1])\n\n ax2.plot(ph1, mag, 'k.',markersize=2.0)\n ax2.plot(ph1-1, mag, 'k.',markersize=2.0)\n ax2.set_title('Phase: ' + str(p_1) + ' days')\n ax2.grid(True)\n plt.xlim([-1.,1.])\n plt.ylim([mag.max() + 0.1, 
mag.min() - 0.1]) \n\n ax3.plot(ph2, mag, 'k.',markersize=2.0)\n ax3.plot(ph2-1, mag, 'k.',markersize=2.0)\n ax3.set_title('Phase: ' + str(p_2) + ' days')\n ax3.grid(True)\n plt.xlim([-1.,1.])\n plt.ylim([mag.max() + 0.1, mag.min() - 0.1])\n\n ax4.plot(ph3, mag, 'k.',markersize=2.0)\n ax4.plot(ph3-1, mag, 'k.',markersize=2.0)\n ax4.set_title('Phase: ' + str(p_3) + ' days')\n ax4.grid(True)\n plt.xlim([-1.,1.])\n plt.ylim([mag.max() + 0.1, mag.min() - 0.1])\n\n ax5.plot(ph4, mag, 'k.',markersize=2.0)\n ax5.plot(ph4-1, mag, 'k.',markersize=2.0)\n ax5.set_title('Phase: ' + str(p_4) + ' days')\n ax5.grid(True)\n plt.xlim([-1.,1.])\n plt.ylim([mag.max() + 0.1, mag.min() - 0.1])\n\n plt.tight_layout()\n\n plt.savefig('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/Sigma_Ori_E/{0}/phased_lcs_ls/{0}_{1}.png'.format(abenums[i], trial))\n plt.cla()\n plt.clf()\n\n plt.vlines(period,0,power,colors = 'k', linestyles = 'solid')\n plt.title('LS Periodogram for ' + str( ls_periodogram[i][:-3]) + ' ' + abenums[i] \\\n + '\\n' + ' subsample = '+subsample + ' whitened with '+sigma+ ' sigma clipping' + ' Trial ' + trial + '\\n')\n\n plt.ylim([0, power.max()])\n plt.xlabel('Period (days)')\n plt.ylabel('LS Power')\n plt.xscale('log')\n plt.xlim([0, period.max()])\n fig = plt.gcf()\n ax = fig.gca()\n ax.xaxis.grid(True)\n plt.grid(True,which='both')\n fig.set_size_inches(8.0,8.0)\n plt.tight_layout()\n plt.savefig('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/Sigma_Ori_E/{0}/periodograms_ls/{0}_{1}.png'.format(abenums[i], trial) )\n plt.clf()\n plt.cla()\n\n\n plt.vlines(period_clipped,0,power_clipped,colors = 'k', linestyles = 'solid')\n plt.title('Clipped LS Periodogram for ' + str( ls_periodogram[i][:-3]) + ' ' + abenums[i] \\\n + '\\n' + ' subsample = '+ subsample + ' whitened with ' + sigma + ' sigma clipping' + ' Trial ' + trial + '\\n')\n\n plt.ylim([0, power.max()])\n plt.xlabel('Period (days)')\n plt.ylabel('LS Power')\n plt.xscale('log')\n plt.xlim([0, period.max()])\n fig = plt.gcf()\n ax = fig.gca()\n ax.xaxis.grid(True)\n plt.grid(True,which='both')\n plt.savefig('/Volumes/ext_drive/work/KELT/jon_lcs/tfa/Sigma_Ori_E/{0}/periodograms_ls_clipped/{0}_{1}.png'.format(abenums[i], trial) )\n plt.clf()\n plt.cla()\n plt.close()\n\"\"\"\n","sub_path":"phase_fold_aov_tfa.py","file_name":"phase_fold_aov_tfa.py","file_ext":"py","file_size_in_byte":10778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"642354144","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport argparse\nimport pandas as pd\nfrom pprint import pprint\n\n\"\"\"\n\nCopyright:\n\tCopyright University of Sherbrooke, department of biochemistry and\n\tdepartment of computer science.\n\nDate:\n\tJuly 2020\n\nDescription:\n\tThis script reads all output files from G4RNA Screener under the name\n\t'Sequences_WT_xxxxx.csv'. Overlapping windows will be merged. 
Here are the\n\tcolumns in the output: Strand, Chromosome, locStart, locEnd, GeneID,\n\tLocation, TranscriptID, meancGcC, meanG4H, meanG4NN, pG4Start, pG4End,\n\tG4Sequence.\n\n\"\"\"\n\ndef mergeOverlappingSequences(dfTmp):\n\t\"\"\"Merge the sequences of overlapping windows.\n\n\t:param dfTmp: contains overlapping windows.\n\t:type dfTmp: dataFrame\n\n\t:returns: seq, merged sequence.\n\t:rtype: string\n\t\"\"\"\n\tdfTmp = dfTmp.sort_values(by=['wStart'])\n\tseq = str(dfTmp.seqG4.iloc[0])\n\tfor w in range(1,len(dfTmp)):\n\t\tstepTmp = int(dfTmp.wStart.iloc[w] - dfTmp.wEnd.iloc[w-1])-1\n\t\t# convert to int, otherwise it's a float\n\t\twSeq = dfTmp.seqG4.iloc[w]\n\t\tseq += wSeq[-stepTmp:]\n\treturn seq\n\ndef getInfo(df):\n\t\"\"\"Retrieves the information of a window and parses it into a dictionary.\n\n\tAs gene windows and junction windows are not formatted the same way, this\n\tfunction aims to parse them into the same type of dictionary.\n\n\t:param df: contains all overlapping windows.\n\t:type df: dataFrame\n\n\t:returns: dico, contains all information for one window.\n\t:rtype: dictionary\n\t\"\"\"\n\tgeneDesc = df.geneDesc.iloc[0]\n\tgeneDescSplit = geneDesc.split(':')\n\tdico = {'Gene' : [geneDescSplit[0]],\n\t\t\t'meancGcC' : [df.cGcC.mean()],\n\t\t\t'meanG4H' : [df.G4H.mean()],\n\t\t\t'meanG4NN' : [df.G4NN.mean()],\n\t\t\t'pG4Start' : [min(df.wStart)],\n\t\t\t'pG4End' : [max(df.wEnd)]}\n\t# dico['Chromosome'] = [geneDescSplit[0]]\n\t# dico['Strand'] = [geneDescSplit[2]]\n\treturn dico\n\ndef mergeWindows(df):\n\t\"\"\"Merge overlapping windows.\n\n\t:param df: contains overlapping windows.\n\t:type df: dataFrame\n\n\t:returns: pG4, the pG4 resulting from the merge of overlapping windows.\n\t:rtype: dictionary\n\t\"\"\"\n\tpG4rSeq = mergeOverlappingSequences(df)\n\tif len(pG4rSeq) >= 20:\n\t\tdicoInfo = getInfo(df)\n\t\tpG4Start = dicoInfo['pG4Start'][0]\n\t\tpG4End = dicoInfo['pG4End'][0]\n\t\tpG4 = {}\n\t\tpG4 = dicoInfo\n\t\tpG4['pG4Start'] = [min(pG4Start, pG4End)]\n\t\tpG4['pG4End'] = [max(pG4Start, pG4End)]\n\t\tpG4['Sequence'] = [pG4rSeq]\n\t\tpG4['Description'] = [df.geneDesc.iloc[0]]\n\n\telse:\n\t\tpG4 = {}\n\treturn pG4\n\ndef filterOnScores(dicoParam, dfWindows):\n\t\"\"\"Filter the windows based on thresholds.\n\n\t:param dicoParam: contains all parameters that were given to g4rna screener.\n\t:type dicoParam: dictionary\n\t:param dfWindows: contains all windows of all genes from one species.\n\t:type dfWindows: dataframe\n\n\t:returns: dfWindows, with only the windows above the thresholds.\n\t:rtype: dataFrame\n\t\"\"\"\n\tdfWindows = dfWindows[ dfWindows.cGcC >= dicoParam[\"cGcC\"] ].dropna()\n\tdfWindows = dfWindows[ dfWindows.G4H >= dicoParam[\"G4H\"] ].dropna()\n\tdfWindows = dfWindows[ dfWindows.G4NN >= dicoParam[\"G4NN\"] ].dropna()\n\treturn dfWindows\n\n
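# Worked example for mergeOverlappingSequences (illustrative numbers only,\n# matching the script defaults --WINDOW 60 and --STEP 10): two consecutive\n# windows over the thresholds share 50 nt, so stepTmp = wStart[w] -\n# wEnd[w-1] - 1 = -50, and seq += wSeq[-stepTmp:] appends only the last\n# 10 nt, e.g. S[0:60] + S[10:70][50:] == S[0:70].\n\n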
def mergeG4(df, dicoParam):\n\t\"\"\"Browses all junction windows to find those that are overlapping.\n\n\tHere we browse all junction windows and keep only those that overlap the\n\t100th nucleotide. Indeed, if a window over the thresholds does not overlap\n\tthis position, it lies only in a gene and not on a junction.\n\n\t:param df: contains all windows.\n\t:type df: dataFrame\n\t:param dicoParam: contains all parameters that were given to g4rna screener.\n\t:type dicoParam: dictionary\n\n\t:returns: dfpG4, contains all pG4 for that strand.\n\t:rtype: dataFrame\n\t\"\"\"\n\tdfTmp = pd.DataFrame()\n\tdfpG4 = pd.DataFrame()\n\tdfTmp = dfTmp.append(df[0:1]) # store the first window\n\tif len(df) == 1:\n\t\tpG4 = mergeWindows(dfTmp)\n\t\tdfTmp = pd.DataFrame.from_dict(pG4)\n\t\tdfpG4 = dfpG4.append(dfTmp)\n\telse:\n\t\tfor w in range(1,len(df)): # w for window\n\t\t\tpG4 = mergeWindows(dfTmp)\n\t\t\t# browses all windows over the thresholds, except the first one\n\t\t\tif (df.geneDesc.iloc[w] == df.geneDesc.iloc[w-1] and\n\t\t\t (df.wStart.iloc[w] >= df.wStart.iloc[w-1] and \\\n\t\t\t df.wStart.iloc[w] <= df.wEnd.iloc[w-1])):\n\t\t\t\t# if the window overlaps, add it to the current pG4\n\t\t\t\tdfTmp = dfTmp.append(df[w:w+1])\n\t\t\t\tif w == len(df)-1:\n\t\t\t\t\tpG4 = mergeWindows(dfTmp)\n\t\t\t\t\tdfTmp = pd.DataFrame.from_dict(pG4)\n\t\t\t\t\tdfpG4 = dfpG4.append(dfTmp)\n\t\t\telse: # new pG4\n\t\t\t\tpG4 = mergeWindows(dfTmp)\n\t\t\t\tdfTmp = pd.DataFrame.from_dict(pG4)\n\t\t\t\tdfpG4 = dfpG4.append(dfTmp)\n\t\t\t\tdfTmp = df.iloc[w:w+1]\n\t\t\t\tif w == len(df)-1 :\n\t\t\t\t\tpG4 = mergeWindows(dfTmp)\n\t\t\t\t\tdfTmp = pd.DataFrame.from_dict(pG4)\n\t\t\t\t\tdfpG4 = dfpG4.append(dfTmp)\n\treturn dfpG4\n\ndef merge(filename, dicoParam, repro):\n\t\"\"\"Read one G4RNA Screener output file, filter it and merge its windows.\"\"\"\n\tdfpG42 = pd.DataFrame()\n\ttry:\n\t\tdfWindows = pd.read_csv(filename, sep='\\t', index_col=0)\n\texcept:\n\t\t# print(\"This file couldn't be converted into a data frame: \" + filename)\n\t\tpass\n\telse:\n\t\t# dataFrame with all windows from G4RNA Screener\n\t\tif filename == '/home/anais/Documents/Projet/G4Conservation/reviewTRCentro/saccharomyces_cerevisiae/CSVFile/Sequences_centromere_00001.csv':\n\t\t\tprint(dfWindows)\n\t\tdfWindows.columns = ['geneDesc','cGcC',\n\t\t\t\t\t\t\t'G4H','seqG4','wStart',\n\t\t\t\t\t\t\t'wEnd', 'G4NN']\n\t\tdfWindows = filterOnScores(dicoParam, dfWindows)\n\t\tif filename == '/home/anais/Documents/Projet/G4Conservation/reviewTRCentro/saccharomyces_cerevisiae/CSVFile/Sequences_centromere_00001.csv':\n\t\t\tprint(dfWindows)\n\t\t\tprint('---------')\n\t\tdfpG42 = dfpG42.append(mergeG4(dfWindows, dicoParam))\n\t\tdfpG42['Repro'] = repro\n\t\treturn dfpG42\n\ndef main(dicoParam, directory, repro):\n\tdfpG4MonoGene = pd.DataFrame()\n\tdfpG4DiGene = pd.DataFrame()\n\tdfpG4TriGene = pd.DataFrame()\n\tdfpG4WT = pd.DataFrame()\n\n\tfor path, dirs, files in os.walk(directory):\n\t\tfor file in files:\n\t\t\tif '_00' in file and '.csv' in file and 'centro' in file:\n\t\t\t\tinputfile = directory+'/CSVFile/'+file\n\t\t\t\tprint(inputfile)\n\t\t\t\tif '_Mono_' in file:\n\t\t\t\t\tdfpG4MonoGene = dfpG4MonoGene.append(merge(inputfile, dicoParam, repro))\n\t\t\t\t\tdfpG4MonoGene = dfpG4MonoGene.reset_index(drop=True)\n\t\t\t\telif '_Tri_' in file:\n\t\t\t\t\tdfpG4TriGene = dfpG4TriGene.append(merge(inputfile, dicoParam, repro))\n\t\t\t\t\tdfpG4TriGene = dfpG4TriGene.reset_index(drop=True)\n\t\t\t\telif 'Sequences_centromere' in file:\n\t\t\t\t\tdfpG4WT = dfpG4WT.append(merge(inputfile, dicoParam, repro))\n\t\t\t\t\tdfpG4WT = dfpG4WT.reset_index(drop=True)\n\n\tif len(dfpG4MonoGene) > 0:\n\t\tdfpG4MonoGene = dfpG4MonoGene.drop_duplicates(subset=None, keep='first', inplace=False)\n\t\tdfpG4MonoGene = dfpG4MonoGene.reset_index(drop=True)\n\tif 
len(dfpG4TriGene) > 0:\n\t\tdfpG4TriGene = dfpG4TriGene.drop_duplicates(subset=None, keep='first', inplace=False)\n\t\tdfpG4TriGene = dfpG4TriGene.reset_index(drop=True)\n\tif len(dfpG4WT) > 0:\n\t\tdfpG4WT = dfpG4WT.drop_duplicates(subset=None, keep='first', inplace=False)\n\t\tdfpG4WT = dfpG4WT.reset_index(drop=True)\n\n\t# dfpG4MonoGene.to_csv(path_or_buf=directory+'/pG4_Shuffle_Mono_Micro.csv', header=True, index=None, sep='\\t')\n\t# dfpG4TriGene.to_csv(path_or_buf=directory+'/pG4_Shuffle_Tri_Micro.csv', header=True, index=None, sep='\\t')\n\t# dfpG4WT.to_csv(path_or_buf=directory+'/pG4_Shuffle_WT_Micro.csv', header=True, index=None, sep='\\t')\n\tdfpG4MonoGene.to_csv(path_or_buf=directory+'/pG4_Shuffle_Mono_Centro.csv', header=True, index=None, sep='\\t')\n\tdfpG4TriGene.to_csv(path_or_buf=directory+'/pG4_Shuffle_Tri_Centro.csv', header=True, index=None, sep='\\t')\n\tdfpG4WT.to_csv(path_or_buf=directory+'/pG4_Shuffle_WT_Centro.csv', header=True, index=None, sep='\\t')\n\n\ndef createDicoParam(arg):\n\t\"\"\"Retrieves arguments and put them in a dictionary.\n\n\t:param arg: contains all arguments given to the script, those are principaly\n\t\tparameters from G4RNA Screener.\n\t:type arg: arg_parser\n\n\t:returns: dicoParam, contains all arguments given to the script.\n\t:rtype: dictionary\n\t\"\"\"\n\tdicoParam = {\"G4H\" : float(arg.THRESHOLD_G4H),\n\t\t\t\t\"cGcC\" : float(arg.THRESHOLD_CGCC),\n\t\t\t\t\"G4NN\" : float(arg.THRESHOLD_G4NN),\n\t\t\t\t\"windowLength\" : int(arg.WINDOW),\n\t\t\t\t\"step\" : int(arg.STEP)}\n\treturn dicoParam\n\ndef build_arg_parser():\n\tparser = argparse.ArgumentParser(description = 'G4Annotation')\n\t# parser.add_argument ('-p', '--path', default = '/home/vana2406/scratch/'+\\\n\t# \t'G4Conservation/reviewTRCentro/')\n\tparser.add_argument ('-p', '--path', default = '/home/anais/Documents/Projet/'+\\\n\t\t'G4Conservation/reviewTRCentro/')\n\tparser.add_argument ('-sp', '--specie', default = \\\n\t\t'escherichia_coli_str_k_12_substr_mg1655')\n\tparser.add_argument ('-r', '--repro', default = '1')\n\tparser.add_argument ('-G4H', '--THRESHOLD_G4H', default = 0.9)\n\tparser.add_argument ('-CGCC', '--THRESHOLD_CGCC', default = 4.5)\n\tparser.add_argument ('-G4NN', '--THRESHOLD_G4NN', default = 0.5)\n\tparser.add_argument ('-W', '--WINDOW', default = 60)\n\tparser.add_argument ('-S', '--STEP', default = 10)\n\treturn parser\n\nif __name__ == '__main__':\n\tparser = build_arg_parser()\n\targ = parser.parse_args()\n\tsp = arg.specie\n\trepro = arg.repro\n\tif repro == 'Wt':\n\t\tpath = arg.path+sp\n\telse:\n\t\tpath = arg.path+sp+'/Repro'+repro\n\tprint(\"specie : \" + sp)\n\tdicoParam = createDicoParam(arg)\n\tmain(dicoParam, path, repro)\n\tprint(\"\\tDone\")\n","sub_path":"scripts/review/pG4AnnotationCentro.py","file_name":"pG4AnnotationCentro.py","file_ext":"py","file_size_in_byte":9025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"253125715","text":"from discord.ext import commands\nfrom .utils import utils\nimport traceback\nimport datetime\nimport discord\nimport sys\nclass Events():\n def __init__(self,bot):\n self.bot = bot\n self.redis = bot.db.redis\n\n def Time(self):\n return datetime.datetime.now().strftime(\"%b/%d/%Y %H:%M:%S\")\n\n#############################################################\n# _ _ _ #\n# | | (_) | | #\n# | | _ ___ | |_ ___ _ __ ___ _ __ #\n# | | | | / __| | __| / _ \\ | '_ \\ / _ \\ | '__| #\n# | |____ | | \\__ \\ | |_ | __/ | | | | | __/ | | #\n# |______| |_| |___/ 
\\__| \\___| |_| |_| \\___| |_| #\n# #\n#############################################################\n\n async def on_server_join(self,server): # If the bot joins a server, record it.\n print ("\\033[96m: \\033[94m {} :({}) -- {}\\033[00m".format(self.Time(), server.id, server.name))\n utils.prGreen("\\t\\t Servers: {}\\t\\t Members: {}".format(len(self.bot.servers), len(set(self.bot.get_all_members()))))\n await self.redis.hset("Info:Server",str(server.id),str(server.name))\n await self.redis.set("Info:Total Server",len(self.bot.servers))\n await self.redis.set("Info:Total Member",len(set(self.bot.get_all_members())))\n await self.redis.set("{}:Config:CMD_Prefix".format(server.id),"!")\n # Default server settings\n await self.redis.hset("{}:Config:Delete_MSG".format(server.id),"core","off")\n\n async def on_server_remove(self,server): # If the bot leaves or is removed from a server, delete its record.\n print("\\033[91m:\\033[94m[ {} : \\033[96m({})\\033[92m -- {}\\033[00m".format(self.Time(), str(server.id), str(server.name)))\n utils.prGreen("\\t\\t Servers:{}\\t\\tMembers:{}".format(len(self.bot.servers), len(set(self.bot.get_all_members()))))\n await self.redis.hdel("Info:Server",server.id)\n\n async def on_server_update(self,before,after): # If the server changes its name or icon, update the stored values.\n print("\\033[95m:\\033[94m {} :\\033[96m {} \\033[93m | \\033[92m({}) -- {}\\033[00m".format(self.Time(),after.name,after.id, after))\n if after.icon:\n await self.redis.set("{}:Icon".format(after.id),after.icon)\n await self.redis.hset("Info:Server",str(after.id),str(after))\n\n async def on_member_join(self,member):\n print("\\033[98m:\\033[94m {} :\\033[96m {} ||| \\033[93m ({})\\033[92m -- {} ||| {}\\033[00m".format(self.Time(), member.server.name, member.server.id, member.name, member.id))\n await self.redis.set("Info:Total Member",len(set(self.bot.get_all_members())))\n\n async def on_member_remove(self,member):\n print("\\033[93m:\\033[94m {}:\\033[96m {} ||| \\033[93m ({})\\033[92m -- {} ||| {}\\033[00m".format(self.Time(), member.server.name, member.server.id, member.name, member.id))\n await self.redis.set("Info:Total Member",len(set(self.bot.get_all_members())))\n\n async def on_member_update(self,before,after):\n check = await self.redis.get("Member_Update:{}:check".format(after.id))\n if check: # If the key exists, the cooldown hasn't expired yet\n return\n if before.avatar != after.avatar:\n if after.avatar is None:\n return\n print("\\033[97m:\\033[94m {} :\\033[92m {} ||| {}\\033[00m".format(self.Time(), after.name, after.id))\n await self.redis.hset("Info:Icon",after.id,after.avatar)\n if before.name != after.name:\n print("\\033[97m: \\033[94m {}:\\033[93m Before : {} |||\\033[92m After : {} ||| {}\\033[00m".format(self.Time(),before.name,after.name, after.id))\n await self.redis.hset("Info:Name",after.id,after.name)\n await self.redis.set("Member_Update:{}:check".format(after.id),'cooldown',expire=15) # cooldown to avoid duplicate updates\n\n
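 # The Member_Update key above is a simple per-user debounce; the pattern,\n # roughly:\n # if await redis.get(key): return # still cooling down\n # ...handle the update...\n # await redis.set(key, 'cooldown', expire=15)\n\n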
 async def on_command(self,command,ctx):\n if ctx.message.channel.is_private:\n return\n print("\\033[96m\\033[94m {0}:\\033[96m {1.server.name} ||| \\033[93m {1.author} ||| \\033[94m ({1.author.id})\\033[92m ||| {1.clean_content}\\033[00m".format(self.Time(), ctx.message))\n\n async def on_message(self,msg):\n if self.bot.user.id == msg.author.id:\n if msg.channel.is_private is False:\n try:\n if self.bot.log_config.get(msg.server.id):\n if msg.channel.id in self.bot.log_config[msg.server.id]['channel']:\n return\n except:\n pass\n if msg.channel.is_private:\n utils.prCyan("PRIVATE")\n utils.prGreen(" {} : {} |||{}".format(self.Time(), msg.author.name, msg.clean_content))\n else:\n utils.prGreen(" {} : {} ||| {} ||| ({}) ||| {}".format(self.Time(), msg.author.name,msg.server.name,msg.server.id, msg.clean_content))\n\n async def on_command_completion(self,command,ctx):\n if command.cog_name is None:\n return\n if ctx.message.channel.is_private:\n return\n try:\n print(command.cog_name)\n check = await self.bot.db.redis.hgetall("{}:Config:Delete_MSG".format(ctx.message.server.id))\n if check.get(command.cog_name.lower()) == "on":\n await self.bot.delete_message(ctx.message)\n await self.redis.hincrby("{0.server.id}:Total_Command:{0.author.id}".format(ctx.message),ctx.invoked_with, increment=1)\n await self.redis.hincrby("Info:Total_Command", ctx.invoked_with, increment=1)\n await self.redis.hincrby("{0.server.id}:Total_Command:User:{0.author.id}".format(ctx.message),ctx.invoked_with, increment=1)\n except:\n utils.prRed("Failed to delete user command - {0.name} - {0.id}\\n".format(ctx.message.server))\n utils.prRed(traceback.format_exc())\n\n async def send_cmd_help(self,ctx):\n if ctx.invoked_subcommand:\n pages = self.bot.formatter.format_help_for(ctx,ctx.invoked_subcommand)\n for page in pages:\n await self.bot.send_message(ctx.message.channel, page.replace("\\n","fix\\n",1))\n else:\n pages = self.bot.formatter.format_help_for(ctx,ctx.command)\n for page in pages:\n await self.bot.send_message(ctx.message.channel,page.replace("\\n","fix\\n",1))\n\n async def on_command_error(self,error,ctx):\n if self.bot.user.id == "181503794532581376":\n print(error)\n if isinstance(error, commands.MissingRequiredArgument):\n await self.send_cmd_help(ctx)\n elif isinstance(error,commands.BadArgument):\n await self.send_cmd_help(ctx)\n elif isinstance(error, commands.CommandInvokeError):\n errors = traceback.format_exception(type(error), error, error.__traceback__)\n Current_Time = datetime.datetime.utcnow().strftime("%b/%d/%Y %H:%M:%S UTC")\n utils.prRed(Current_Time)\n utils.prRed("Error!")\n traceback.print_exception(type(error), error, error.__traceback__)\n cog_error = '```fix\\nCogs:{}\\tCommand:{}\\tAuthor:{}\\n{}\\nError:\\n{}```'.format(ctx.command.cog_name,ctx.command,ctx.message.author,ctx.message.clean_content,error)\n user=discord.utils.get(self.bot.get_all_members(),id="105853969175212032")\n await self.bot.send_message(user, "```py\\n{}```\\n{}\\n```py\\n{}\\n```".format(Current_Time + "\\n"+ "ERROR!",cog_error,"".join(errors)))\n await self.bot.send_message(ctx.message.channel,"There was a problem; I have sent a report to the creator.\\nHopefully it will be fixed soon, or you may have used the command incorrectly.")\n\ndef setup(bot):\n bot.add_cog(Events(bot))\n","sub_path":"Bot/cogs/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":8015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"212735088","text":"#\n# Copyright (C) 1997-2015 JDE Developers Team\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see http://www.gnu.org/licenses/.\n# Authors :\n# Alberto Martin Florido \n#\nfrom PyQt5.QtCore import QSize, pyqtSignal\nfrom PyQt5.QtGui import QImage, QPixmap\nfrom PyQt5.QtWidgets import QPushButton,QWidget, QLabel\nfrom PyQt5 import QtGui,QtCore\nimport cv2\n\nclass CameraWidget(QWidget):\n IMG_WIDTH=320\n IMG_HEIGHT=240\n\n imageUpdate=pyqtSignal()\n\n def __init__(self,winParent):\n super(CameraWidget, self).__init__()\n self.winParent=winParent\n self.labelImage=winParent.image\n self.labelImageFiltered = winParent.imageFiltered\n self.initUI()\n\n def initUI(self):\n\n self.setMinimumSize(680,500)\n self.setMaximumSize(680,500)\n\n self.imgLabel=QLabel(self)\n self.imgLabel.resize(640,360)\n self.imgLabel.move(10,5)\n self.imgLabel.show()\n\n def updateImage(self):\n\n img = self.winParent.getDrone().getImage().data\n if img is not None:\n resized = cv2.resize(img,(self.IMG_WIDTH,self.IMG_HEIGHT))\n image = QtGui.QImage(resized.data, resized.shape[1], resized.shape[0], resized.shape[1]*resized.shape[2], QtGui.QImage.Format_RGB888);\n size=QtCore.QSize(img.shape[1],img.shape[0])\n #self.label.resize(size)\n self.labelImage.setPixmap(QtGui.QPixmap.fromImage(image))\n\n #print the filtered images\n\n imgFiltered = self.winParent.getAlgorithm().getImageFiltered()\n if imgFiltered is not None:\n resized = cv2.resize(imgFiltered,(self.IMG_WIDTH,self.IMG_HEIGHT))\n image = QtGui.QImage(resized.data, resized.shape[1], resized.shape[0], resized.shape[1]*resized.shape[2], QtGui.QImage.Format_RGB888);\n size=QtCore.QSize(imgFiltered.shape[1],imgFiltered.shape[0])\n #self.label.resize(size)\n self.labelImageFiltered.setPixmap(QtGui.QPixmap.fromImage(image))\n\n def closeEvent(self, event):\n self.winParent.closeCameraWidget()\n","sub_path":"exercises/drone_cat_mouse/cat_py/gui/cameraWidget.py","file_name":"cameraWidget.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"301904563","text":"import pandas as pd\r\nfrom pandas import ExcelWriter\r\n\r\nx = 3 #number of sheets to use (Using a range because that is what is needed in most cases)\r\n\r\n#input which columns to use\r\nsheets = pd.read_excel('file_extraction_path', sheet_name = list(range(0, x)), usecols = 0) \r\n\r\n#Create List of Column Names\r\nnames = []\r\nfor i in range(0,x):\r\n names.append((\"Account_Number_Sheet_%d\"%(i + 1)))\r\n \r\n#Combine imported data to create new dataframe\r\ndf = pd.concat(sheets, axis=1) \r\npanda.columns = panda.columns[:0].tolist() + names \r\n \r\n#Export to excel\r\nwriter = ExcelWriter('save_path')\r\ndf.to_excel(writer) \r\nwriter.save()\r\n","sub_path":"Pulling_columns_comb_to_create_new_sheet.py","file_name":"Pulling_columns_comb_to_create_new_sheet.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"97628925","text":"from turtle import Screen\nimport time\nfrom snake import Snake\nfrom food import Food\nfrom scoreboard import Scoreboard\n\nSPEED = 0.1\nWIDTH = 600\nHEIGHT = 600\nHALF_WIDTH = int(WIDTH / 2) - 20\nHALF_HEIGHT = int(HEIGHT / 2) - 20\n\nscreen = Screen()\nscreen.bgcolor(\"black\")\nscreen.setup(width=WIDTH, height=HEIGHT)\nscreen.title(\"My Snake 
Game\")\nscreen.tracer(0)\n\nsnake = Snake()\nfood = Food()\nscoreboard = Scoreboard()\n\nscreen.listen()\n# These onkey methods require a function.\nscreen.onkey(snake.up, \"w\")\nscreen.onkey(snake.down, \"s\")\nscreen.onkey(snake.left, \"a\")\nscreen.onkey(snake.right, \"d\")\n\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(SPEED)\n\n snake.move()\n\n # detect collison with food\n if snake.head.distance(food) < 16:\n snake.extend()\n food.refresh()\n scoreboard.increase_score()\n\n # detect collision with wall\n if snake.head.xcor() < -HALF_WIDTH or snake.head.xcor() > HALF_WIDTH or snake.head.ycor() < -HALF_HEIGHT or snake.head.ycor() > HALF_HEIGHT:\n scoreboard.reset()\n snake.reset()\n\n for segment in snake.segments[1:]:\n if snake.head.distance(segment) < 10:\n scoreboard.reset()\n snake.reset()\n\n\nscreen.exitonclick()\n","sub_path":"day24/snake_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"33756072","text":"from PIL import Image\nfrom PIL.ExifTags import TAGS\n\n\ndef get_exif(fn):\n ret = {}\n i = Image.open(fn)\n info = i._getexif()\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n ret[decoded] = value\n return ret\n\n\nif __name__ == \"__main__\":\n import sys\n\n get_exif(int(sys.argv[1]))\n","sub_path":"getexif.py","file_name":"getexif.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"321195487","text":"import os\nimport argparse\n\nimport numpy as np \nimport networkx as nx \nimport pandas as pd \nimport itertools\n\nfrom networkx.drawing.nx_agraph import write_dot, graphviz_layout, to_agraph\n\nimport matplotlib.pyplot as plt\n\nimport glob\n\ndef find_chains(u, n, g, l=set()): \n\n\tif n==0:\n\t\treturn [[u]]\n\t\n\tl_ = l .union( {u} )\n\t# if n > 1:\n\tneighbors = set(g.neighbors(u)) - l_\n\t# else:\n\t# \tneighbors = set(g.neighbors(u)) . intersection({start})\n\t\t\n\tpaths = ( [u] + chain\n\t\tfor neighbor in neighbors\n\t\tfor chain in find_chains(neighbor, n-1, g, l_) )\n\treturn paths\n\ndef find_cycles(u, n, g, start, l=set()): \n\t\n\tif n==0:\n\t\tassert u == start\n\t\treturn [[u]]\n\t\n\tl_ = l .union( {u} )\n\tif n > 1:\n\t\tneighbors = set(g.neighbors(u)) - l_\n\telse:\n\t\tneighbors = set(g.neighbors(u)) . 
intersection({start})\n\t\t\n\tpaths = ( [u] + cycle\n\t\tfor neighbor in neighbors\n\t\tfor cycle in find_cycles(neighbor, n-1, g, start, l_) )\n\treturn paths\n\ndef score_subgraph_module(g, groups):\n\tsubgraph = g.subgraph(groups)\n\n\t# all internal edges of subgraph\n\tk_in = len([(u, v) \n\t\tfor u, v, w in subgraph.edges(data=\"weight\") \n\t\tif u != v])\n\n\tk_self = len([(u, v) for \n\t\tu, v, w in subgraph.edges(data=\"weight\") \n\t\tif u == v])\n\n\tk_all = sum((len(list(g.neighbors(u))) for u in subgraph))\n\n\treturn (k_in + 0) / k_all\n\ndef score_subgraph_module_positive(g, groups):\n\tsubgraph = g.subgraph(groups)\n\n\t# k_neg = len([(u, v) \n\t# \tfor u, v, w in subgraph.edges(data=\"weight\") \n\t# \tif u != v and w < 0])\n\t# if k_neg > 0:\n\t# \treturn 0\n\n\t# all internal edges of subgraph\n\tk_in = len([(u, v) \n\t\tfor u, v, w in subgraph.edges(data=\"weight\") \n\t\tif u != v and w > 0])\n\n\tk_self = len([(u, v) \n\t\tfor u, v, w in subgraph.edges(data=\"weight\") \n\t\tif u == v and w > 0])\n\n\tk_all = sum((len(list(g.neighbors(u))) \n\t\tfor u in subgraph))\n\n\treturn (k_in + 0) / k_all\n\ndef score_subgraph_density(g, groups):\n\t\n\t'''\n\tscore a subgraph of g given by groups\n\t'''\n\n\tsubgraph = g.subgraph(groups)\n\tn = len(subgraph)\n\n\n\t# all internal edges of subgraph\n\tk_in = len([(u, v) \n\t\tfor u, v in subgraph.edges() \n\t\tif u != v])\n\n\tk_self = len([(u, v) \n\t\tfor u, v in subgraph.edges() \n\t\tif u == v])\n\n\treturn (k_in + 0) / n ** 2\n\ndef score_subgraph_density_positive(g, groups):\n\t\n\t'''\n\tscore a subgraph of g given by groups\n\t'''\n\n\tsubgraph = g.subgraph(groups)\n\tn = len(subgraph)\n\n\t# k_neg = len([(u, v) \n\t# \tfor u, v, w in subgraph.edges(data=\"weight\") \n\t# \tif u != v and w < 0])\n\t# if k_neg > 0:\n\t# \treturn 0\n\n\t# all internal edges of subgraph\n\tk_in = len([(u, v) \n\t\tfor u, v, w in subgraph.edges(data=\"weight\") \n\t\tif u != v and w > 0])\n\n\tk_self = len([(u, v) \n\t\tfor u, v, w in subgraph.edges(data=\"weight\") \n\t\tif u == v and w > 0])\n\n\treturn (k_in + 0) / n ** 2\n\ndef score_subgraph_bc(g, groups):\n\t\n\tbc = nx.betweenness_centrality(nx.DiGraph(g))\n\n\treturn np.mean([bc[n] for n in groups])\n\ndef collapse_subgraph(g, subgraph):\n\n\tg.add_node(subgraph)\n\n\tfor n in subgraph:\n\n\t\tnew_edges = []\n\n\t\tif isinstance(g, nx.DiGraph):\n\t\t\tfor u, _, w in g.in_edges(n, data=\"weight\"):\n\t\t\t\tif u in subgraph:\n\t\t\t\t\tu = subgraph\n\t\t\t\tnew_edges.append(\n\t\t\t\t\t(u, subgraph, {\"weight\": w}))\n\t\t\tfor _, v, w in g.out_edges(n, data=\"weight\"):\n\t\t\t\tif v == n: # self loops already included\n\t\t\t\t\tcontinue\n\t\t\t\tif v in subgraph:\n\t\t\t\t\tv = subgraph\n\t\t\t\tnew_edges.append(\n\t\t\t\t\t(subgraph, v, {\"weight\": w}))\n\t\telse:\n\t\t\t\n\t\t\t# undirected case\n\t\t\t# assert False \n\t\t\tfor _, v, w in g.edges(n, data=\"weight\"):\n\t\t\t\tif v in subgraph:\n\t\t\t\t\tv = subgraph\n\t\t\t\tnew_edges.append(\n\t\t\t\t\t(subgraph, v, {\"weight\": w}))\n\n\t\tg.add_edges_from(new_edges)\n\t\tg.remove_node(n)\n\ndef bottom_up_partition(g, \n\tscore_function=score_subgraph_density,\n\tsubgraph_sizes=(2, 3)):\n\n\t'''\n\tperform partition in bottom-up manner\n\t'''\n\n\tg = nx.MultiDiGraph(g.copy())\n\t# g = nx.DiGraph(g.copy())\n\t# g = nx.MultiGraph(g.copy())\n\tnum_edges = len(g.edges())\n\th = nx.DiGraph()\n\n\th.add_nodes_from( g.nodes() )\n\n\t# ## handle self loops\n\tfor u, _ in nx.selfloop_edges(g):\n\t\th.add_edge(u, 
frozenset([u]))\n\tg = nx.relabel_nodes(g, \n\t\tmapping={n: frozenset([n]) \n\t\tfor n, _ in nx.selfloop_edges(g)})\n\n\tmax_size = 15\n\n\t# repeat until g has collapsed into a single node \n\twhile len(g) > 1:\n\t\tprint (\"number of nodes in g:\", len(g),\n\t\t\t\"number of edges:\", len(g.edges()))\n\n\t\ts = 2\n\n\t\twhile s < max_size :\n\t\t\tprint (\"scoring subgraphs of size\", s)\n\t\t\tsubgraph_scores = {}\n\n\t\t\t# for cycle in map(frozenset, nx.simple_cycles(g)):\n\n\t\t\t# for s in subgraph_sizes:\n\t\t\tfor n in g.nodes():\n\t\t\t\tfor cycle in map(frozenset, \n\t\t\t\t\tfind_cycles(n, s, g, start=n)):\n\t\t\t\t\t# find_chains(n, s, g, )):\n\t\t\t\t\tif cycle in subgraph_scores:\n\t\t\t\t\t\tassert np.allclose(subgraph_scores[cycle], \n\t\t\t\t\t\t\tscore_function(g, cycle))\n\t\t\t\t\t\t# assert False\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tsubgraph_scores[cycle] = \\\n\t\t\t\t\t\tscore_function(g, cycle)\n\n\n\t\t\tif len(subgraph_scores) > 0:\n\t\t\t\tchosen_subgraph = max(subgraph_scores, \n\t\t\t\t\tkey=subgraph_scores.get)\n\t\t\t\t# chosen_subgraph = sorted_subgraphs.pop(0)\n\t\t\t\tchosen_subgraph_score = subgraph_scores[chosen_subgraph]\n\n\t\t\t\tif chosen_subgraph_score > 0:\n\t\t\t\t\tprint (\"found positive scoring subgraph of size\", \n\t\t\t\t\t\tlen(chosen_subgraph))\n\t\t\t\t\tbreak\n\t\t\ts += 1\n\n\t\tif s == max_size:\n\t\t\tprint (\"failed to find any postive scorign subgraphs of size\", max_size)\n\t\t\tchosen_subgraph = frozenset().union(g.nodes())\n\n\t\tcollapse_subgraph(g, chosen_subgraph)\n\n\t\t# add chosen subgraph to h\n\t\th.add_node(chosen_subgraph)\n\t\tfor n in chosen_subgraph:\n\t\t\th.add_edge(n, chosen_subgraph)\n\n\t\tassert len(g.edges()) == num_edges\n\t\t\n\n\n\t\t# if len(subgraph_scores) > 0:\n\n\t\t# \t# determine all highest scoring subgraphs\n\t\t# \t# sorted_subgraphs = sorted(subgraph_scores, \n\t\t# \t\t# key=lambda x: subgraph_scores[x],\n\t\t# \t\t# reverse=True)\n\t\t# \tchosen_subgraph = max(subgraph_scores, \n\t\t# \t\tkey=subgraph_scores.get)\n\t\t# \t# chosen_subgraph = sorted_subgraphs.pop(0)\n\t\t# \tchosen_subgraph_score = subgraph_scores[chosen_subgraph]\n\n\t\t# \tif chosen_subgraph_score > 0:\n\n\t\t# \t\tchosen_subgraphs = [chosen_subgraph]\n\n\t\t# \t\t# for subgraph in sorted_subgraphs:\n\t\t# \t\t# \tif subgraph_scores[subgraph] < chosen_subgraph_score:\n\t\t# \t\t# \t\tbreak\n\t\t# \t\t# \tchosen_subgraphs.append(subgraph)\n\n\t\t# \t\t# combine any chosen subgraphs that contain the \n\t\t# \t\t# same nodes\n\t\t# \t\t# if len(chosen_subgraphs) > 1:\n\n\t\t# \t\t# \tprint (\"number of chosen subgraphs before collapse\", \n\t\t# \t\t# \t\tlen(chosen_subgraphs))\n\n\t\t# \t\t# \toverlaps = np.array([[len(x.intersection(y)) \n\t\t# \t\t# \t\tfor y in chosen_subgraphs]\n\t\t# \t\t# \t\tfor x in chosen_subgraphs])\n\t\t# \t\t# \toverlap_g = nx.Graph(overlaps)\n\t\t# \t\t# \tchosen_subgraphs = [frozenset().union([x \n\t\t# \t\t# \t\tfor c in cc for x in chosen_subgraphs[c]]) \n\t\t# \t\t# \t\tfor cc in nx.connected_components(overlap_g)]\n\n\t\t# \t\t# \tprint (\"number of chosen subgraphs after collapse\", \n\t\t# \t\t# \t\tlen(chosen_subgraphs))\n\t\t# \telse:\n\t\t# \t\t# could not find a subgraph with positive score\n\t\t# \t\tprint (\"no subgraphs with positive score\", )\n\t\t# \t\tchosen_subgraphs = [frozenset().union([x for x in g])]\n\n\t\t# else:\n\t\t# \t# could not find a subgraph of selected sizes\n\t\t# \tprint (\"no subgraphs of sizes\", subgraph_sizes)\n\t\t# \tchosen_subgraphs = [frozenset().union([x 
for x in g])]\n\n\t\t# for chosen_subgraph in chosen_subgraphs:\n\n\t\t# \t# remove scores associated with merged nodes\n\t\t# \t# print (\"removing scores associated with\", \n\t\t# \t# \t\"chosen subgraph\")\n\t\t# \t# subgraph_scores = {k: v \n\t\t# \t# \tfor k, v in subgraph_scores.items()\n\t\t# \t# \tif not any([x in k for x in chosen_subgraph])}\n\t\t# \t# print (\"done\")\n\t\t\t\n\t\t# \t# merge subgraph into super-node\n\t\t\t\n\t\t# \tcollapse_subgraph(g, subgraph_scores)\n\n\t\t# \tassert len(g.edges()) == num_edges\n\n\t\t# \t# add chosen subgraph to h\n\t\t# \th.add_node(chosen_subgraph)\n\t\t# \tfor n in chosen_subgraph:\n\t\t# \t\th.add_edge(n, chosen_subgraph)\n\n\t\t# \t# add cycles containing new node\n\t\t# \t# print (\"determining new cycles containing new node\")\n\n\t\t# \t# for s in subgraph_sizes:\n\t\t# \t# \tfor cycle in map(frozenset, \n\t\t# \t# \tfind_cycles(chosen_subgraph, s, g, start=chosen_subgraph)):\n\t\t# \t# \t# find_chains(chosen_subgraph, s, g,)):\n\t\t# \t# \t\t# assert nx.is_strongly_connected(g.subgraph(cycle))\n\t\t# \t# \t\tif cycle in subgraph_scores:\n\t\t# \t# \t\t\tassert np.allclose (subgraph_scores[cycle], score_function(g, cycle))\n\t\t# \t# \t\t\t# assert False\n\t\t# \t# \t\t\tcontinue\n\t\t# \t# \t\tsubgraph_scores[cycle] = \\\n\t\t# \t# \t\t\tscore_function(g, cycle)\n\t\t# \t# print (\"done\")\n\n\treturn h\n\ndef decompose_all_sccs(g, \n\tscore_function=score_subgraph_density,\n\tsubgraph_sizes=(2, 3)):\n\t'''\n\trun decomposition on each SCC in g\n\t'''\n\th = nx.DiGraph()\n\troots = []\n\tfor scc in nx.strongly_connected_components(g):\n\t\tprint (\"processing SCC\", scc)\n\t\tscc = g.subgraph(scc)\n\t\tscc_tree = bottom_up_partition(scc, \n\t\t\tscore_function=score_function,\n\t\t\tsubgraph_sizes=subgraph_sizes)\n\t\tdegrees = dict(scc_tree.out_degree())\n\t\troot = min(degrees, key=degrees.get)\n\t\troots.append(root)\n\t\th = nx.union(h, scc_tree)\n\t\tprint ()\n\n\t# add final root to represent whole network\n\tall_nodes = frozenset(g.nodes())\n\tif len(roots) > 1:\n\t\tfor root in roots:\n\t\t\th.add_edge(root, all_nodes)\n\n\treturn h\n\ndef unpack(x):\n\tif isinstance(x, str):\n\t\treturn [x]\n\tif not any([isinstance(x_, frozenset) for x_ in x]):\n\t\treturn list(x)\n\telse:\n\t\treturn [_x for x_ in x for _x in unpack(x_)]\n\ndef parse_args():\n\t'''\n\tParse from command line\n\t'''\n\tparser = argparse.ArgumentParser(description=\"Read in edgelist and draw SCC decomposition\")\n\n\tparser.add_argument(\"--edgelist\", \n\t\tdest=\"edgelist\", type=str, default=None,\n\t\thelp=\"edgelist to load.\")\n\tparser.add_argument(\"--mapping\", \n\t\tdest=\"mapping\", type=str, default=None,\n\t\thelp=\"mapping file of node ids to names.\")\n\tparser.add_argument(\"--output\", dest=\"output\", \n\t\ttype=str, default=None,\n\t\thelp=\"Directory to save images/merge depths.\")\n\tparser.add_argument(\"--score-function\",\n\t\tdest=\"score_function\", \n\t\ttype=str, default=\"density_pos\", \n\t\tchoices=[\"density\", \"density_pos\", \n\t\t\t\"module\", \"module_pos\",\n\t\t\t\"num_loops\", \"bc\"],\n\t\thelp=\"Scoring function.\")\n\tparser.add_argument(\"--draw\", action=\"store_true\",\n\t\thelp=\"Flag to specify to plot or not.\")\n\n\treturn parser.parse_args()\n\ndef main():\n\n\targs = parse_args()\n\t\n\tedgelist_file = args.edgelist\n\t\n\tprint (\"decomposing\", edgelist_file)\n\n\tg = nx.read_weighted_edgelist(edgelist_file, \n\t\tcreate_using=nx.DiGraph(), \n\t\tdelimiter=\"\\t\")\n\n\tmapping_file = 
args.mapping\n\tif mapping_file is not None:\n\t\tprint (\"relabeling nodes using\", mapping_file)\n\t\tmapping = pd.read_csv(mapping_file, \n\t\t\tindex_col=0, header=None, dtype=str)[1].to_dict()\n\t\tmapping = {str(k): v for k, v in mapping.items()}\n\t\tg = nx.relabel_nodes(g, mapping=mapping)\n\n\tprint (\"found graph with\", len(g), \n\t\t\"nodes and\", len(g.edges()), \"edges\")\n\n\tnx.set_edge_attributes(g, name=\"arrowhead\",\n\t\tvalues={(u, v): (\"normal\" if w>0 else \"tee\") \n\t\t\tfor u, v, w in g.edges(data=\"weight\")})\n\n\tscore_function = args.score_function\n\tprint (\"using score function:\", score_function, )\n\n\toutput_dir = args.output\n\toutput_dir = os.path.join(output_dir, score_function)\n\tif not os.path.exists(output_dir):\n\t\tos.makedirs(output_dir, exist_ok=True)\n\n\tif score_function == \"density\":\n\t\tscore_function = score_subgraph_density\n\telif score_function == \"density_pos\":\n\t\tscore_function = score_subgraph_density_positive\n\telif score_function == \"module\":\n\t\tscore_function = score_subgraph_module\n\telif score_function == \"module_pos\":\n\t\tscore_function = score_subgraph_module_positive\n\telif score_function == \"num_loops\":\n\t\traise Exception\n\t\tscore_function = score_subgraph_num_loops\n\telif score_function == \"bc\":\n\t\tscore_function = score_subgraph_bc\n\telse:\n\t\traise Exception\n\n\th = decompose_all_sccs(g, \n\t\tscore_function=score_function,\n\t\tsubgraph_sizes=range(2, 4))\n\t\n\tprint (\"determining merge depths\")\n\n\tout_degrees = dict(h.out_degree())\n\troot = min(out_degrees, key=out_degrees.get)\n\n\tcore = list(\n\t\tmax(nx.strongly_connected_components(g),\n\t\tkey=len))\n\n\tmerge_depths = {node: \\\n\t\tnx.shortest_path_length(h, node, root) \n\t\tfor node in core }\n\n\tmerge_depth_filename = os.path.join(output_dir,\n\t\t\"merge_depths.csv\")\n\tprint (\"saving merge depths to\", merge_depth_filename)\n\tpd.Series(merge_depths).to_csv(merge_depth_filename)\n\n\tmax_merge_depth = max(merge_depths.values())\n\tgenes_with_max_merge_depth = [k \n\t\tfor k, v in merge_depths.items()\n\t\tif v == max_merge_depth]\n\tprint (\"genes with maximum merge depth\")\n\tprint (genes_with_max_merge_depth)\n\tprint ()\n\n\t# target input genes\n\tin_component = [\n\t\tn for n in g\n\t\tif n not in core\n\t\tand nx.has_path(g, n, core[0])\n\t]\n\n\tscores = {u: \n\t\tnp.mean([1. 
/ nx.shortest_path_length(g, u, c) \n\t\t\tfor c in genes_with_max_merge_depth])\n\t\tfor u in in_component\n\t}\n\n\tscore_filename = os.path.join(output_dir, \n\t\t\"in_component_scores.csv\")\n\tprint (\"saving scores to\", score_filename)\n\tpd.DataFrame.from_dict(scores, \n\t\torient=\"index\").to_csv(score_filename)\n\tprint ()\n\n\n\t### DRAWING\n\tif args.draw:\n\n\t\tdraw_height = 2\n\n\t\tprint (\"DRAWING DECOMPOSITION AT HEIGHT\",\n\t\t\tdraw_height)\n\n\t\th = h.reverse()\n\n\t\tnode_id_map = {}\n\t\tnode_height_map = {}\n\n\t\tfor i, n in enumerate(h.nodes()):\n\t\t\tif isinstance(n, frozenset):\n\t\t\t\tg_ = nx.MultiDiGraph(g.subgraph(unpack(n)))\n\t\t\telse:\n\t\t\t\tg_ = nx.MultiDiGraph(g.subgraph([n]))\n\t\t\t\tg_.remove_edges_from(list(nx.selfloop_edges(g_)))\n\t\t\tnx.set_edge_attributes(g_, name=\"arrowhead\",\n\t\t\t\tvalues={(u, v, w): (\"normal\" if w>0 else \"tee\") \n\t\t\t\t\tfor u, v, w in g_.edges(data=\"weight\")})\n\n\t\t\tnode_id_map.update({n: i})\n\n\t\t\tchildren = list(h.neighbors(n))\n\t\t\tif len(children) == 0:\n\t\t\t\theight = 0\n\t\t\telse:\n\t\t\t\theight = max([nx.shortest_path_length(h, n, \n\t\t\t\t\tel) for el in unpack(n)])\n\n\t\t\tif height > draw_height:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tnode_height_map.update({n: height})\n\n\t\t\tfor no, child in enumerate(children):\n\t\t\t\t# make metanode\n\t\t\t\tnode = \"metanode_{}\".format(no)\n\t\t\t\timage_filename = os.path.join(output_dir, \n\t\t\t\t\t\"subgraph_{}.png\".format(node_id_map[child]))\n\t\t\t\tassert os.path.exists(image_filename)\n\t\t\t\tg_.add_node(node, label=\"\", \n\t\t\t\t\timage=image_filename)\n\t\t\t\tfor n_ in unpack(child):\n\t\t\t\t\tfor u, _, w in g_.in_edges(n_, data=\"weight\"):\n\t\t\t\t\t\tif u == node:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tg_.add_edge(u, node, \n\t\t\t\t\t\tweight=w, \n\t\t\t\t\t\tarrowhead=(\"normal\" if w>0 else \"tee\"))\n\t\t\t\t\tfor _, v, w in g_.out_edges(n_, data=\"weight\"):\n\t\t\t\t\t\tif v == node:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tg_.add_edge(node, v, \n\t\t\t\t\t\tweight=w,\n\t\t\t\t\t\tarrowhead=(\"normal\" if w>0 else \"tee\"))\n\t\t\t\t\tg_.remove_node(n_)\n\t\t\t\t\n\t\t\tplot_filename = os.path.join(output_dir,\n\t\t\t\t\"subgraph_{}.png\".format(i))\n\t\t\tg_.graph['edge'] = {'arrowsize': '.8', \n\t\t\t\t'splines': 'curved'}\n\t\t\tg_.graph['graph'] = {'scale': '3'}\n\n\t\t\ta = to_agraph(g_)\n\t\t\ta.layout('dot') \n\t\t\ta.draw(plot_filename)\n\n\t\t\t# print (\"plotted\", plot_filename)\n\t\t\n\t\th = h.reverse()\n\n\t\t# skip drawing tree for now\n\n\t\tnx.set_node_attributes(h, name=\"image\", \n\t\t\tvalues={n: \n\t\t\tos.path.join(output_dir, \n\t\t\t\"subgraph_{}.png\".format(i) )\n\t\t\tfor i, n in enumerate(h.nodes())})\n\t\tnx.set_node_attributes(h, name=\"label\", values=\"\")\n\n\t\ttree_plot_filename = os.path.join(output_dir, \n\t\t\t\"scc_tree.png\")\n\t\th.graph['edge'] = {'arrowsize': '.8', 'splines': 'curved'}\n\t\th.graph['graph'] = {'scale': '3'}\n\n\t\ta = to_agraph(h)\n\t\ta.layout('dot') \n\t\ta.draw(tree_plot_filename)\n\n\t\tprint (\"plotted\", tree_plot_filename)\n\n\t\t## cleanup of directory\n\t\tprint (\"cleaning up directory\")\n\t\tnodes_to_remove = [node_id_map[n]\n\t\t\tfor n, h in node_height_map.items()\n\t\t\tif h < draw_height]\n\t\t# for f in glob.iglob(os.path.join(output_dir, \n\t\t# \t\"subgraph_*.png\")):\n\t\tfor f in (os.path.join(output_dir, \n\t\t\t\"subgraph_{}.png\".format(n) )\n\t\t\tfor n in nodes_to_remove):\n\t\t\tos.remove(f)\n\nif __name__ == 
\"__main__\":\n\tmain()","sub_path":"src/decomp/hierarchical_scc_decomposition.py","file_name":"hierarchical_scc_decomposition.py","file_ext":"py","file_size_in_byte":15448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"322714019","text":"#!/usr/bin/env python\n\"\"\"\nName file: main.py\nDescription: Adaptive Gesture Recognition\nCamera resolution: 1920x1080\n\"\"\"\n__author__ = \"Victor Segura Tirado\"\n__copyright__ = \"CTRL4 ENVIRO S.L.\"\n__email__ = \"victor.segura@ctrl4enviro.com\"\n\n# Import libraries\nimport sys\nsys.path.append('/home/ubuntu/Workspace/caffe/python')\nimport glob, os\nos.environ['GLOG_minloglevel'] = '2' \nimport cv2 as cv \nimport os, sys, time\nimport numpy as np\nimport scipy\nimport matplotlib\nmatplotlib.use('Agg')\nimport caffe\nimport time\nimport warnings\nimport urllib\nimport psutil\nimport sys\nimport subprocess\nimport tempfile\nimport threading\nimport copy\nimport datetime\nimport contextlib\nimport thread\nimport io\nimport errno\nimport logging\nimport urllib2\nfrom PIL import Image\nfrom cStringIO import StringIO\nfrom threading import Thread, Lock\nfrom joblib import Parallel, delayed\nfrom config_reader import config_reader\nfrom socket import error as SocketError\nfrom stat import S_ISREG, ST_CTIME, ST_MODE\nfrom scipy.ndimage.filters import gaussian_filter\n\n# Custom libraries\nfrom GPIO_comms import *\nfrom body_analyzer import *\nfrom human_pose_estimation import *\nfrom util import *\n\n# Camera image parameters\nheight = 576\nwidth = 704\nchan = 3\n\n# Last mask parameters on moscow fountain\n# --mask 100,376,285,539\n\n# Heatmap average global variable. Must be global variable because threads\n# have to access at the same time on heatmap. Otherwise, pass this variable \n# through arguments on threads expends more time\nheatmap = 0\n\n# Anatomical keypoints on human person\nkeypoints = [0,1,2,3,4,5,6,7]\n\n# Arrays to save maps on threading execution\npeaks_binary = \t[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\narray_map_ori = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n\n\n#--------------------------------------------------------#\n#------------------ AUXILIAR FUNCTIONS ------------------# \n#--------------------------------------------------------#\ndef get_heatmap_paf(oriImg, scale, model, net, debug):\n \n \"\"\"\n Description: Compute CNN neural network using different scales\n Input: oriImg, scale, model, net, export_results\n Output: heatmap, paf\n \"\"\"\n\n # Check if export directories are created\n if(debug == \"YES\"):\n if not os.path.exists('results/keypoints/'):\n os.makedirs('results/keypoints/')\n if not os.path.exists('results/pafs/'):\n os.makedirs('results/pafs/')\n\n # Define hetmap\n global heatmap\n heatmap = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))\n paf = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))\n\n # Scale image resizing dimensions \n imageToTest = cv.resize(oriImg, (0,0), fx=scale, fy=scale, interpolation=cv.INTER_CUBIC)\n imageToTest_padded, pad = padRightDownCorner(imageToTest, model['stride'], model['padvalue'])\n\n # Compute convolutional neural network\n net.blobs['data'].reshape(*(1, 3, imageToTest_padded.shape[0], imageToTest_padded.shape[1]))\n net.blobs['data'].data[...] 
= np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,2,0,1))/256 - 0.5;\n output_blobs = net.forward()\n\n # Check index of heatmpas and pags\n if net.blobs[output_blobs.keys()[1]].data.shape[1] == 19:\n idx_heatmap = 1\n idx_paf = 0\n else: \n idx_heatmap = 0\n idx_paf = 1 \n\n # Extract outputs, resize, and remove padding of heatmap\n heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[idx_heatmap]].data), (1,2,0)) \n heatmap = cv.resize(heatmap, (0,0), fx=model['stride'], fy=model['stride'], interpolation=cv.INTER_CUBIC)\n heatmap = heatmap[:imageToTest_padded.shape[0]-pad[2], :imageToTest_padded.shape[1]-pad[3], :]\n heatmap = cv.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv.INTER_CUBIC)\n\n # Extract outputs, resize, and remove padding of paf\n paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[idx_paf]].data), (1,2,0))\n paf = cv.resize(paf, (0,0), fx=model['stride'], fy=model['stride'], interpolation=cv.INTER_CUBIC)\n paf = paf[:imageToTest_padded.shape[0]-pad[2], :imageToTest_padded.shape[1]-pad[3], :]\n paf = cv.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv.INTER_CUBIC)\n\n # Export each keypoint detected displayed on image\n if(debug == \"YES\"):\n \n for ch in range(heatmap.shape[2]):\n # Save numpy array and image of heatmap\n path_tmp = 'results/keypoints/keypoint_num_'+str(ch)\n np.save(path_tmp+'.npy', heatmap[:,:,ch])\n img_array = np.load(path_tmp+'.npy')\n plt.imsave(path_tmp+'.png', img_array)\t\t\n\n for cp in range(paf.shape[2]):\n # Save numpy array and image of PAF\n path_tmp = 'results/pafs/paf_num_'+str(cp)\n np.save(path_tmp+'.npy', paf[:,:,cp])\n img_array = np.load(path_tmp+'.npy')\n plt.imsave(path_tmp+'.png', img_array)\n\n # Save accumulated heatmap first 8 keypoint\n heatmap_acc = cv.imread('results/keypoints/keypoint_num_7.png')\n for i in range(7):\n heatmap_tmp = cv.imread('results/keypoints/keypoint_num_'+str(i)+'.png')\n heatmap_acc = heatmap_acc + heatmap_tmp \n cv.imwrite(\"results/keypoints/keypoints_acc.png\",heatmap_acc)\n\n # Save accumulated paf first 16 limbs\n paf_acc = cv.imread('results/pafs/paf_num_12.png')\n for i in range(13,38):\n paf_tmp = cv.imread('results/pafs/paf_num_'+str(i)+'.png')\n paf_acc = paf_acc + paf_tmp \n cv.imwrite(\"results/pafs/pafs_acc.png\",paf_acc)\n\n return heatmap, paf\n\n\ndef compute_maps(keypoint):\n\n \"\"\"\n Description: compute maps at specific position\n Input: kepypoint id\n Output: None\n \"\"\"\n\n # Get position of heatmap average \n map_ori = heatmap[:,:,keypoint]\n array_map_ori[keypoint] = map_ori\n\n # Apply gaussian filter\n map = gaussian_filter(map_ori, sigma=3)\n\n # Get left map\n map_left = np.zeros(map.shape)\n map_left[1:,:] = map[:-1,:]\n\n # Get right map\n map_right = np.zeros(map.shape)\n map_right[:-1,:] = map[1:,:]\n\n # Get upper map\n map_up = np.zeros(map.shape)\n map_up[:,1:] = map[:,:-1]\n\n # Get lower map\n map_down = np.zeros(map.shape)\n map_down[:,:-1] = map[:,1:]\n \n # Get peak binary at specific location\n peaks_binary[keypoint] = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map_ori > param['thre1']))\n\n\ndef init_arrays():\n\n \"\"\"\n Description: initialize arrays to compute peaks\n Input: None\n Output: None\n \"\"\"\n \n global peaks_binary\n global array_map_ori\n peaks_binary = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n array_map_ori = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n\n\ndef remove_global_arrays():\n \n \"\"\"\n Description: remove global 
array\n Input: None\n Output: None\n \"\"\"\n \n del globals()['peaks_binary']\n del globals()['array_map_ori']\n\n\ndef compute_peaks():\n \n \"\"\"\n Description: compute peaks\n Input: none\n Output: all_peaks\n \"\"\"\n\n # Initialize variables \n init_arrays()\n all_peaks = []\n peak_counter = 0\n \n # Compute maps on differents thrreads\n Parallel(n_jobs=len(keypoints), verbose=0, backend=\"threading\")(map(delayed(compute_maps), keypoints))\n\n # Iterate each keypoint\n for i in range(len(keypoints)): \n peaks = zip(np.nonzero(peaks_binary[i])[1], np.nonzero(peaks_binary[i])[0])\n peaks_with_score = [x + (array_map_ori[i][x[1],x[0]],) for x in peaks]\n id = range(peak_counter, peak_counter + len(peaks))\n peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]\n all_peaks.append(peaks_with_score_and_id)\n peak_counter += len(peaks)\n\n # Remove variables unused\n remove_global_arrays()\n\n return all_peaks\n\n\ndef gesture_recognition(oriImg, multiplier, model, net, param, filename, eval_PDJ):\n \n \"\"\"\n Description: compute adaptive gesture recognition\n Input: oriImg, multiplier, model, net, param, eval_PDJ\n Output: canvas, code\n \"\"\"\n \n # Compute heatmap and PAF average\n start_time_g1 = time.time()\n global heatmap\n heatmap, paf = get_heatmap_paf(oriImg, multiplier, model, net, eval_PDJ)\n print('Compute neural network took %.2f s.' % ((time.time() - start_time_g1)))\n\n # Compute peaks of heatmap average\n start_time_g2 = time.time()\n all_peaks = compute_peaks()\n print('Compute human points took %.2f s.' % ((time.time() - start_time_g2)))\n\n # Compute human body pose estimation\n start_time_g3 = time.time()\n canvas, dict_positions = human_pose_estimation(all_peaks, keypoints, paf, param, oriImg, eval_PDJ, filename)\n print('Compute human pose took %.2f s.' 
% ((time.time() - start_time_g3)))\n\n # Analyze fixed human body positions\n code = analyze_positions(dict_positions)\n\n # Send code to Jetson TX2 J21 Header GPIO\n send_code(code)\n\n return canvas, code\n\n\n#--------------------------------------------------------#\n#------------------- MAIN FUNCTION ----------------------# \n#--------------------------------------------------------#\nif __name__ == "__main__":\n\n # Reset all bits of Jetson TX2 J21 Header to 0\n init_pins()\n reset_pins()\n\n # Get model and parameters\n global param\n param, model = config_reader()\n multiplier = param['scale_search'] * model['boxsize'] / height\n \n # Set CAFFE with NVIDIA GPU from Jetson TX2 \n caffe.set_mode_gpu()\n caffe.set_device(0)\n net = caffe.Net(model['deployfile'], model['caffemodel'], caffe.TEST)\n\n """\n Example execution main:\n TEST SINGLE IMAGE:\t sudo python main.py --image --evaluate --mask \n TEST VIDEO SEQUENCE: sudo python main.py --video --evaluate --mask \n TEST STREAMING CAMERA: sudo python main.py --mask \n """\n\n # Check arguments\n if len(sys.argv) > 1: \n\n # Test on single image\n if sys.argv[1] == "--image":\n total_time = time.time() \n img = cv.imread(sys.argv[2])\n #filename_img = sys.argv[2].split("/")\n #filename_img = filename_img[len(filename_img)-1][0:-4]\n dir_img = sys.argv[2]\n\n # Check evaluate flag\n evaluate_img = "NO"\n if len(sys.argv)>3:\n if sys.argv[3] == "--evaluate":\n evaluate_img = sys.argv[4]\n \n # Check mask flag\n if len(sys.argv)>5:\n if sys.argv[5] == "--mask":\n h1,h2,w1,w2 = sys.argv[6].split(",")\n img = img[int(h1):int(h2), int(w1):int(w2)] \n\n # Start image processing\n print("Analyzing human body position...")\n canvas, code = gesture_recognition(img, multiplier, model, net, param, filename=dir_img, eval_PDJ=evaluate_img)\n print("Analyzing human body position... done")\n print('Total time to process image: %.2f s.\\n' % (time.time()-total_time)) \n cv.imwrite("results/pose-estimation.jpg",canvas)\n exit(0) \n\n
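 # Note on --mask (illustrative usage; the values 100,376,285,539 come from\n # the header comment): the four comma-separated integers h1,h2,w1,w2 crop\n # the frame to img[h1:h2, w1:w2] before inference, e.g.\n # sudo python main.py --video clip.mp4 --evaluate NO --mask 100,376,285,539\n # ('clip.mp4' is a placeholder file name.)\n\n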
 # Test on single video\n if sys.argv[1] == "--video":\n cap = cv.VideoCapture(sys.argv[2])\n ret, img = cap.read()\n\n # Check evaluate flag\n evaluate_img = "NO" # default, as in the --image branch, to avoid a NameError when --evaluate is omitted\n if len(sys.argv)>3:\n if sys.argv[3] == "--evaluate":\n evaluate_img = sys.argv[4]\n\n # Check mask flag\n if len(sys.argv)>5:\n if sys.argv[5] == "--mask":\n h1,h2,w1,w2 = sys.argv[6].split(",")\n img = img[int(h1):int(h2), int(w1):int(w2)] \n\n # Start video processing\n while (ret):\n total_time = time.time()\n print("Analyzing human body position...")\n canvas, code = gesture_recognition(img, multiplier, model, net, param, filename="None", eval_PDJ=evaluate_img)\n print("Analyzing human body position... done")\n print('Total time to process image: %.2f s.\\n' % (time.time()-total_time))\n cv.imshow("image", canvas)\n cv.waitKey(1) \n ret, img = cap.read()\n # Check mask flag\n if len(sys.argv)>5:\n if sys.argv[5] == "--mask":\n h1,h2,w1,w2 = sys.argv[6].split(",")\n img = img[int(h1):int(h2), int(w1):int(w2)] \n \n exit(0) \n \n else:\n\n # Start video processing from camera\n evaluate_img = "NO" # default; no --evaluate flag in streaming mode\n while (True):\n total_time = time.time()\t\t# Start total timer \n frame_time = time.time() \t# Start frame timer\n stop = False\t\t\t# Init stop flag\n\n while not stop:\n img_pil = Image.open(open('cam/frame-cam.jpg','rb'))\t\t# Open image from camera \n b,g,r = img_pil.split()\t\t\t\t\t\t# Split into BGR channels\t\n img_pil = Image.merge("RGB", (r,g,b))\t\t\t\t# Reorder BGR channels to RGB \n img = np.asarray(img_pil)\t\t\t\t\t# Convert to np array instead of PIL image\n if img.shape == (height, width, chan): \t\t# Check consistency\n stop = True\n print('Get frame from camera took: %.2f s.' % (time.time()-frame_time)) \n\n # Check mask flag\n if len(sys.argv)>5:\n if sys.argv[5] == "--mask":\n h1,h2,w1,w2 = sys.argv[6].split(",")\n img = img[int(h1):int(h2), int(w1):int(w2)] \n\n # Start image processing \n print("Analyzing human body position...")\n canvas, code = gesture_recognition(img, multiplier, model, net, param, filename="None", eval_PDJ=evaluate_img)\n print("Analyzing human body position... done")\n print('Total time to process image: %.2f s.\\n' % (time.time()-total_time)) \n del img\n \n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"118141148","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n"""\nPlugin to send FMS-, ZVEI- and POCSAG-messages via Telegram\n\n@author: Peter Laemmle\n\n@requires: none\n"""\n\n#\n# Imports\n#\nimport logging # Global logger\nimport httplib, urllib, telegram, googlemaps\nfrom includes import globals # Global variables\n\n# Helper function, uncomment to use\nfrom includes.helper import configHandler\nfrom includes.helper import timeHandler\n\n# local variables\nBOTTokenAPIKey = None\nBOTChatIDAPIKey = None\nRICforLocationAPIKey = None\nGoogleAPIKey = None\n\n##\n#\n# onLoad (init) function of plugin\n# will be called one time by the pluginLoader on start\n#\ndef onLoad():\n\t"""\n\tWhile loading the plugins by pluginLoader.loadPlugins()\n\tthis onLoad() routine is called one time to initialize the plugin\n\n\t@requires: nothing\n\n\t@return: nothing\n\t"""\n\tglobal BOTTokenAPIKey\n\tglobal BOTChatIDAPIKey\n\tglobal RICforLocationAPIKey\n\tglobal GoogleAPIKey\n\n\tconfigHandler.checkConfig("sendTelegram")\n\tBOTTokenAPIKey = globals.config.get("sendTelegram","BOTTokenAPIKey")\n\tBOTChatIDAPIKey = globals.config.get("sendTelegram","BOTChatIDAPIKey")\n\tRICforLocationAPIKey = globals.config.get("sendTelegram","RICforLocationAPIKey")\n\tGoogleAPIKey = globals.config.get("sendTelegram","GoogleAPIKey")\n\t\n\treturn\n\n\n##\n#\n# Main function of plugin\n# will be called by the alarmHandler\n#\ndef run(typ,freq,data):\n\t"""\n\tThis function is the implementation of the Plugin.\n\n\tIf necessary the configuration has to be set in the config.ini.\n\n\t@type typ: string (FMS|ZVEI|POC)\n\t@param typ: Typ of the dataset\n\t@type data: map of data (structure see interface.txt)\n\t@param data: Contains the parameter for dispatch\n\t@type freq: string\n\t@keyword freq: 
frequency of the SDR Stick\n\n\t@requires: If necessary the configuration has to be set in the config.ini.\n\n\t@return: nothing\n\t@exception: nothing, make sure this function never throws an exception\n\t\"\"\"\n\t\n\tglobal BOTTokenAPIKey\n\tglobal BOTChatIDAPIKey\n\tglobal RICforLocationAPIKey\n\tglobal GoogleAPIKey\n\n\ttry:\n\t\tif configHandler.checkConfig(\"sendTelegram\"): #read and debug the config (leave empty if no config is used)\n\n\t\t\t########## User Plugin CODE ##########\n\t\t\tif typ == \"POC\" and data[\"functionChar\"] != \"d\":\n\t\t\t\tlogging.debug(\"Compose output from POCSAG-message\")\n\t\t\t\t# compose message content\n\t\t\t\toutput = timeHandler.curtime()+\"\\n\"+data[\"ric\"]+\"(\"+data[\"functionChar\"]+\")\\n\"+data[\"description\"]+\"\\n\"+data[\"msg\"]\n\t\t\t\t\n\t\t\t\t# Initiate Telegram Bot\n\t\t\t\tlogging.debug(\"Initiate Telegram BOT\")\n\t\t\t\tbot = telegram.Bot(token='%s' % BOTTokenAPIKey)\t\n\n\t\t\t\t# Send message to chat via Telegram BOT API\n\t\t\t\tlogging.debug(\"Send message to chat via Telegram BOT API\")\n\t\t\t\tbot.sendMessage('%s' % BOTChatIDAPIKey, output)\n\n\t\t\t\t# Generate location information only for specific RIC\n\t\t\t\tif data[\"ric\"] == RICforLocationAPIKey:\t\t\t\t\n\t\t\t\t\t# Generate map\n\t\t\t\t\tlogging.debug(\"Extract address from POCSAG message\")\n\t\t\t\t\t#address = \"+\".join(data[\"msg\"].split('/')[1].split(')')[0].replace('(',' ').split())\n\t\t\t\t\taddress = \"+\".join(data[\"msg\"].split(')')[0].split('/',1)[1].replace('(',' ').split())\n\t\t\t\t\taddress = address.replace('Schelmenholz', 'Winnenden', 1)\n\t\t\t\t\n\t\t\t\t\tlogging.debug(\"Retrieve maps from Google\")\n\t\t\t\t\turl = \"+\".join([\"http://maps.googleapis.com/maps/api/staticmap?markers=\", address, \"&size=480x640&maptype=roadmap&zoom=16&key=\", GoogleAPIKey])\n\t\t\t\t\t#url = \"+\".join([\"http://maps.googleapis.com/maps/api/staticmap?markers=\", address, \"&size=240x320&scale=2&maptype=roadmap&zoom=15&key=\", GoogleAPIKey])\n\t\t\t\t\turllib.urlretrieve(url, \"overview_map.png\")\n\t\t\t\t\t#url = \"+\".join([\"http://maps.googleapis.com/maps/api/staticmap?markers=\", address, \"&size=480x640&maptype=hybrid&zoom=18&key=\", GoogleAPIKey])\n\t\t\t\t\turl = \"+\".join([\"http://maps.googleapis.com/maps/api/staticmap?markers=\", address, \"&size=240x320&scale=2&maptype=hybrid&zoom=17&key=\", GoogleAPIKey])\n\t\t\t\t\turllib.urlretrieve(url, \"detail_map.png\")\n\n\t\t\t\t\t# Send message and map with Telegram\n\t\t\t\t\tlogging.debug(\"Send message and maps via Telegram BOT\")\n\t\t\t\t\tbot.sendPhoto('%s' % BOTChatIDAPIKey, open('overview_map.png', 'rb'))\n\t\t\t\t\tbot.sendPhoto('%s' % BOTChatIDAPIKey, open('detail_map.png', 'rb'))\n\n\t\t\t\t\t# Geocoding of address\n\t\t\t\t\tlogging.debug(\"Geocode address\")\n\t\t\t\t\tgcode = googlemaps.Client(key='%s' % GoogleAPIKey)\n\t\t\t\t\tgcode_result = gcode.geocode(address)\n\t\t\t\t\tlogging.debug(\"Send location via Telegram BOT API\")\n\t\t\t\t\tbot.sendLocation('%s' % BOTChatIDAPIKey, gcode_result[0]['geometry']['location']['lat'], gcode_result[0]['geometry']['location']['lng'])\n\n\t\t\telif typ == \"FMS\":\n\t\t\t\tlogging.debug(\"FMS not supported yet\")\n\t\t\telif typ == \"ZVEI\":\n\t\t\t\tlogging.debug(\"ZVEI not supported yet\")\n\t\t\telse:\n\t\t\t\tlogging.warning(\"Invalid Typ: %s\", typ)\n\t\t\t########## User Plugin CODE ##########\n\n\texcept:\n\t\tlogging.error(\"unknown error\")\n\t\tlogging.debug(\"unknown error\", 
exc_info=True)\n","sub_path":"sendTelegram/sendTelegram.py","file_name":"sendTelegram.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"34318213","text":"from threading import Thread\nfrom multiprocessing import Pool\nclass Decryptor(object):\n \"\"\"Decrypts RSA-encrypted files, optionally in parallel using CRT components.\"\"\"\n def __init__(self):\n super().__init__()\n\n def decrypt(self, keys, CRT, k, path_to_file):\n return self.__decrypt_file(keys['private_d'], keys['public_n'], keys, path_to_file, CRT, k)\n # if False:\n # return self.count_with_CTR(encrypted, keys, k)\n # else:\n # decrypted = pow(encrypted, keys['private_d'], keys['public_n'])\n # return decrypted\n \n\n # def count_with_CTR(self, encrypted, keys, k):\n # threads = []\n # promises = []\n # results = []\n # result_dict = {} \n # #print(keys)\n # for i in range(k):\n # thread = Thread(target = self.compute_part_of_message, kwargs={\n # 'encrypted': encrypted, 'keys': keys, 'thread_num': i, 'result_dict': result_dict})\n # threads.append(thread)\n # thread.daemon = False\n # thread.start()\n # for thread in threads:\n # thread.join()\n # h_list = []\n # m_list = []\n # for i in range (1, k):\n # current_h = keys['inverted'][i] * (result_dict[i - 1] - result_dict[i]) % keys['primes'][i-1]\n # current_m = result_dict[i] + current_h * keys['primes'][i]\n # h_list.append(current_h)\n # m_list.append(current_m)\n \n def compute_part_of_message(self, encrypted, keys, t_id):\n d_power = keys['d_primes'][1]\n d = keys['primes'][1]\n decrypted = pow(int(encrypted), int(d_power), int(d))\n #result_dict[thread_num] = decrypted\n return (decrypted, t_id)\n\n def __decrypt_file(self, private_d, public_n, keys, path_to_file, CRT, k):\n if CRT:\n pool = Pool(processes = k)\n promises = []\n decrypted_data = ''\n with open(path_to_file, 'r') as f:\n encrypted_data = f.read()\n encrypted_data_chunks = list(map(''.join, zip(*[iter(encrypted_data)]*len(str(public_n)))))\n for i in range(len(encrypted_data_chunks)):\n stripped = encrypted_data_chunks[i].lstrip('0')\n if CRT:\n promise = pool.apply_async(self.compute_part_of_message, args=(stripped, keys, i))\n promises.append(promise)\n else:\n decrypted_data += chr(self.__decrypt_message(stripped, private_d, public_n))\n if CRT:\n results = [promise.get() for promise in promises]\n decrypted_sorted = sorted(results, key = lambda x: x[1])\n for data in decrypted_sorted:\n decrypted_data += chr(data[0])\n\n if CRT:\n pool.close()\n with open(path_to_file + '.dec', 'w') as f:\n f.write(decrypted_data)\n return decrypted_data\n\n def __decrypt_message(self, stripped, private_d, public_n):\n return pow(int(stripped), private_d, public_n)","sub_path":"Lista4/src/decryptor.py","file_name":"decryptor.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"560422477","text":"#!/usr/bin/env python\n# written by Philipp Resl\n\nimport os\nimport sys\nimport argparse\nfrom Bio import SeqIO\n\nif sys.version_info[0] < 3:\n raise Exception(\"Must be using Python 3\")\n\npars = argparse.ArgumentParser(prog=\"filter_alignments.py\", description = \"\"\"This script will remove alignments with duplicated sequences.\"\"\", epilog = \"\"\"written by Philipp Resl\"\"\")\npars.add_argument('--alignments', dest=\"align\", required=True, help=\"alignment files\")\npars.add_argument('--outdir', dest=\"outdir\", required=True, help=\"output 
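The Decryptor record above fans ciphertext chunks out to a multiprocessing.Pool with apply_async, tags each result with its chunk index, and restores the original order by sorting on that index. A self-contained sketch of the same scatter/gather pattern with a stand-in worker (all names here are illustrative, not from the record):

from multiprocessing import Pool

def worker(value, chunk_id):              # stand-in for compute_part_of_message
    return (value * value, chunk_id)      # return (result, id) so order can be restored

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        promises = [pool.apply_async(worker, args=(v, i))
                    for i, v in enumerate([3, 1, 4, 1, 5])]
        results = [p.get() for p in promises]
    ordered = [r for r, _ in sorted(results, key=lambda x: x[1])]
    print(ordered)                        # results back in submission order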
directory\")\npars.add_argument('--per_sample', dest=\"perseq\", action='store_true', help=\"if set, only samples with duplicated sequences will be removed. Else the whole alignment\")\nargs=pars.parse_args()\n\nalgn_list = args.align.split(\" \")\n\nfor al in algn_list:\n\tseqfile = open(al, \"r\")\n\tsequences = []\n\tfor seq_record in SeqIO.parse(seqfile, \"fasta\"):\n\t\tsequences.append(seq_record)\n\tnames_list = [seq.id for seq in sequences]\n\tif len(names_list) == len(set(names_list)):\n\t\tprint(\"Create symlink: \", args.outdir+\"/\"+al.split(\"/\")[-1])\n\t\tos.symlink(al, args.outdir+\"/\"+al.split(\"/\")[-1])\n\telse:\n\t\tprint(\"Warning: File %s contains duplicated sequence IDs!\" % al)\n\t\tif (args.perseq == True):\t\n\t\t\tprint(\"Duplicated sequences will be removed and copy of file will be made.\")\n\t\t\tdups = [name for name in names_list if names_list.count(name)>1]\n\t\t\tprint(\"Warning: Will remove %d sequences\" % len(dups))\n\t\t\tdups = set(dups)\n\t\t\tsequences = [sequence for sequence in sequences if sequence.id not in dups]\n\t\t\toutfile = open(args.outdir+\"/\"+al.split(\"/\")[-1], \"w\")\n\t\t\tfor sequence in sequences:\n\t\t\t\tprint(\">\"+sequence.id, file=outfile)\n\t\t\t\tprint(sequence.seq, file=outfile)\t\t\t\t\t\n\t\t\toutfile.close()\n\t\telse:\n\t\t\tprint(\"Warning: %s file will be discarded\" % al)\t\n\n\n","sub_path":"bin/filter_alignments.py","file_name":"filter_alignments.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"200800016","text":"import nltk\nimport csv\nnltk.download('punkt')\nfrom newspaper import Article\n\nwith open('fourth.csv', mode='w') as employee_file:\n my_file = open(\"small.txt\", \"r\")\n content_list = my_file.readlines()\n test_list = list(set(content_list))\n length = len(test_list)\n\n print(length)\n\n for i in test_list:\n print(i)\n toi_article = Article(i, language=\"en\") # en for English\n toi_article.download()\n toi_article.parse()\n toi_article.nlp()\n employee_writer = csv.writer(employee_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n employee_writer.writerow([toi_article.title])\n","sub_path":"readfromText.py","file_name":"readfromText.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"444337374","text":"import json\n\nclass Exercise():\n\n def __init__(self):\n self.name = \"\"\n self.ID = 0\n self.form = None\n self.joints = []\n self.reps = 0\n self.RepCounter = None\n self.RepStack = []\n\nclass ExerciseEncoder(json.JSONEncoder):\n def default(self,o):\n return o.__dict__\n","sub_path":"src/classes/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"222683787","text":"from django.urls import path\n\nfrom . 
import views\nfrom .feeds import BlogPostFeed\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('subscribe/thank-you/', views.subscribe_post, name='subscribe_post'),\n path('p/<slug:slug>/', views.content_page, name='page'),\n path('blog/', views.blog_index, name='blog_index'),\n path('blog/page/<int:page>/', views.blog_index, name='blog_index_page'),\n path('blog/<slug:slug>/', views.blog_post, name='blog_post'),\n # path('showcase/', views.showcase_index, name='showcase_index'),\n path('contribute/', views.contribute_index, name='contribute_index'),\n path('feed/', BlogPostFeed(), name='feed'),\n]\n","sub_path":"www/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"479154010","text":"\n\nfrom xai.brain.wordbase.nouns._scorcher import _SCORCHER\n\n#class header\nclass _SCORCHERS(_SCORCHER, ):\n\tdef __init__(self,): \n\t\t_SCORCHER.__init__(self)\n\t\tself.name = \"SCORCHERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"scorcher\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_scorchers.py","file_name":"_scorchers.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"493595648","text":"# ERPNext - web based ERP (http://erpnext.com)\n# Copyright (C) 2012 Web Notes Technologies Pvt Ltd\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport webnotes\nfrom webnotes.utils import cint, cstr, flt, getdate, now, nowdate\nfrom webnotes.model.doc import addchild\nfrom webnotes.model.doclist import getlist\nfrom webnotes.model.code import get_obj\nfrom webnotes import msgprint, errprint\n\nsql = webnotes.conn.sql\n\t\n# -----------------------------------------------------------------------------------------\n\n\nclass DocType:\n\tdef __init__(self, doc, doclist=[]):\n\t\tself.doc = doc\n\t\tself.doclist = doclist\n\t\tself.item_dict = {}\n\t\t\n\tdef get_item_details(self, item_code):\n\t\t\"\"\" Pull other item details from item master\"\"\"\n\n\t\titem = sql(\"\"\"select description, stock_uom, default_bom from `tabItem` where name = %s \n\t\t\tand (ifnull(end_of_life,'')='' or end_of_life = '0000-00-00' or end_of_life > now())\"\"\", item_code, as_dict =1 )\n\t\tret = {\n\t\t\t'description'\t: item and item[0]['description'],\n\t\t\t'stock_uom'\t\t: item and item[0]['stock_uom'],\n\t\t\t'bom_no'\t\t: item and item[0]['default_bom']\n\t\t}\n\t\treturn ret\n\t\n\n\tdef get_so_details(self, so):\n\t\t\"\"\"Pull other details from so\"\"\"\n\t\tso = sql(\"select transaction_date, customer, grand_total from `tabSales Order` where name = %s\", so, as_dict = 1)\n\t\tret = {\n\t\t\t'sales_order_date': so and so[0]['transaction_date'] or '',\n\t\t\t'customer' : so[0]['customer'] or '',\n\t\t\t'grand_total': so[0]['grand_total']\n\t\t}\n\t\treturn ret\n\n\n\tdef clear_so_table(self):\n\t\t\"\"\" Clears sales order table\"\"\"\n\t\tself.doc.clear_table(self.doclist, 'pp_so_details')\n\n\n\n\tdef clear_item_table(self):\n\t\t\"\"\" Clears item table\"\"\"\n\t\tself.doc.clear_table(self.doclist, 'pp_details')\n\n\n\n\tdef get_open_sales_orders(self):\n\t\t\"\"\" Pull sales orders which are pending delivery, based on the criteria selected\"\"\"\n\t\tcond = self.get_filter_condition()\n\t\topen_so = sql(\"\"\"\n\t\t\tselect \n\t\t\t\tdistinct t1.name, t1.transaction_date, t1.customer, t1.grand_total \n\t\t\tfrom \n\t\t\t\t`tabSales Order` t1, `tabSales Order Detail` t2, `tabDelivery Note Packing Detail` t3, tabItem t4\n\t\t\twhere \n\t\t\t\tt1.name = t2.parent and t1.name = t3.parent and t3.parenttype = 'Sales Order' and t1.docstatus = 1 and t2.item_code = t3.parent_item \n\t\t\t\tand t4.name = t3.item_code and t1.status != 'Stopped' and t1.company = '%s' and ifnull(t2.qty, 0) > ifnull(t2.delivered_qty, 0) \n\t\t\t\tand (ifnull(t4.is_pro_applicable, 'No') = 'Yes' or ifnull(t4.is_sub_contracted_item, 'No') = 'Yes') %s\n\t\t\torder by t1.name desc\n\t\t\"\"\"% (self.doc.company, cond), as_dict = 1)\n\n\t\tself.add_so_in_table(open_so)\n\n\n\n\tdef validate_company(self):\n\t\tif not self.doc.company:\n\t\t\tmsgprint(\"Please enter Company\", raise_exception=1)\n\n\t\t\n\n\tdef get_filter_condition(self):\n\t\tself.validate_company()\n\n\t\tcond = ''\n\t\tif self.doc.from_date:\n\t\t\tcond += ' and t1.transaction_date >= \"' + self.doc.from_date + '\"'\n\t\tif self.doc.to_date:\n\t\t\tcond += ' and t1.transaction_date <= \"' + self.doc.to_date + '\"'\n\t\tif self.doc.customer:\n\t\t\tcond += ' and t1.customer = \"' + self.doc.customer + '\"'\n\t\tif self.doc.fg_item:\n\t\t\tcond += ' and t3.item_code = \"' + self.doc.fg_item + '\"'\n\n\t\treturn cond\n\n\n\n\tdef add_so_in_table(self, open_so):\n\t\t\"\"\" Add sales orders in the table\"\"\"\n\t\tso_list = []\n\t\tfor d in getlist(self.doclist, 'pp_so_details'):\n\t\t\tso_list.append(d.sales_order)\n\t\tfor r in open_so:\n\t\t\tif cstr(r['name']) not in so_list:\n\t\t\t\tpp_so 
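get_filter_condition in the record above splices user-entered dates and names directly into the SQL string. Most DB-API drivers accept placeholders instead, which sidesteps quoting problems; a hedged sketch of the same filter built with parameters (generic %s placeholders, not the webnotes API):

def build_filter(from_date=None, to_date=None, customer=None):
    # Collect SQL fragments with placeholders plus the matching parameter list.
    cond, params = '', []
    if from_date:
        cond += ' and t1.transaction_date >= %s'
        params.append(from_date)
    if to_date:
        cond += ' and t1.transaction_date <= %s'
        params.append(to_date)
    if customer:
        cond += ' and t1.customer = %s'
        params.append(customer)
    return cond, params

print(build_filter(from_date='2012-01-01', customer='ACME'))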
= addchild(self.doc, 'pp_so_details', 'PP SO Detail', 1, self.doclist)\n\t\t\t\tpp_so.sales_order = r['name']\n\t\t\t\tpp_so.sales_order_date = cstr(r['transaction_date'])\n\t\t\t\tpp_so.customer = cstr(r['customer'])\n\t\t\t\tpp_so.grand_total = flt(r['grand_total'])\n\n\n\n\tdef get_items_from_so(self):\n\t\t\"\"\" Pull items from Sales Order; only production items\n\t\t\tand subcontracted items will be pulled from Packing items,\n\t\t\tand added to the table\n\t\t\"\"\"\n\t\tso = self.get_included_so()\n\t\titems = self.get_packing_items(so)\n\t\tself.add_items(items)\n\n\n\tdef get_included_so(self):\n\t\tso = \"'\" + \"','\".join([cstr(d.sales_order) for d in getlist(self.doclist, 'pp_so_details') if d.include_in_plan]) + \"'\"\n\t\treturn so\n\n\n\n\tdef get_packing_items(self, so):\n\t\tpacking_items = sql(\"\"\"\n\t\t\tselect \n\t\t\t\tt0.name, t2.parent_item, t2.item_code, \n\t\t\t\t(t1.qty - ifnull(t1.delivered_qty,0)) * (ifnull(t2.qty,0) / ifnull(t1.qty,1)) as 'pending_qty' \n\t\t\tfrom\n\t\t\t\t`tabSales Order` t0, `tabSales Order Detail` t1, `tabDelivery Note Packing Detail` t2, `tabItem` t3\n\t\t\twhere \n\t\t\t\tt0.name = t1.parent and t0.name = t2.parent and t1.name = t2.parent_detail_docname\n\t\t\t\tand t0.name in (%s) and t0.docstatus = 1 and t1.qty > ifnull(t1.delivered_qty,0) and t3.name = t2.item_code \n\t\t\t\tand (ifnull(t3.is_pro_applicable, 'No') = 'Yes' or ifnull(t3.is_sub_contracted_item, 'No') = 'Yes')\n\t\t\"\"\" % so, as_dict=1)\n\t\treturn packing_items\n\t\t\n\n\n\tdef add_items(self, packing_items):\n\t\tfor d in getlist(self.doclist, 'pp_details'):\n\t\t\tif d.sales_order:\n\t\t\t\td.parent = ''\n\n\t\tfor p in packing_items:\t\n\t\t\titem_details = sql(\"select description, stock_uom, default_bom from tabItem where name=%s\", p['item_code'])\n\t\t\tpi = addchild(self.doc, 'pp_details', 'PP Detail', 1, self.doclist)\n\t\t\tpi.sales_order\t\t\t\t= p['name']\n\t\t\tpi.parent_packing_item\t\t= p['parent_item']\n\t\t\tpi.item_code\t\t\t\t= p['item_code']\n\t\t\tpi.description\t\t\t\t= item_details and item_details[0][0] or ''\n\t\t\tpi.stock_uom\t\t\t\t= item_details and item_details[0][1] or ''\n\t\t\tpi.bom_no\t\t\t\t\t= item_details and item_details[0][2] or ''\n\t\t\tpi.so_pending_qty\t\t\t= flt(p['pending_qty'])\n\t\t\tpi.planned_qty\t\t\t\t= flt(p['pending_qty'])\n\t\n\n\n\tdef validate_data(self):\n\t\tfor d in getlist(self.doclist, 'pp_details'):\n\t\t\tif not d.pro_created:\n\t\t\t\tself.validate_bom_no(d)\n\n\t\t\t\tif not flt(d.planned_qty):\n\t\t\t\t\tmsgprint(\"Please Enter Planned Qty for item: %s at row no: %s\"% (d.item_code, d.idx), raise_exception=1)\n\t\treturn 'validated'\n\n\t\t\t\t\n\n\tdef validate_bom_no(self, d):\n\t\tif not d.bom_no:\n\t\t\tmsgprint(\"Please enter bom no for item: %s at row no: %s\" % (d.item_code, d.idx), raise_exception=1)\n\t\telse:\n\t\t\tbom = sql(\"\"\"select name from `tabBill Of Materials` where item = %s and docstatus = 1 \n\t\t\t\tand name = %s and ifnull(is_active, 'No') = 'Yes'\"\"\", (d.item_code, d.bom_no), as_dict = 1)\n\t\t\tif not bom:\n\t\t\t\tmsgprint(\"\"\"Incorrect BOM No: %s entered for item: %s at row no: %s\n\t\t\t\t\tMaybe the BOM is inactive, belongs to another item, or does not exist in the system\"\"\"% (d.bom_no, d.item_code, d.idx))\n\n\n\n\tdef download_raw_materials(self):\n\t\t\"\"\" Create csv data for required raw material to produce finished goods\"\"\"\n\t\tbom_dict = self.get_distinct_bom(action = 'download_rm')\n\t\tself.get_raw_materials(bom_dict)\n\t\treturn 
self.get_csv()\n\n\n\n\t\n\tdef get_raw_materials(self, bom_dict):\n\t\t\"\"\" Get raw materials considering sub-assembly items \"\"\"\n\t\tfor bom in bom_dict:\n\t\t\tif self.doc.consider_sa_items == 'Yes':\n\t\t\t\t# Get all raw materials considering SA items as raw materials, \n\t\t\t\t# so no childs of SA items\n\t\t\t\tfl_bom_items = sql(\"\"\"\n\t\t\t\t\tselect item_code, ifnull(sum(qty_consumed_per_unit), 0) * '%s', description, stock_uom \n\t\t\t\t\tfrom `tabBOM Material` \n\t\t\t\t\twhere parent = '%s' and docstatus < 2 \n\t\t\t\t\tgroup by item_code\n\t\t\t\t\"\"\" % (flt(bom_dict[bom]), bom))\n\n\t\t\telse:\n\t\t\t\t# get all raw materials with sub assembly childs\t\t\t\t\t\n\t\t\t\tfl_bom_items = sql(\"\"\"\n\t\t\t\t\tselect \n\t\t\t\t\t\titem_code,ifnull(sum(qty_consumed_per_unit),0)*%s as qty, description, stock_uom\n\t\t\t\t\tfrom \n\t\t\t\t\t\t( \n\t\t\t\t\t\t\tselect distinct fb.name, fb.description, fb.item_code, fb.qty_consumed_per_unit, fb.stock_uom \n\t\t\t\t\t\t\tfrom `tabFlat BOM Detail` fb,`tabItem` it \n\t\t\t\t\t\t\twhere it.name = fb.item_code and ifnull(it.is_pro_applicable, 'No') = 'No'\n\t\t\t\t\t\t\tand ifnull(it.is_sub_contracted_item, 'No') = 'No' and fb.docstatus<2 and fb.parent=%s\n\t\t\t\t\t\t) a\n\t\t\t\t\tgroup by item_code,stock_uom\n\t\t\t\t\"\"\" , (flt(bom_dict[bom]), bom))\n\t\t\t\n\t\t\tself.make_items_dict(fl_bom_items)\n\n\n\n\tdef make_items_dict(self, item_list):\n\t\tfor i in item_list:\n\t\t\tself.item_dict[i[0]] = [(flt(self.item_dict.get(i[0], [0])[0]) + flt(i[1])), i[2], i[3]]\n\n\n\tdef get_csv(self):\n\t\titem_list = [['Item Code', 'Description', 'Stock UOM', 'Required Qty', 'Warehouse', 'Indented Qty', 'Ordered Qty', 'Actual Qty']]\n\t\tfor d in self.item_dict:\n\t\t\titem_list.append([d, self.item_dict[d][1], self.item_dict[d][2], self.item_dict[d][0]]),\n\t\t\titem_qty= sql(\"select warehouse, indented_qty, ordered_qty, actual_qty from `tabBin` where item_code = %s\", d)\n\t\t\ti_qty, o_qty, a_qty = 0,0,0\n\t\t\tfor w in item_qty:\n\t\t\t\ti_qty, o_qty, a_qty = i_qty + flt(w[1]), o_qty + flt(w[2]), a_qty + flt(w[3])\n\t\t\t\titem_list.append(['', '', '', '', w[0], flt(w[1]), flt(w[2]), flt(w[3])])\n\t\t\tif item_qty:\n\t\t\t\titem_list.append(['', '', '', '', 'Total', i_qty, o_qty, a_qty])\n\n\t\treturn item_list\n\t\t\n\n\n\tdef raise_production_order(self):\n\t\t\"\"\"It will raise production order (Draft) for all distinct FG items\"\"\"\n\t\tself.validate_company()\n\t\tself.validate_data()\n\n\t\tpp_items = self.get_distinct_bom(action = 'raise_pro_order')\n\t\tpro = get_obj(dt = 'Production Control').create_production_order(self.doc.company, pp_items)\n\t\tif pro:\n\t\t\tfor d in getlist(self.doclist, 'pp_details'):\n\t\t\t\td.is_pro_created = 1\n\t\t\tmsgprint(\"Following Production Order has been generated:\\n\" + '\\n'.join(pro))\n\t\telse :\n\t\t\tmsgprint(\"No Production Order is generated.\")\n\n\n\n\tdef get_distinct_bom(self, action):\n\t\t\"\"\" Club similar BOM and item for processing\"\"\"\n\n\t\tbom_dict, item_dict, pp_items = {}, {}, []\n\t\tfor d in getlist(self.doclist, 'pp_details'):\n\t\t\tif action == 'download_rm':\n\t\t\t\tbom_dict[d.bom_no] = bom_dict.get(d.bom_no, 0) + flt(d.planned_qty)\n\t\t\telif not d.is_pro_created:\n\t\t\t\titem_dict[d.item_code] = [(item_dict.get(d.item_code, 0) + flt(d.planned_qty)), d.bom_no, d.description, d.stock_uom]\n\n\t\tif action == 'raise_pro_order':\n\t\t\tfor d in item_dict:\n\t\t\t\tpp_items.append({\n\t\t\t\t\t'production_item'\t: d, \n\t\t\t\t\t'qty'\t\t\t\t: 
item_dict[d][0],\n\t\t\t\t\t'bom_no'\t\t\t: item_dict[d][1],\n\t\t\t\t\t'description'\t\t: item_dict[d][2],\n\t\t\t\t\t'stock_uom'\t\t\t: item_dict[d][3],\n\t\t\t\t\t'consider_sa_items' : self.doc.consider_sa_items\n\t\t\t\t})\n\n\t\treturn action == 'download_rm' and bom_dict or pp_items\n","sub_path":"erpnext/production/doctype/production_planning_tool/production_planning_tool.py","file_name":"production_planning_tool.py","file_ext":"py","file_size_in_byte":10554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"558728218","text":"import logging\nimport sqlite3\nfrom sqlite3 import Row\n\nfrom .constants import DEFAULT_DB_CREATE_SCHEMA_FILE_PATH\nfrom .utilities import Utility\n\nlogger = logging.getLogger(__name__)\n\n\nclass Database(object):\n __slots__ = []\n\n _connection = None\n _cursor = None\n _database_file_path = None\n\n @classmethod\n def _create_schema(cls):\n cls._cursor.executescript(Utility.read_file(DEFAULT_DB_CREATE_SCHEMA_FILE_PATH))\n\n @classmethod\n def close_connection(cls):\n logger.debug(\n 'Close connection to SQLite database\\nSQLite database file => %s',\n cls._database_file_path,\n )\n\n cls._cursor.close()\n cls._connection.close()\n\n @classmethod\n def commit(cls):\n cls._connection.commit()\n\n @classmethod\n def execute(cls, sql_statement, parameters):\n cls._cursor.execute(sql_statement, parameters)\n\n return cls._cursor.fetchall()\n\n @classmethod\n def get_row_count(cls):\n return cls._cursor.rowcount\n\n @classmethod\n def open_connection(cls, database_file_path):\n cls._database_file_path = database_file_path\n\n cls._connection = sqlite3.connect(cls._database_file_path)\n cls._connection.row_factory = Row\n cls._cursor = cls._connection.cursor()\n\n logger.debug(\n 'Opened connection to SQLite database\\nSQLite database file => %s',\n database_file_path,\n )\n\n cls._create_schema()\n","sub_path":"smooth_streams_epg_generator/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"175710566","text":"# -*- coding: utf-8 -*-\n\"\"\"A module for handling with typing and type hints.\n\nFunctions:\n cast: Casts a value to a specific type.\n get_signature: Retrieves the signature of a function.\n get_type_hints: Gets all type hints for an object, including comment type\n hints.\n\nClasses:\n Bounded: A sliceable subclass of any class that raises a ValueError if the\n initialization value is out of bounds.\n Length: A sliceable subclass of any class that implements __len__ that\n raises a ValueError if the length of the initialization value is out of\n bounds.\n Singleton: A metaclass to force a class to only ever be instantiated once.\n\nInstances:\n NoneType: A type alias for type(None)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom typing import ( # type: ignore\n _eval_type,\n _ForwardRef,\n _get_defaults\n)\nfrom typing import ( # noqa: F401 pylint: disable=unused-import\n Any,\n ByteString,\n Callable,\n cast as std_cast,\n Dict,\n Generator,\n get_type_hints as std_get_type_hints,\n List,\n MutableSequence,\n MutableSet,\n Optional,\n Sequence,\n Tuple,\n Type,\n TypeVar,\n Union\n)\nimport collections\nimport functools\nimport inspect\nimport logging\nimport re\nimport sys\nimport tokenize\nimport types\n\nimport six\n\nfrom . 
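The Database class in the db.py record above sets row_factory to sqlite3.Row before creating its cursor, so fetched rows can be read by column name as well as by index. A minimal in-memory demonstration of that behavior:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = sqlite3.Row            # rows become name-addressable
cur = conn.cursor()
cur.execute('CREATE TABLE channel (id INTEGER, name TEXT)')
cur.execute('INSERT INTO channel VALUES (?, ?)', (1, 'news'))
conn.commit()
row = cur.execute('SELECT id, name FROM channel').fetchone()
print(row['name'], row[1])                # access by name or by position
conn.close()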
import exceptions as exc\nfrom .decorators import memoize\n\n\n_LOGGER = logging.getLogger(__name__)\n\n_T = TypeVar('_T')\n\n_SEQUENCE_TYPES = (\n list,\n tuple,\n set,\n frozenset,\n collections.deque,\n collections.Counter,\n MutableSequence,\n MutableSet,\n Tuple\n)\n\nNoneType = type(None)\n\n\nclass Singleton(type):\n \"\"\"A metaclass to turn a class into a singleton.\"\"\"\n\n __instance__ = None # type: type\n\n def __call__(cls, *args, **kwargs):\n # type: (*Any, **Any) -> type\n \"\"\"Instantiate the class only once.\"\"\"\n if not cls.__instance__:\n cls.__instance__ = super(Singleton, cls).__call__(*args, **kwargs)\n return cls.__instance__\n\n\nclass _Uninstantiable(type):\n \"\"\"A metaclass that disallows instantiation.\"\"\"\n\n def __call__(cls, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Do not allow the class to be instantiated.\"\"\"\n raise TypeError('Type {} cannot be instantiated.'.format(cls.__name__))\n\n\nclass _ClsReprMeta(type):\n \"\"\"A metaclass that returns a custom type repr if defined.\"\"\"\n\n __class_repr__ = None # type: Optional[str]\n\n def __repr__(cls):\n # type: () -> str\n \"\"\"Return a custom string for the type repr if defined.\"\"\"\n if cls.__class_repr__:\n return cls.__class_repr__\n return super(cls.__class__, cls).__repr__()\n\n\nclass _BoundedMeta(_Uninstantiable):\n \"\"\"A metaclass that adds slicing to a class that creates new classes.\"\"\"\n\n def __getitem__(cls, args):\n # type: (Union[Tuple[_T, Any], Tuple[_T, Any, Callable]]) -> type\n \"\"\"Create a new subclass of a type bounded by the arguments.\n\n If a callable is passed as the third argument of the slice, it will be\n used as the comparison function for the boundaries.\n\n Args:\n args: A tuple with two or three parameters: a type, a slice\n representing the minimum and maximum lengths allowed for values\n of that type and, optionally, a function to use on values\n before comparing against the bounds.\n \"\"\"\n type_, bound, keyfunc = cls._get_args(args)\n keyfunc_name = cls._get_fullname(keyfunc)\n identity = cls._identity\n try:\n class _(type_): # type: ignore\n \"\"\"Check if type_ is subclassable.\"\"\"\n BaseClass = type_\n except TypeError:\n BaseClass = object # type: ignore\n\n class _BoundedSubclassMeta(\n _ClsReprMeta, BaseClass.__class__): # type: ignore\n \"\"\"Use the type_ metaclass and include class repr functionality.\"\"\"\n\n @six.add_metaclass(_BoundedSubclassMeta)\n class _BoundedSubclass(BaseClass): # type: ignore\n \"\"\"A subclass of type_ or object, bounded by a slice.\"\"\"\n\n def __new__(cls, __value, *args, **kwargs):\n # type: (Any, *Any, **Any) -> _T\n \"\"\"Return __value cast to _T.\n\n Any additional arguments are passed as-is to the constructor.\n\n Args:\n __value: A value that can be converted to type _T.\n args: Any additional positional arguments passed to the\n constructor.\n kwargs: Any additional keyword arguments passed to the\n constructor.\n \"\"\"\n try:\n instance = BaseClass(__value, *args, **kwargs)\n except TypeError:\n instance = __value\n cmp_val = keyfunc(instance)\n if bound.start and cmp_val < bound.start:\n if keyfunc is not identity:\n raise ValueError(\n 'The value of {}({}) [{}] is below the minimum '\n 'allowed value of {}.'.format(\n keyfunc_name, repr(__value), repr(cmp_val),\n bound.start))\n raise ValueError(\n 'The value {} is below the minimum allowed value '\n 'of {}.'.format(repr(__value), bound.start))\n if bound.stop and cmp_val > bound.stop:\n if keyfunc is not identity:\n raise 
ValueError(\n 'The value of {}({}) [{}] is above the maximum'\n ' allowed value of {}.'.format(\n keyfunc_name, repr(__value), repr(cmp_val),\n bound.stop))\n raise ValueError(\n 'The value {} is above the maximum allowed value '\n 'of {}.'.format(repr(__value), bound.stop))\n return instance\n\n _BoundedSubclass.__class_repr__ = cls._get_class_repr(\n type_, bound, keyfunc, keyfunc_name)\n return _BoundedSubclass\n\n def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):\n # type: (Any, slice, Callable, str) -> str\n \"\"\"Return a class representation using the slice parameters.\n\n Args:\n type_: The type the class was sliced with.\n bound: The boundaries specified for the values of type_.\n keyfunc: The comparison function used to check the value\n boundaries.\n keyfunc_name: The name of keyfunc.\n\n Returns:\n A string representing the class.\n \"\"\"\n if keyfunc is not cls._default:\n return '{}.{}[{}, {}, {}]'.format(\n cls.__module__, cls.__name__, cls._get_fullname(type_),\n cls._get_bound_repr(bound), keyfunc_name)\n else:\n return '{}.{}[{}, {}]'.format(\n cls.__module__, cls.__name__, cls._get_fullname(type_),\n cls._get_bound_repr(bound))\n\n def _get_args(cls, args):\n # type: (tuple) -> Tuple[Any, slice, Callable]\n \"\"\"Return the parameters necessary to check type boundaries.\n\n Args:\n args: A tuple with two or three elements: a type, a slice\n representing the minimum and maximum lengths allowed for values\n of that type and, optionally, a function to use on values\n before comparing against the bounds.\n\n Returns:\n A tuple with three elements: a type, a slice, and a function to\n apply to objects of the given type. If no function was specified,\n it returns the identity function.\n \"\"\"\n if not isinstance(args, tuple):\n raise TypeError(\n '{}[...] 
takes two or three arguments.'.format(cls.__name__))\n elif len(args) == 2:\n type_, bound = args\n keyfunc = cls._identity\n elif len(args) == 3:\n type_, bound, keyfunc = args\n else:\n raise TypeError(\n 'Too many parameters given to {}[...]'.format(cls.__name__))\n if not isinstance(bound, slice):\n bound = slice(bound)\n if isinstance(type_, six.string_types):\n # pragma pylint: disable=protected-access\n type_ = _ForwardRef(type_)._eval_type(globals(), globals())\n # pragma pylint: enable=protected-access\n return type_, bound, keyfunc\n\n @staticmethod\n def _get_bound_repr(bound):\n # type: (slice) -> str\n \"\"\"Return a string representation of a boundary slice.\n\n Args:\n bound: A slice object.\n\n Returns:\n A string representing the slice.\n \"\"\"\n if bound.start and not bound.stop:\n return '{}:'.format(bound.start)\n if bound.stop and not bound.start:\n return ':{}'.format(bound.stop)\n return '{}:{}'.format(bound.start, bound.stop)\n\n @staticmethod\n def _identity(obj):\n # type: (_T) -> _T\n \"\"\"Return the given object.\n\n Args:\n obj: An object.\n\n Returns:\n The given object.\n \"\"\"\n return obj\n\n _default = _identity # type: Callable[[Any], Any]\n\n @staticmethod\n def _get_fullname(obj):\n # type: (Any) -> str\n \"\"\"Get the full name of an object including the module.\n\n Args:\n obj: An object.\n\n Returns:\n The full class name of the object.\n \"\"\"\n if not hasattr(obj, '__name__'):\n obj = obj.__class__\n if obj.__module__ in ('builtins', '__builtin__'):\n return obj.__name__\n return '{}.{}'.format(obj.__module__, obj.__name__)\n\n\n@six.add_metaclass(_BoundedMeta)\nclass Bounded(object):\n \"\"\"A type that creates a bounded version of a type when sliced.\n\n Bounded can be sliced with two or three elements: a type, a slice\n representing the minimum and maximum lengths allowed for values of that\n type and, optionally, a function to use on values before comparing against\n the bounds.\n\n >>> Bounded[int, 5:10](7)\n 7\n >>> Bounded[int, 5:10](1)\n Traceback (most recent call last):\n ...\n ValueError: The value 1 is below the minimum allowed value of 5.\n >>> Bounded[int, 5:10](11)\n Traceback (most recent call last):\n ...\n ValueError: The value 11 is above the maximum allowed value of 10.\n >>> Bounded[str, 5:10, len]('abcde')\n 'abcde'\n \"\"\"\n\n\nclass _LengthBoundedMeta(_BoundedMeta):\n \"\"\"A metaclass that bounds a type with the len function.\"\"\"\n\n _default = len\n\n def _get_args(cls, args):\n # type: (tuple) -> Tuple[Type[_T], slice, Callable]\n \"\"\"Return the parameters necessary to check type boundaries.\n\n Args:\n args: A tuple with two parameters: a type, and a slice representing\n the minimum and maximum lengths allowed for values of that\n type.\n\n Returns:\n A tuple with three parameters: a type, a slice, and the len\n function.\n \"\"\"\n if not isinstance(args, tuple) or not len(args) == 2:\n raise TypeError(\n '{}[...] 
takes exactly two arguments.'.format(cls.__name__))\n return super(_LengthBoundedMeta, cls)._get_args(args + (len,))\n\n\n@six.add_metaclass(_LengthBoundedMeta)\nclass Length(object):\n \"\"\"A type that creates a length bounded version of a type when sliced.\n\n Length can be sliced with two parameters: a type, and a slice representing\n the minimum and maximum lengths allowed for values of that type.\n\n >>> Length[str, 5:10]('abcde')\n 'abcde'\n >>> Length[str, 5:10]('abc')\n Traceback (most recent call last):\n ...\n ValueError: The value of len('abc') [3] is below the minimum ...\n >>> Length[str, 5:10]('abcdefghijk')\n Traceback (most recent call last):\n ...\n ValueError: The value of len('abcdefghijk') [11] is above the maximum ...\n \"\"\"\n\n\n@memoize\ndef get_signature(func):\n # type: (Callable) -> Tuple[Tuple[str, ...], str, str]\n \"\"\"Return the signature of the given function.\n\n inspect.getargspec() no longer exists as of Python 3.6, so detect the\n correct method of accessing the signature for each language and return the\n list of argument names.\n\n Args:\n func: The function from which to retrieve parameter names.\n\n Returns:\n A list of valid parameter names for the given function.\n \"\"\"\n getargspec = getattr(\n inspect, 'get{}argspec'.format('full' if six.PY3 else ''))\n args, vararg, kwarg = getargspec(func)[:3]\n args = args[1:] if isinstance(func, types.MethodType) else args\n _LOGGER.debug('Found signature parameters: %s', (args, vararg, kwarg))\n return args, vararg, kwarg\n\n\ndef get_type_hints(obj, # type: Any\n globalns=None, # type: Optional[Dict[str, Any]]\n localns=None # type: Optional[Dict[str, Any]]\n ):\n # type: (...) -> Dict[str, Any]\n \"\"\"Return all type hints for the function.\n\n This attempts to use typing.get_type_hints first, but if that returns None\n then it will attempt to reuse much of the logic from the Python 3 version\n of typing.get_type_hints; the Python 2 version does nothing. In addition to\n this logic, if no code annotations exist, it will attempt to extract\n comment type hints for Python 2/3 compatibility.\n\n Args:\n obj: The object to search for type hints.\n globalns: The currently known global namespace.\n localns: The currently known local namespace.\n\n Returns:\n A mapping of value names to type hints.\n \"\"\"\n # pragma pylint: disable=protected-access\n globalns, localns = _get_namespace(obj, globalns, localns)\n hints = std_get_type_hints(obj, globalns, localns) or {} # type: ignore\n if not hints and not getattr(obj, '__no_type_check__', None):\n hints.update(getattr(obj, '__annotations__', {}))\n if not hints:\n hints.update(_get_comment_type_hints(obj, globalns, localns))\n for name, value in six.iteritems(hints):\n if value is None:\n value = NoneType\n if isinstance(value, six.string_types):\n value = _ForwardRef(value)\n value = _eval_type(\n value, globalns, localns)\n if _is_optional(obj, name):\n value = Optional[value] # type: ignore\n hints[name] = value\n return hints\n # pragma pylint: enable=protected-access\n\n\ndef _get_namespace(obj, # type: Any\n globalns, # type: Optional[Dict[str, Any]]\n localns # type: Optional[Dict[str, Any]]\n ):\n # type: (...) 
-> Tuple[Dict[str, Any], Dict[str, Any]]\n \"\"\"Retrieve the global and local namespaces for an object.\n\n Args:\n obj: An object.\n globalns: The currently known global namespace.\n localns: The currently known local namespace.\n\n Returns:\n A tuple containing two dictionaries for the global and local namespaces\n to be used by eval.\n \"\"\"\n if globalns is None:\n globalns = getattr(obj, '__globals__', {})\n if localns is None:\n localns = globalns\n elif localns is None:\n localns = globalns\n return globalns, localns\n\n\ndef _get_comment_type_hints(func, # type: Callable\n globalns, # type: Dict[str, Any]\n localns # type: Dict[str, Any]\n ):\n # type: (...) -> Dict[str, Any]\n \"\"\"Get a mapping of parameter names to type hints from type hint comments.\n\n Args:\n func: The function to search for type hint comments.\n\n Returns:\n A dictionary mapping the function parameters to the type hints found\n for each parameter in the type hint comments.\n \"\"\"\n try:\n source = inspect.getsource(func)\n except IOError:\n return {}\n hints = {}\n full_signature = get_signature(func)\n signature = full_signature[0] + list(s for s in full_signature[1:] if s)\n for comment in _get_type_comments(source):\n name, value = comment\n name = name.strip()\n value = value.strip()\n if name in signature:\n hints[name] = value\n elif name.startswith('(') and name.endswith(')'):\n hints['return'] = value\n type_values = _parse_short_form(name, globalns, localns)\n if len(type_values) == len(signature) + 1:\n type_values = type_values[1:]\n if len(type_values) == len(signature):\n hints.update(zip(signature, type_values)) # type: ignore\n return hints\n\n\ndef _is_optional(func, name):\n # type: (Callable, str) -> bool\n \"\"\"Determine if the argument is optional for the function.\n\n Args:\n func: A function that takes arguments.\n name: The name of an argument to the function.\n\n Returns:\n True if the argument is optional for the function; otherwise, False.\n \"\"\"\n defaults = _get_func_defaults(func)\n return bool(name in defaults and defaults[name] is None)\n\n\n@memoize\ndef _get_func_defaults(func):\n # type: (Callable) -> Dict[str, Any]\n \"\"\"Get the default values for the function parameters.\n\n Args:\n func: The function to inspect.\n\n Returns:\n A mapping of parameter names to default values.\n \"\"\"\n # pragma pylint: disable=protected-access\n _func_like = functools.wraps(func)(lambda: None)\n if not hasattr(_func_like, '__kwdefaults__'): # type: ignore\n _func_like.__kwdefaults__ = {} # type: ignore\n return _get_defaults(_func_like)\n # pragma pylint: enable=protected-access\n\n\ndef _get_type_comments(source):\n # type: (str) -> Generator[Tuple[str, str], None, None]\n \"\"\"Yield type hint comments from the source code.\n\n Args:\n source: The source code of the function to search for type hint\n comments.\n\n Yields:\n All type comments that come before the body of the function as\n (name, type) pairs, where the name is the name of the variable and\n type is the type hint. 
If a short-form type hint is reached, it is\n yielded as a single string containing the entire type hint.\n \"\"\"\n reader = six.StringIO(source).readline\n name = last_token = None\n tokens = tokenize.generate_tokens(reader) # type: ignore\n for token, value, _, _, _ in tokens:\n if token == tokenize.INDENT:\n return\n if token == tokenize.NAME:\n name = value\n elif token == tokenize.COMMENT:\n match = re.match(r'#\\s*type:(.+)', value)\n if match:\n type_sig = match.group(1).strip()\n if '->' in type_sig and last_token == tokenize.NEWLINE:\n yield type_sig.split('->')\n elif name:\n yield name, type_sig\n name = None\n last_token = token\n\n\ndef _parse_short_form(comment, globalns, localns):\n # type: (str, Dict[str, Any], Dict[str, Any]) -> Tuple[type, ...]\n \"\"\"Return the hints from the comment.\n\n Parses the left-hand side of a type comment into a list of type objects.\n (e.g. everything to the left of \"->\").\n\n Returns:\n A list of types evaluated from the type comment in the given global\n name space.\n \"\"\"\n if '(...)' in comment:\n return ()\n comment = comment.replace('*', '')\n hints = eval(comment, globalns, localns) # pylint: disable=eval-used\n if not isinstance(hints, tuple):\n hints = (hints,)\n return hints\n\n\ndef cast(type_, value):\n # type: (Type[_T], Any) -> _T\n \"\"\"Cast the value to the given type.\n\n Args:\n type_: The type the value is expected to be cast.\n value: The value to cast.\n\n Returns:\n The cast value if it was possible to determine the type and cast it;\n otherwise, the original value.\n \"\"\"\n assert type_ is not NoneType\n if type_ is Any:\n return value\n if type_ is ByteString:\n return value.encode(sys.stdin.encoding or sys.getdefaultencoding())\n if isinstance(type_, type):\n if any(issubclass(type_, typ) # type: ignore\n for typ in _SEQUENCE_TYPES):\n return _cast_sequence(type_, value) # type: ignore\n for typ in _get_cast_types(type_):\n try:\n return typ(value)\n except (TypeError, ValueError):\n pass\n raise exc.CastError(type_, value)\n\n\ndef _get_cast_types(type_):\n # type: (Any) -> List[Union[type, Callable]]\n \"\"\"Return all type callable type constraints for the given type.\n\n Args:\n type_: The type variable that may be callable or constrainted.\n\n Returns:\n A list of all callable type constraints for the type.\n \"\"\"\n cast_types = [type_] if callable(type_) else []\n if (hasattr(type_, '__constraints__') and\n isinstance(type_.__constraints__, collections.Iterable)):\n cast_types.extend(type_.__constraints__)\n if (hasattr(type_, '__args__') and\n isinstance(type_.__args__, collections.Iterable)):\n cast_types.extend(type_.__args__)\n return cast_types\n\n\ndef _cast_sequence(type_, value):\n # type: (Any, Any) -> Sequence[_T]\n \"\"\"Cast the value to the given sequence type.\n\n Args:\n type_: The type the value is expected to be cast.\n value: The value to cast.\n\n Returns:\n A sequence containing all of the values in value cast to the\n appropriate type if it was possible to determine a type and\n successfully cast the value to it. 
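_get_type_comments in the typing.py record above tokenizes the function source and matches COMMENT tokens against a '# type:' pattern. A compact demonstration of that tokenize/re combination on a small source string:

import re
import tokenize
from io import StringIO

source = "def f(a, b):\n    # type: (int, str) -> bool\n    return True\n"
for tok, value, _, _, _ in tokenize.generate_tokens(StringIO(source).readline):
    if tok == tokenize.COMMENT:
        match = re.match(r'#\s*type:(.+)', value)
        if match:
            print(match.group(1).strip())  # prints: (int, str) -> bool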
If the value is a string, it will\n attempt to parse it as a CSV string.\n \"\"\"\n if issubclass(type_, (tuple, Tuple)): # type: ignore\n return _cast_tuple(type_, value)\n if hasattr(type_, '__args__') and type_.__args__:\n typ = type_.__args__[0]\n value = [cast(typ, v) for v in value]\n if issubclass(type_, (set, MutableSet)):\n return std_cast(Sequence[_T], set(value))\n if issubclass(type_, frozenset):\n return std_cast(Sequence[_T], frozenset(value))\n if issubclass(type_, collections.deque):\n return std_cast(Sequence[_T], collections.deque(value))\n if issubclass(type_, collections.Counter):\n return std_cast(Sequence[_T], collections.Counter(value))\n return list(value)\n\n\ndef _cast_tuple(type_, values):\n # type: (Type[_T], Any) -> Tuple[_T, ...]\n \"\"\"Cast the value to a tuple.\n\n Args:\n type_: The type the value is expected to be cast.\n values: A list of values to be converted to a tuple and cast using\n tuple logic.\n\n Returns:\n A tuple containing all of the values cast to the appropriate types.\n\n Raises:\n ValueError: Raised if the number of tuple parameter type arguments does\n not match the number of arguments in the values.\n \"\"\"\n has_args = hasattr(type_, '__args__') and type_.__args__ # type: ignore\n tuple_types = type_.__args__ if has_args else () # type: ignore\n if not tuple_types and hasattr(type_, '__tuple_params__'):\n tuple_types = type_.__tuple_params__ # type: ignore\n if tuple_types:\n if (len(tuple_types) == 2 and tuple_types[1] is Ellipsis) or (\n len(tuple_types) == 1 and\n getattr(type_, '__tuple_use_ellipsis__', None)):\n values = [cast(tuple_types[0], val) for val in values]\n elif len(values) != len(tuple_types):\n raise exc.CastError(type_, values)\n else:\n values = [cast(typ, val) for typ, val in zip(tuple_types, values)]\n return tuple(values)\n","sub_path":"rcli/typing.py","file_name":"typing.py","file_ext":"py","file_size_in_byte":23927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"402424024","text":"\n# data = {\n# \"bars\": [\n# {\n# \"t\": \"2021-11-01T08:25:00Z\",\n# \"o\": 332.9,\n# \"h\": 332.9,\n# \"l\": 332.9,\n# \"c\": 332.9,\n# \"v\": 694,\n# \"n\": 34,\n# \"vw\": 332.988963\n# },\n# {\n# \"t\": \"2021-11-01T08:28:00Z\",\n# \"o\": 332.9,\n# \"h\": 332.9,\n# \"l\": 332.9,\n# \"c\": 332.9,\n# \"v\": 419,\n# \"n\": 16,\n# \"vw\": 332.934129\n# }\n# ],\n# \"symbol\": \"MSFT\",\n# \"next_page_token\": \"null\"\n# }\n\nfrom redisTimeseriesData import RealTimeBars\nfrom redisUtil import RedisTimeFrame, TimeStamp, AlpacaAccess\nfrom datetime import datetime\n\nrtb = RealTimeBars()\n\n\ndef print_bar(data):\n print(\"Bars:\")\n for bar in data[\"data\"]:\n\n # timestamp to datetime\n dt_object = datetime.fromtimestamp(bar['t'])\n # format datetime to string\n dt_string = dt_object.strftime(\"%H:%M:%S\")\n bar['t'] = dt_string\n print(\"\\t{}\".format(bar))\n\n\ndef print_datetime_now():\n # timestamp to datetime\n dt_object = datetime.fromtimestamp(TimeStamp.now())\n # format datetime to string\n dt_string = dt_object.strftime(\"%H:%M:%S\")\n print(\"\\t{}\".format(dt_string))\n\n\nsymbol = \"FANG\"\ndata1 = rtb.RedisGetRealtimeData(None, symbol, RedisTimeFrame.MIN1)\ndata2 = rtb.RedisGetRealtimeData(None, symbol, RedisTimeFrame.MIN2)\ndata5 = rtb.RedisGetRealtimeData(None, symbol, RedisTimeFrame.MIN5)\n\nprint_datetime_now()\nprint_bar(data1)\nprint_bar(data2)\nprint_bar(data5)\n\napi = AlpacaAccess.connection()\nassets = 
api.list_assets(status='active')\nprint(assets)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"217378799","text":"from __future__ import print_function\nimport pprint\nimport os\nimport cPickle\nimport mxnet as mx\nimport numpy as np\n\nfrom ..symbol import *\nfrom ..dataset import *\nfrom ..core.loader import TestLoader\nfrom ..core.tester import Predictor\nfrom ..utils.load_model import load_param\nfrom ..config import config, generate_config, default\nfrom ..core.tester import im_detect\nfrom ..utils import sliding_window\nfrom ..io.image import transform\nfrom ..processing.nms import py_nms_wrapper\nfrom tqdm import tqdm\n\n\ndef test_predictor(network, dataset, image_set, root_path, dataset_path,\n ctx, prefix, epoch,\n shuffle, has_rpn, proposal):\n # set config\n if has_rpn:\n config.TEST.HAS_RPN = True\n\n # print config\n # pprint.pprint(config)\n\n # load symbol and testing data\n if has_rpn:\n sym = eval('get_' + network + '_test')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)\n imdb = eval(dataset)(image_set, root_path, dataset_path, ['__background__', 'adult_males', 'subadult_males', 'adult_females', 'juveniles',\n 'pups'])\n roidb = imdb.gt_roidb()\n else:\n sym = eval('get_' + network + '_rcnn_test')(num_classes=config.NUM_CLASSES)\n imdb = eval(dataset)(image_set, root_path, dataset_path, ['__background__', 'adult_males', 'subadult_males', 'adult_females', 'juveniles',\n 'pups'])\n gt_roidb = imdb.gt_roidb()\n roidb = eval('imdb.' + proposal + '_roidb')(gt_roidb)\n\n # get test data iter\n test_data = TestLoader(roidb, batch_size=1, shuffle=shuffle, has_rpn=has_rpn)\n\n # load model\n arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)\n\n # infer shape\n\n data_shape_dict = dict(test_data.provide_data)\n arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)\n arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))\n aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))\n\n # check parameters\n for k in sym.list_arguments():\n if k in data_shape_dict or 'label' in k:\n continue\n assert k in arg_params, k + ' not initialized'\n assert arg_params[k].shape == arg_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(\n arg_params[k].shape)\n for k in sym.list_auxiliary_states():\n assert k in aux_params, k + ' not initialized'\n assert aux_params[k].shape == aux_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(\n aux_params[k].shape)\n\n # decide maximum shape\n data_names = [k[0] for k in test_data.provide_data]\n label_names = None\n max_data_shape = [('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]\n if not has_rpn:\n max_data_shape.append(('rois', (1, config.TEST.PROPOSAL_POST_NMS_TOP_N + 30, 5)))\n\n # create predictor\n predictor = Predictor(sym, data_names, label_names,\n context=ctx, max_data_shapes=max_data_shape,\n provide_data=test_data.provide_data, provide_label=test_data.provide_label,\n arg_params=arg_params, aux_params=aux_params)\n\n return predictor, data_shape_dict\n\n\ndef test_predict(network, symbol, epoch, dataset, ims, stride, threshold=0.7, test=False):\n generate_config(network, dataset)\n predictor, data_shape_dict = test_predictor(default.network, default.dataset, 
default.test_image_set,\n default.root_path, default.dataset_path,\n mx.gpu(0), symbol, epoch,\n False, config.TEST.HAS_RPN, \"rpn\")\n\n nms = py_nms_wrapper(config.TEST.NMS)\n all_boxes = [[[] for _ in xrange(len(ims))]\n for _ in xrange(config.NUM_CLASSES)]\n for i, im in tqdm(enumerate(ims)):\n positions = sliding_window.sliding_window(im, data_shape_dict[\"data\"][2], int(stride), 0)[1]\n for position in positions:\n im_array, im_scale = im[position], 1\n im_array = transform(im_array, config.PIXEL_MEANS)\n im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)\n data = [mx.nd.array(im_array), mx.nd.array(im_info)]\n data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]\n data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)\n scores, boxes, data_dict = im_detect(predictor, data_batch, [\"data\", \"im_info\"], 1)\n print(scores)\n for j in range(1, config.NUM_CLASSES):\n indexes = np.where(scores[:, j] > threshold)[0]\n cls_scores = scores[indexes, j, np.newaxis]\n cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]\n for n in range(len(cls_boxes)):\n cls_boxes[n][[0, 2]] += position[1].start\n cls_boxes[n][[1, 3]] += position[0].start\n cls_dets = np.hstack((cls_boxes, cls_scores))\n keep = nms(cls_dets)\n all_boxes[j][i].append(cls_dets[keep, :])\n\n for i in range(len(all_boxes)):\n for j in range(len(all_boxes[i])):\n try:\n all_boxes[i][j] = np.vstack(all_boxes[i][j])\n keep = nms(all_boxes[i][j])\n all_boxes[i][j] = all_boxes[i][j][keep, :]\n except Exception as e:\n pass\n det_file = os.path.join(default.root_path, \"cache\",\n \"{}_general_{}_detections_{}_{}.pkl\".format(network, default.test_image_set,\n \"test\" if test else \"val\", epoch))\n with open(det_file, 'wb') as f:\n cPickle.dump(all_boxes, f, protocol=cPickle.HIGHEST_PROTOCOL)\n # boxes_this_image = [[]] + [all_boxes[j][0] for j in range(1, config.NUM_CLASSES)]\n print(all_boxes)\n return all_boxes\n\n\nfrom ..config import config, default, generate_config\nfrom ..symbol import *\nfrom ..core import callback, metric\nfrom ..core.loader import AnchorLoader\nfrom ..core.module import MutableModule\nfrom ..utils.load_data import load_gt_roidb, merge_roidb, filter_roidb\nfrom ..utils.load_model import load_param\nimport logging\n\n\ndef train_net(ctx, pretrained, epoch, prefix, begin_epoch, end_epoch,\n shuffle, resume, frequent,\n lr=0.001, lr_step='5', lr_factor=0.5):\n # set up logger\n log_file = \"log\"\n log_dir = prefix.rsplit(\"/\", 1)[0]\n print(log_dir)\n log_file_full_name = os.path.join(log_dir, log_file)\n head = '%(asctime)-15s Node[' + str(mx.kvstore.create(\"local\").rank) + '] %(message)s'\n\n logger = logging.getLogger()\n handler = logging.FileHandler(log_file_full_name)\n formatter = logging.Formatter(head)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n\n # load symbol\n sym = eval('get_' + default.network + '_train')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)\n feat_sym = sym.get_internals()['rpn_cls_score_output']\n\n # setup multi-gpu\n batch_size = len(ctx)\n input_batch_size = config.TRAIN.BATCH_IMAGES * batch_size\n\n # print config\n pprint.pprint(config)\n\n # load dataset and prepare imdb for training\n imdb = eval(default.dataset)(default.image_set, default.root_path, default.dataset_path,\n ['__background__', 'adult_males', 'subadult_males', 'adult_females', 'juveniles',\n 'pups'])\n roidb = imdb.gt_roidb()\n roidb = 
filter_roidb(roidb)\n\n # load training data\n train_data = AnchorLoader(feat_sym, roidb, batch_size=input_batch_size, shuffle=shuffle,\n ctx=ctx, work_load_list=None,\n feat_stride=config.RPN_FEAT_STRIDE, anchor_scales=config.ANCHOR_SCALES,\n anchor_ratios=config.ANCHOR_RATIOS, aspect_grouping=config.TRAIN.ASPECT_GROUPING)\n\n # infer max shape\n max_data_shape = [\n ('data', (input_batch_size, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]\n max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)\n max_data_shape.append(('gt_boxes', (input_batch_size, 100, 5)))\n print('providing maximum shape', max_data_shape, max_label_shape)\n\n # infer shape\n data_shape_dict = dict(train_data.provide_data + train_data.provide_label)\n arg_shape, out_shape, aux_shape = sym.infer_shape(**data_shape_dict)\n arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))\n out_shape_dict = dict(zip(sym.list_outputs(), out_shape))\n aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))\n print('output shape')\n pprint.pprint(out_shape_dict)\n\n # load and initialize params\n if resume:\n arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)\n else:\n arg_params, aux_params = load_param(pretrained, epoch, convert=True)\n arg_params['rpn_conv_3x3_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['rpn_conv_3x3_weight'])\n arg_params['rpn_conv_3x3_bias'] = mx.nd.zeros(shape=arg_shape_dict['rpn_conv_3x3_bias'])\n arg_params['rpn_cls_score_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['rpn_cls_score_weight'])\n arg_params['rpn_cls_score_bias'] = mx.nd.zeros(shape=arg_shape_dict['rpn_cls_score_bias'])\n arg_params['rpn_bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['rpn_bbox_pred_weight'])\n arg_params['rpn_bbox_pred_bias'] = mx.nd.zeros(shape=arg_shape_dict['rpn_bbox_pred_bias'])\n arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['cls_score_weight'])\n arg_params['cls_score_bias'] = mx.nd.zeros(shape=arg_shape_dict['cls_score_bias'])\n arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.001, shape=arg_shape_dict['bbox_pred_weight'])\n arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=arg_shape_dict['bbox_pred_bias'])\n\n # check parameter shapes\n for k in sym.list_arguments():\n if k in data_shape_dict:\n continue\n assert k in arg_params, k + ' not initialized'\n assert arg_params[k].shape == arg_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(\n arg_params[k].shape)\n for k in sym.list_auxiliary_states():\n assert k in aux_params, k + ' not initialized'\n assert aux_params[k].shape == aux_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(\n aux_params[k].shape)\n\n # create solver\n fixed_param_prefix = config.FIXED_PARAMS\n data_names = [k[0] for k in train_data.provide_data]\n label_names = [k[0] for k in train_data.provide_label]\n mod = MutableModule(sym, data_names=data_names, label_names=label_names,\n logger=logger, context=ctx, work_load_list=None,\n max_data_shapes=max_data_shape, max_label_shapes=max_label_shape,\n fixed_param_prefix=fixed_param_prefix)\n\n # decide training params\n # metric\n rpn_eval_metric = metric.RPNAccMetric()\n rpn_cls_metric = metric.RPNLogLossMetric()\n rpn_bbox_metric = metric.RPNL1LossMetric()\n eval_metric = metric.RCNNAccMetric()\n cls_metric = metric.RCNNLogLossMetric()\n bbox_metric = 
metric.RCNNL1LossMetric()\n eval_metrics = mx.metric.CompositeEvalMetric()\n for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric]:\n eval_metrics.add(child_metric)\n # callback\n batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=frequent)\n means = np.tile(np.array(config.TRAIN.BBOX_MEANS), config.NUM_CLASSES)\n stds = np.tile(np.array(config.TRAIN.BBOX_STDS), config.NUM_CLASSES)\n epoch_end_callback = callback.do_checkpoint(prefix, means, stds)\n # decide learning rate\n base_lr = lr\n lr_factor = lr_factor\n lr_epoch = [int(epoch) for epoch in lr_step.split(',')]\n lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]\n lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))\n lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]\n print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)\n lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(lr_iters, lr_factor)\n # optimizer\n optimizer_params = {'momentum': 0.9,\n 'wd': 0.0005,\n 'learning_rate': lr,\n 'lr_scheduler': lr_scheduler,\n 'rescale_grad': (1.0 / batch_size),\n 'clip_gradient': 5}\n\n # train\n mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,\n batch_end_callback=batch_end_callback, kvstore=default.kvstore,\n optimizer='sgd', optimizer_params=optimizer_params,\n arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)\n","sub_path":"rcnn/tools/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":13475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"469899160","text":"#!/usr/bin/python\n# coding: utf-8\n\n# Author: Yan Wu\n# statistics of user infomation\n\nimport pylab\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport argparse\n\n# get input \nparser = argparse.ArgumentParser(description='see average ratings in different dimensions')\nparser.add_argument(\"prop\", help=\"which one want to see\")\nargs = parser.parse_args()\n\n\n# load data\nScriptPath = os.path.split( os.path.realpath(sys.argv[0]))[0]\npath = ScriptPath + \"/data\"\n\nuser_ratings = pd.read_csv(path + '/user_ratings.csv')\noccupation = pd.read_csv(path + '/occupation.csv')\nage = pd.read_csv(path + '/age.csv')\n\ndata = pd.merge(age, pd.merge(user_ratings, occupation))\n#print(userInfo.head())\npath = ScriptPath + \"/output\"\nunique_genre = set()\nfor genre in user_ratings.genres.values:\n unique_genre.update(genre.split('|'))\n\ndef age():\n\tpieces = []\n\tfor genre in unique_genre:\n\t\ttemp = pd.pivot_table(data, values= 'rating',index = genre,columns='age_group',aggfunc='mean')[1:]\n\t\tpieces.append(pd.DataFrame(temp.values, index = [genre], columns = temp.columns))\n\n\tdf_age = pd.concat(pieces)\n\tdf_age.to_csv(path + '/Analysis1/genre_age.csv')\n\tdf_age = df_age.dropna(how = 'any')\n\t#print(df_age)\n\tplt.figure(figsize=(8,8))\n\tsns.set(font_scale=0.8)\n\tsns.heatmap(df_age)\n\tplt.title(\"Average ratings of each genre by each age group\")\n\tplt.ylabel('Genres')\n\tplt.xlabel('Age group');\n\tplt.show()\n\ndef gender():\n\tpieces = []\n\tfor genre in unique_genre:\n\t\ttemp = pd.pivot_table(data, values= 'rating',index = genre,columns='gender',aggfunc='mean')[1:]\n\t\tpieces.append(pd.DataFrame(temp.values, index = [genre], columns = temp.columns))\n\n\tdf_gender = 
pd.concat(pieces)\n\tdf_gender.to_csv(path + '/Analysis1/genre_gender.csv')\n\tdf_gender = df_gender.dropna(how = 'any')\n\t#print(df_gender)\n\tplt.figure(figsize=(14,8))\n\tlist = df_gender.index\n\tplt.xticks(range(len(list)), list, rotation=30)\n\t#ax.set_xticklabels(list, rotation=30)\n\tplt.title('Average ratings of each genre by each gender')\n\tplt.plot(df_gender.M.values, 'gx--', label='male')\n\tplt.plot(df_gender.F.values, 'b-', label='female')\n\tplt.legend(loc='best') \n\tplt.title(\"Average ratings of each genre by each age group\")\n\tplt.ylabel('Rating')\n\tplt.xlabel('Genres');\n\tplt.show()\n\ndef occupation():\n\tpieces = []\n\tfor genre in unique_genre:\n\t\ttemp = pd.pivot_table(data, values= 'rating',index = genre,columns='job_description',aggfunc='mean')[1:]\n\t\tpieces.append(pd.DataFrame(temp.values, index = [genre], columns = temp.columns))\n\n\tdf_occu = pd.concat(pieces)\n\tdf_occu.to_csv(path + '/Analysis1/genre_occupation.csv')\n\tdf_occu = df_occu.dropna(how = 'any')\n\t#print(df_occu)\n\tplt.figure(figsize=(8,8))\n\tsns.set(font_scale=0.8)\n\tplt.xticks(rotation=30)\n\tplt.yticks(rotation=30)\n\tsns.heatmap(df_occu)\n\tplt.title(\"Average ratings of each genre by each occupation\")\n\tplt.ylabel('Genres')\n\tplt.xlabel('Occupation');\n\tplt.show()\n\n\nif args.prop == 'all': \n\tage()\n\tgender()\n\toccupation()\nelif args.prop == 'age':\n\tage()\nelif args.prop == 'gender':\n\tgender()\nelse:\n\toccupation()\n\n\n\n\n","sub_path":"2016fall/python/Final/Analysis1.py","file_name":"Analysis1.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"409732739","text":"from sklearn.metrics.pairwise import euclidean_distances\n\ndef score(X, centers, labels):\n n_samples = X.shape[0]\n n_clusters = centers.shape[0]\n xsquarednorms = squarednorms(X)\n norms = xsquarednorms.reshape(-1,1)\n ysquarednorms = squarednorms(centers)\n eucs = euclidean_distances(X, centers, Y_norm_squared=ysquarednorms, X_norm_squared=norms, squared=True)\n\n totalInertia = 0.0\n for i in range(n_samples):\n label = labels[i]\n inertia = eucs[i][label]\n totalInertia += inertia\n \n return (totalInertia / n_samples)\n\ndef squarednorms(X):\n return (X**2).sum(axis=1)\n","sub_path":"scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"132165583","text":"#!/usr/bin/python\n# version 2018/07/03\n\nimport sys\nfrom typing import Dict, Any, Union\n\nimport numpy as np\nimport time\nimport copy\nfrom esp_qalpha import esp, molecule\nfrom helper import readlog, readqd, readmul\nfrom collections import Counter\nimport logging as log\nimport datetime\n\n\"\"\"\nThis function is used to fit BCCs and polarizations.\n\nParameters:\n-----------\nngesp\nnmol2\nout\n-SCF\n-thole\n-bccpol\n-charge\n-dipscale\n-wrst\n-hrst\n-zeromodel\n\nImportant varibles:\n---------\n pol_groups: dict\n Definition which atom gets which pol parameter\n bond_groups: dict\n Definition how many BCCs and between which atoms.\n \n\nThe definition which atoms are allowed to have different polarization is currently\nimplemented in a way that is using AMBER atom types. This should be replaced\nsoon with a SMIRKS pattern. Similar is the definition for BCCs. BCCs are allowed\nto be different between two atoms of different bond_groups. 
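For illustration only (the atom types and group indices here are made up), the group\nfile consumed by read_groups below is expected to look like:\n\n    Polarisation groups 2\n    'c3': 0,\n    'hc': 1,\n    BCC groups 2\n    'c3': 0,\n    'hc': 1,\n\n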
SMIRKS can help to\nimprove this scheme.\n\nThe module is using the esp and the molecule class. The molecule class is used\nto read in the information about bond terms and are therefore used to \ndefine the scaling of the atoms and for the definitions of the bond in the BCCs\nThe esp class reads in one gaussian ESP file and creates the corresponding matrix \nfor the optimisation.\n\nThis program is combining the matrix from multiple gaussian ESP and setting the corresponding \nvalues in the ESP class.\n\"\"\"\n\n# Definition of bond groups\n\"\"\"\npol_groups: Dict[Union[str, Any], Union[int, Any]] = {\n 'c': 0,\n 'c2': 4,\n 'c3': 0,\n 'ca': 4,\n 'cp': 4,\n 'h1': 1,\n 'h2': 1,\n 'h4': 1,\n 'ha': 1,\n 'hc': 1,\n 'hn': 1,\n 'hx': 1,\n 'ho': 1,\n 'n': 2,\n 'n4': 2,\n 'na': 2,\n 'o': 3,\n 'oh': 3,\n 'os': 3, }\n\nbond_groups: Dict[Union[str, Any], Union[int, Any]] = {\n 'c': 0,\n 'c2': 1,\n 'c3': 2,\n 'ca': 3,\n 'cp': 4,\n 'h1': 5,\n 'h2': 6,\n 'h4': 7,\n 'ha': 8,\n 'hc': 9,\n 'hn': 10,\n 'hx': 11,\n 'ho': 12,\n 'n': 13,\n 'n4': 14,\n 'na': 15,\n 'o': 16,\n 'oh': 17,\n 'os': 18, }\n\"\"\"\ndef read_groups(group_file):\n '''\n Read in BCC and POL groups.\n :param group_file: string\n Path to file containing the group definition\n :return: bcc_dict: dictonary\n define which atom types are belonging together\n pol_dict: dictonary\n define which atom types have the same polarizability\n The groups define which atoms have the same polarizability and which bonds share the\n same bond charge correction.\n '''\n f=open(group_file,'r')\n lines=f.readlines()\n f.close()\n bcc_dict={}\n pol_dict={}\n for i,line in enumerate(lines):\n if 'Polarisation groups' in line:\n n_pol=int(lines[i].split()[-1])\n for j in range(n_pol):\n key=lines[i + 1 + j].split()[0].strip(':').strip(\"'\")\n value=int(lines[i + 1 + j].split()[1].strip(','))\n pol_dict[key]=value\n elif 'BCC groups' in line:\n n_bcc=int(lines[i].split()[-1])\n for j in range(n_bcc):\n key=lines[i + 1 + j].split()[0].strip(':').strip(\"'\")\n value=int(lines[i + 1 + j].split()[1].strip(','))\n bcc_dict[key]=value\n return bcc_dict, pol_dict\n# returns list of input file names\ndef read_nmol2(nmol2):\n \"\"\"\n Takes a file and return the lines of this file in a list.\n\n :param nmol2: string\n Path to file with Path to all mol2 files of the set.\n :return: list of strings\n\n Used to write in the nmol2 files in this project\n \"\"\"\n mol1 = []\n f = open(nmol2, 'r')\n l = f.readlines()\n for o in range(len(l)):\n if l[o] != '\\n':\n mol1.append(molecule(l[o].strip()))\n return (mol1)\n\n\ndef readngesp(txtfile):\n \"\"\"\n Reads in a ngesp file and stores the information.\n :param txtfile: string\n Path to file with ngeps file\n :return: list of string, list of [float, float, float], {list of string}\n\n This function does not read in the gesp files per se.\n It only reads in the Path of the gesp file and the\n corresponding electric fields. 
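Each line is expected to look roughly like (values purely illustrative):\n        0 molecule1/conf0/conf0.gesp 0.0 0.0 0.0\n    i.e. molecule index, gesp path, then the external field Ex, Ey and Ez.\n    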
If a fifth argument is given\n in a line it corresponds to a baseline QM calculation which is substracted\n from the gesp given by the first argument.\n \"\"\"\n f = open(txtfile, 'r')\n ngesp = [] # List of Paths to all gesp files\n eext = []\n lines = f.readlines()\n base = []\n molecules = []\n for line in lines:\n if line != '\\n':\n entry = line.split()\n if entry[0] not in molecules:\n ngesp.append([])\n eext.append([])\n molecules.append(entry[0])\n base.append([])\n ngesp[int(entry[0])].append(entry[1])\n eext[int(entry[0])].append([float(entry[2]), float(entry[3]), float(entry[4])])\n if len(entry) == 6:\n base[int(entry[0])].append(entry[5])\n return (ngesp, eext, base)\n\n\ndef loadalpha(datei,npol):\n \"\"\"\n Takes the output of an old BCC-Pol calculation and reads in the values\n :param datei: string\n Path to file\n :return: list of float\n polarizations.\n\n Up to now there is no check if the definition of BCCs is the same\n Should be implemendet in the next version.\n\n \"\"\"\n f = open(datei, 'r')\n lines = f.readlines()\n f.close()\n bond = np.zeros(len(bondtypes))\n pol = [0.0 for i in range(npol)]\n readout = 0\n for line in lines:\n entry = line.split()\n if entry[0] == 'Bond':\n readout = 1\n elif entry[0] == 'Polarizabilities:':\n readout = 2\n elif readout == 1:\n for o in range(len(bondtypes)):\n if bondnames[o][0] == entry[1] and bondnames[o][1] == entry[2]:\n bond[o] = entry[0]\n elif readout == 2:\n num=pol_groups[entry[1]]\n pol[num]=float(entry[0])\n pol = np.array(pol)\n return pol\n\ndef loadbcc(datei):\n f = open(datei, 'r')\n lines = f.readlines()\n f.close()\n bond = np.zeros(len(bondtypes))\n readout = 0\n for line in lines:\n entry = line.split()\n if entry[0] == 'Bond':\n readout = 1\n\n elif entry[0] == 'Polarizabilities:':\n readout = 2\n elif readout == 1:\n for o in range(len(bondtypes)):\n if bondnames[o][0] == entry[1] and bondnames[o][1] == entry[2]:\n bond[o] = entry[0]\n return bond\n\n\n\ndef loadbccpol(datei,npol):\n alpha=loadalpha(datei,npol)\n bond=loadbcc(datei)\n bondpol = np.concatenate((bond, alpha), axis=0)\n return bondpol\n\n\"\"\"Constants\"\"\"\nBOHR = float(0.52917722086)\n\n# Timing\nstime = time.time() # Starttime\ntimef = open('timing.dat', 'w')\n\n# Setting default values\noutputf = 'output.txt'\noutputlog = 'output.log'\npath1='./molecule'\nngesp = []\nnconf = []\nmode = 'bcc'\ntest = False\nSCF = False\ndipscale = 0.01\nwrst = ''\nthole = False\nzeromodel = False\ngroups=None\n\n# Read in input\nfor i in range(len(sys.argv)):\n if sys.argv[i] == '-nmol2': # the usual input file format for the mol2\n nmol2 = sys.argv[i + 1]\n if sys.argv[i] == '-chg': # prefix of the gaussain output for the atomic charges\n chg = sys.argv[i + 1]\n if sys.argv[i] == '-ngesp': # list of esps have to be defined in this file with the Eext\n gesp = sys.argv[i + 1]\n mconf = 1\n if sys.argv[i] == '-out': # Outfile\n outputf = sys.argv[i + 1]\n outputlog = outputf.split('.')[0] + '.log'\n if sys.argv[i] == '-mode': # BCC or alpha(polarizaton only)\n mode = sys.argv[i + 1]\n if sys.argv[i] == '-SCF':\n SCF = True\n if sys.argv[i] == '-thole' or sys.argv[i] == '-Thole':\n thole = True\n if sys.argv[i] == '-dipscale':\n dipscale = float(sys.argv[i + 1])\n if sys.argv[i] == '-wrst': # restraint constant for parabolic restraints\n wrst = float(sys.argv[i + 1])\n rst_on = 1\n if sys.argv[i] == '-bccpol': # Read in starting values of a previous run\n bccpol = sys.argv[i + 1]\n if sys.argv[i] == '-startpol': # Read in starting values of a previous run\n 
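# e.g. \"-startpol previous_run.txt\" (file name illustrative); the file is parsed later by loadalpha()\n        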
startpol = sys.argv[i + 1]\n if sys.argv[i] == '-startbcc': # Read in starting values of a previous run\n startbcc = sys.argv[i + 1]\n if sys.argv[i] == '-groups': # Read group defintion\n groups = sys.argv[i + 1]\n if sys.argv[i] == '-zeromodel': # All charges qfix are set to zero\n zeromodel = True\n if sys.argv[i] == '-path': # All charges qfix are set to zero\n path1=sys.argv[i + 1].strip('/') + '/molecule'\n# Finish reading commmand line arguments\n\nlog.basicConfig(filename=outputlog, level=10)\ncmdline = ''\nfor i in range(len(sys.argv)):\n cmdline = cmdline + sys.argv[i].strip(\"\\'\") + ' '\nlog.info(cmdline)\n\nif groups==None:\n log.error('Group Definition is missing')\n exit()\nelse:\n bond_groups,pol_groups=read_groups(groups)\n\n\n# Debug\nlog.debug('Pol Groups:')\nlog.debug(pol_groups)\nlog.debug('BCC Groups:')\nlog.debug(bond_groups)\n\nbondtypes = [] # used to save all occuring bond types: Every combination of different bond numbers is one bond type\nbondnames = [] # just for output reason. So that I can easily print out which bond atoms are involved in this BCC\n\n# Initalize molecules with bond matrix and atom types\nif 'nmol2' in globals():\n mol1 = read_nmol2(nmol2)\n\n # Check what kinds of bonds exists in the whole test set\n for i in range(len(mol1)):\n mol1[i].bondtypes(bond_groups, bondtypes, bondnames) # the group definiton; the already occuring bonds and the corresponding bond names\n # print(bondtypes)\n\n # Build matrixes defining which bcc occurs in which atom and waht polarizability is used for which atom (R/T)\n for i in range(len(mol1)):\n mol1[i].bondmatrix(bondtypes)\n mol1[i].poltypes(pol_groups)\n\nelse: # for single molecule use only\n print('nmol2 file is not definied.')\n# Timing\ntime1 = time.time()\nduration = time1 - stime\ntimef.write('Time for reading in mol2files: {}\\n'.format(duration))\n\n# Initialize all esps\nngesp, eext, base = readngesp(gesp)\nbases = [[] for i in range(len(base))]\nesps = [[] for i in range(len(ngesp))]\nfor k in range(len(ngesp)):\n for i in range(len(base[k])):\n bases[k].append(esp(base[k][i], mol1[k], mode=mode))\n for i in range(len(ngesp[k])):\n # esps.append(esp(ngesp[i], mode, aniso='OFF', ext=eext[i], eqdipoles=eqdipoles, eqatoms=eqatoms))\n esps[k].append(esp(ngesp[k][i], mol1[k], mode=mode, ext=eext[k][i], SCF=SCF, thole=thole, wrst1=wrst,\n dipscale=dipscale)) # initilaze all esps\n if len(bases[k]) == len(esps[k]):\n for i in range(len(ngesp[k])):\n esps[k][i].subtract_base_esp(bases[k][i])\n elif len(bases[k]) == 0:\n pass\n else:\n log.error(\"Something terrible is happening right now\\n\")\n nconf.append(len(ngesp[k]))\n log.info('Using {} molecule-conformations'.format(len(ngesp[k])))\ndel bases\n\n# Timing\ntime2 = time.time()\nduration = time2 - time1\ntimef.write('Time for reading in espfiles: {}\\n'.format(duration))\n\n# Main part of the program\n\n# Create scaling matrix and intialize necessary lists\nfor k in range(len(ngesp)):\n if test: # Just for testing purposes\n for i in range(nconf[k]):\n esps[k][i].distances()\n esps[k][i].make_test(esps[0].testq, esps[0].testd)\n print('Making a test example')\n exit()\n for i in range(nconf[k]): # Start\n esps[k][i].scaling(mol1[k].bonds) # calculate all scaling matrix\nXmol = [None for i in range(len(nconf))] # Stores all matrixes\nYmol = [None for i in range(len(nconf))] # Stores all vectors of the euqation\nq = [None for i in range(len(nconf))] # Stores all atomic charges\nqtmp = [None for i in range(len(nconf))]\n\n# Timing Stuff\nsumme3 = 0.0 # 
Stores quadratic sum of all esp points\ntime3 = time.time()\nduration = time3 - time2\ntimef.write('Time for calculation distances: {}\\n'.format(duration))\n\n# Read in Atomic charges. Most of the time from gaussian outputs Attetntion: Absolut Path\n# Subtracts the ESP of the charges from the input ESP, except we are fitting to\n# ESP differences\nfor k in range(len(nconf)):\n for i in range(nconf[k]):\n if 'Mulliken' in chg:\n try:\n q[k] = readmul(\n path1 + str(k + 1) + '/conf0/' + chg + '.log',\n natoms=esps[k][0].natoms)\n except:\n q[k] = readmul(\n path1.replace('mschauperl','mis') + str(k + 1) + '/conf0/' + chg + '.log',\n natoms=esps[k][0].natoms)\n elif 'RESP' in chg:\n chgfile=chg.split('=')[1]\n try:\n q[k] = readqd('/home/mschauperl/kirkwood/charge_method/medium_set/charges/'+chgfile+'.txt_' + str(k))\n except:\n q[k] = readqd('/home/mis/kirkwood/charge_method/medium_set/charges/'+chgfile+'.txt_' + str(k))\n\n elif 'zero_chg' in chg:\n q[k]=[0.0 ,0.0]\n elif 'blaconf1' in chg:\n try:\n q[k] = readqd('/home/mschauperl/kirkwood/charge_method/medium_set/charges/RESPconf1_co_opt_direct.txt_' + str(k))\n except:\n q[k] = readqd('/home/mis/kirkwood/charge_method/medium_set/charges/RESPconf1_co_opt_direct.txt_' + str(k))\n elif 'resp' in chg:\n try:\n q[k] = readqd(\n path1 + str(k + 1) + '/conf0/' + chg)\n except:\n q[k] = readqd(path1.replace('mschauperl','mis') + str(k + 1) + '/conf0/' + chg)\n\n else:\n try:\n q[k] = readlog(\n path1 + str(k + 1) + '/conf0/' + chg + '.log',\n natoms=esps[k][0].natoms)\n except:\n q[k] = readlog(\n path1.replace('mschauperl','mis') + str(k + 1) + '/conf0/' + chg + '.log',\n natoms=esps[k][0].natoms)\n if zeromodel:\n if esps[k][i].charge == 0:\n q[k] = np.zeros(len(q[k]))\n elif k==4:\n q[k] = np.zeros(len(q[k]))\n q[k][6]=-0.5\n q[k][7]=-0.5\n elif k==7:\n q[k] = np.zeros(len(q[k]))\n q[k][4]=1.0\n\n else:\n q[k] = np.array([esps[k][i].charge / len(q[k]) for l in range(len(q[k]))])\n # qtmp[k]=readqd('/home/mis/data_kirkwood/charge_method/smallset/molecule'+str(k+1)+'/conf'+str(i)+'/MP2aug-cc-pVTZ'+str(i)+'.resp')\n \"\"\" \n #q[k]=readqd('/home/mis/data_kirkwood/charge_method/smallset/molecule'+str(k+1)+'/conf0/MP2aug-cc-pVTZ0.resp')\n \"\"\"\n\nfor k in range(len(nconf)):\n for i in range(nconf[k]):\n esps[k][i].distances() # Calculated all distances\n esps[k][i].calc_sse(q[k]) # Calculate initital error/\n summe3 += esps[k][i].sse / nconf[k] # Store overall error\n # Include charges from the lower QM calculation (AM1 calculation)\n if mode == 'analysisalpha' or mode == 'alpha':\n esps[k][i].qfix = np.zeros(len(q[k]))\n q[k]=np.zeros(len(q[k]))\n esps[k][i].step=2\n else:\n esps[k][i].qfix = q[k]\n esps[k][i].sub_esp_qd(q[k]) # Substract static esp from the atomic charges\n esps[k][i].delete_dist()\n\n#Finish reading input charges\n\n# Timing stuff again\ntime4 = time.time()\nduration = time4 - time3\ntimef.write('Time for substracting fixed charge esp {}\\n'.format(duration))\n\n\n# Read in starting values from an old run\nif 'bccpol' in globals():\n try:\n npoltypes = len(Counter(pol_groups.values()))\n bcc_alp = loadbccpol(bccpol,npoltypes)\n except:\n print('Using the Zeromodel option')\n if mode == 'alpha' or mode=='analysisalpha' :\n bcc_alp[:len(bondtypes)] = np.zeros(len(bondtypes))\n qbond = bcc_alp[:len(bondtypes)]\n pol = bcc_alp[len(bondtypes):]\n\n\nif 'startpol' in globals():\n npoltypes = len(Counter(pol_groups.values()))\n pol=loadalpha(startpol,npoltypes)\n if 'startbcc' not in globals():\n qbond = np.zeros(len(bondtypes))\n 
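# no BCC starting values were supplied, so the BCC block is zero-initialised before being joined to the loaded polarizabilities\n        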
bcc_alp = np.concatenate((qbond, pol), axis=0)\nif 'startbcc' in globals():\n qbond=loadbcc(startbcc)\n if 'startpol' not in globals():\n pol = np.array([0.0 for i in range(len(Counter(pol_groups.values())))])\n bcc_alp = np.concatenate((qbond, pol), axis=0)\n\n# Optimization process\nwhile True:\n # Timing stuff\n ecaltime = 0.0\n matrixtime = 0.0\n\n for k in range(len(nconf)):\n\n for i in range(nconf[k]):\n etime_s = time.time()\n \"\"\"\n Initializing values for the next run. If no starting values are given. \n Starting values are set to 0\n Calculating all necesaary distances and the electric field at every atom\n position\n \"\"\"\n try:\n esps[k][i].qbond = bcc_alp[:len(bondtypes)]\n esps[k][i].pol = bcc_alp[len(bondtypes):]\n esps[k][i].bcc_to_qd(mol1[k])\n except:\n # esps[k][i].qd[:esps[k][i].natoms]=np.zeros(len(q[k]))\n esps[k][i].qbond = np.zeros(len(bondtypes))\n esps[k][i].pol = np.array([0.0 for i in range(len(Counter(pol_groups.values())))])\n esps[k][i].bcc_to_qd(mol1[k])\n\n log.debug(esps[k][i].qd)\n esps[k][i].distances() # Calculated all distances\n if mode=='alphabccfix':\n esps[k][i].sub_esp_qd(esps[k][i].qd)\n esps[k][i].qfix=esps[k][i].qfix+esps[k][i].qd[:esps[k][i].natoms]\n esps[k][i].mode='alpha'\n esps[k][i].qbond = np.zeros(len(bondtypes))\n\n esps[k][i].get_e_int() # get initial electric field\n\n if mode=='alphabccfix':\n mode='alpha'\n\n if mode == 'analysis' or mode=='analysisalpha' :\n \"\"\"No optimization required. Just calculating the error with the given starting values \"\"\"\n npoltypes = len(Counter(pol_groups.values())) # Number of different polariztion groups\n break\n\n for k in range(len(nconf)):\n for i in range(nconf[k]):\n \"\"\"\n Creating the matrixes X and Y for all GESP files used.\n Combine all matrixes for one molecule to Xmol and then combining\n all matrixes of all molecules to one total matrix Xtot.\n \"\"\"\n # Timing stuff\n etime_e = time.time()\n ecaltime += etime_e - etime_s\n matrixtime_s = time.time()\n\n esps[k][i].make_Xn(mol1[k]) # Make Matrix\n esps[k][i].make_Yn(mol1[k]) # Make Vectors\n\n # Timing stuff\n matrixtime_e = time.time()\n matrixtime += matrixtime_e - matrixtime_s\n\n esps[k][i].delete_dist() # Calculated all distances\n npoltypes = len(Counter(pol_groups.values())) # Number of different polariztion groups\n\n # Initialize Molecule Matrix\n Xmol[k] = np.zeros(\n (len(bondtypes) + npoltypes, len(bondtypes) + npoltypes)) # stores the matrixes for molecule k\n Ymol[k] = np.zeros(len(bondtypes) + npoltypes) # stores the vector Y for molecule k\n\n # Combines all conformations\n for j in range(nconf[k]):\n Xmol[k] = np.add(Xmol[k], esps[k][j].X) # Add all matrices from molecule k\n Ymol[k] = np.add(Ymol[k], esps[k][j].Y) # Add all vectors from molecule k\n\n # Combine all molecules\n if k == 0:\n Xtot = copy.copy(Xmol[0])\n Xtot = Xtot / nconf[k] # Normalize by the number of conformations\n Ytot = copy.copy(Ymol[0])\n Ytot = Ymol[0] / nconf[k]\n else:\n Xmol[k] = Xmol[k] / nconf[k]\n Ymol[k] = Ymol[k] / nconf[k]\n Xtot = np.add(Xtot, Xmol[k])\n Ytot = np.add(Ytot, Ymol[k])\n\n # If one polarizability does not occur in the molecules the value is set to 0.0\n for i in range(len(Xtot)):\n if all(tmp == 0.0 for tmp in Xtot[i]):\n Xtot[i][i] = 1\n\n try:\n bcc_alp_old = copy.copy(bcc_alp)\n except:\n bcc_alp_old = np.zeros(len(Ytot))\n\n # Timing stuff\n time5 = time.time()\n duration = time5 - time4\n timef.write('Time for calculation matrixes: {}\\n'.format(duration))\n timef.write('Time for calculation efield: 
{}\\n'.format(ecaltime))\n timef.write('Time for calculation matrixes(pure): {}\\n'.format(matrixtime))\n\n # Solve the equation\n bcc_alp = np.linalg.solve(Xtot, Ytot)\n log.debug(\"1 optimisation step\")\n # Timing stuff\n time6 = time.time()\n duration = time6 - time5\n timef.write('Time for solve lin eq: {}\\n'.format(duration))\n time4 = time.time()\n if np.abs(np.subtract(bcc_alp_old, bcc_alp)).sum() < 0.001: # Determination condition\n break\n\n# End of optimization\n\n\n\"\"\"\nThis is all just output writing. \n\"\"\"\nsumme = 0.0 # stores the initial error\nsumme2 = 0.0 # store the residual error\noutfile = open(outputf, 'w')\noutfile.write(str(datetime.datetime.now()) + '\\n')\noutfile.write(str(cmdline) + '\\n')\nfor k in range(len(nconf)):\n sumk = 0.0\n sumk2 = 0.0\n for i in range(nconf[k]):\n esps[k][i].distances() # Calculated all distances\n\n summe += np.square(esps[k][i].pot).sum() / nconf[k]\n sumk += np.square(esps[k][i].pot).sum() / nconf[k]\n single = np.square(esps[k][i].pot).sum()\n log.info('Molekule ' + str(k + 1))\n log.info('The initial sum of square errors: {0:5.3f}'.format(single))\n esps[k][i].qbond = bcc_alp[:len(bondtypes)]\n esps[k][i].pol = bcc_alp[len(bondtypes):]\n esps[k][i].bcc_to_qd(mol1[k]) # calculates also the new charges\n esps[k][i].calc_sse(esps[k][i].qd)\n tmp_sse = esps[k][i].sse\n esps[k][i].delete_dist() # delete all distances\n log.info('The residual sum of square errors: {0:10.8f}'.format(esps[k][i].sse))\n summe2 += esps[k][i].sse / nconf[k]\n sumk2 += esps[k][i].sse / nconf[k]\n outfile.write('{}\\t{}\\t{}\\n'.format(k, sumk, sumk2))\noutfile.write('{}\\t{}\\t{}\\n'.format('SUM', summe, summe2))\n\"\"\"print('DEBUG')\nfor i in range(3):\n for j in range(3):\n esps[0][0].qd[i+3*j]+=0.0001\nesps[0][0].calc_sse(esps[0][0].qd)\nprint(esps[0][0].sse-tmp_sse)\nfor i in range(3):\n for j in range(3):\n esps[0][0].qd[i+3*j]-=0.0002\nesps[0][0].calc_sse(esps[0][0].qd)\nprint(esps[0][0].sse-tmp_sse)\nfor i in range(3):\n for j in range(3):\n esps[0][0].qd[i+3*j]+=0.0001\n\"\"\"\n\n# Print out summary\nlog.info(summe) # before bond charge correction and polarization\nlog.info(summe2) # residual error\nlog.info(summe3) # sum of squared ESP values\n# Write out bond charge corrections\noutfile.write('Bond Charge Corrections:\\n')\nfor i in range(len(bondtypes)):\n outfile.write('{}\\t{}\\t{}\\n'.format(bcc_alp[i], bondnames[i][0], bondnames[i][1]))\noutfile.write('Polarizabilities:\\n')\nfor i in range(npoltypes):\n outfile.write(\n '{}\\t{}\\n'.format(bcc_alp[i + len(bondtypes)], list(pol_groups.keys())[list(pol_groups.values()).index(i)]))\n#if 'bccpol' in globals() and mode!='analysis':\n# outfile.write('{}\\t{}\\n'.format(esps[7][0].pol[4],'n4'))\noutfile.close()\n\n\"\"\"\nthis part was just to investigate if I am in a minimum or if the approximate analytical gradient does something bad.\nfor t in range(len(qbond)):\n qbond[t]+=0.01\n summet=0.0\n summe2t=0.0\n for k in range(len(nconf)):\n for i in range(nconf[k]):\n summet += np.square(esps[k][i].pot).sum()\n single=np.square(esps[k][i].pot).sum()\n #print('The initial sum of square errors: {0:5.3f}'.format(single))\n esps[k][i].qbond=qbond\n esps[k][i].test_bcc(mol1[k])\n esps[k][i].calc_sse(esps[k][i].qd)\n #print('The residual sum of square errors: {0:5.3f}'.format(esps[k][i].sse))\n summe2t+=esps[k][i].sse/nconf[k]\n print(summe2t-summe2)\n if summe2t-summe2<0.0:\n print('Warning')\n qbond[t]-=0.01\nprint('Finish')\n\nfor t in range(len(qbond)):\n qbond[t]-=0.01\n summet=0.0\n 
summe2t=0.0\n for k in range(len(nconf)):\n for i in range(nconf[k]):\n summet += np.square(esps[k][i].pot).sum()\n single=np.square(esps[k][i].pot).sum()\n #print('The initial sum of square errors: {0:5.3f}'.format(single))\n esps[k][i].qbond=qbond\n esps[k][i].test_bcc(mol1[k])\n esps[k][i].calc_sse(esps[k][i].qd)\n #print('The residual sum of square errors: {0:5.3f}'.format(esps[k][i].sse))\n summe2t+=esps[k][i].sse/nconf[k]\n print(summe2t-summe2)\n if summe2t-summe2<0.0:\n print('Warning')\n print(bondtypes[k])\n qbond[t]+=0.01\n\"\"\"\n\"\"\"\nesps=[]\nesps.append(esp(gesp,mode='q'))\nesps[0].distances()\nsumme = 0.0\nsumme += np.square(esps[0].pot).sum()\nprint('The initial sum of square errors: {0:5.3f}'.format(summe))\nq=readlog('/home/mis/data_kirkwood/charge_method/smallset/molecule1/conf0/hf631AM1Mulliken.log',natoms=esps[0].natoms)\nprint(q)\nesps[0].sub_esp_qd(q)\nsumme = 0.0\nsumme += np.square(esps[0].pot).sum()\nprint('The initial sum of square errors: {0:5.3f}'.format(summe))\nesps[0].make_A(mol1[0])\nesps[0].make_Y1(mol1[0])\nesps[0].opt_bcc(mol1[0])\nprint(bondtypes)\nesps[0].calc_sse(esps[0].qd)\nprint(esps[0].sse)\n\"\"\"\n\n\"\"\"\nsse0=esps[0].sse\nprint('TEST')\nfor i in range(len(esps[0].qbond)):\n esps[0].qbond[i]+=0.01\n esps[0].test_bcc(mol1[0])\n esps[0].calc_sse(esps[0].qd)\n print(esps[0].sse-sse0)\n esps[0].qbond[i]-=0.01\n esps[0].qbond[i]-=0.01\n esps[0].test_bcc(mol1[0])\n esps[0].calc_sse(esps[0].qd)\n print(esps[0].sse-sse0)\n esps[0].qbond[i]+=0.01\nesps[0].make_X()\nesps[0].make_Y()\nesps[0].opt_scf()\nprint(esps[0].qd)\nesps[0].calc_sse(esps[0].qd)\nprint(esps[0].sse)\n\"\"\"\n","sub_path":"resppol/bccpol.py","file_name":"bccpol.py","file_ext":"py","file_size_in_byte":25973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"12379230","text":"from eeg_net.eeg_cnn import conv_bn\nfrom eeg_net.eeg_resnet import *\nfrom eeg_net.eeg_net_base import *\nfrom eeg_net.eeg_cnn import * \nfrom functools import partial\nfrom torch.nn.modules import dropout\nfrom eeg_net.eeg_net_base import * \nfrom torch import nn \n\n\nclass CNNEEGLSTM(nn.Module):\n def __init__(self,in_channels, num_classes=4,options={}):\n super().__init__() \n _conv1_size = options.pop('conv1_size',3)\n _conv1_out_channel = options.pop('conv1_out_channel',128)\n _conv1_pool = options.pop('conv1_pool',4)\n _conv2_size = options.pop('conv2_size',3)\n _conv2_out_channel = options.pop('conv2_out_channel',64)\n _conv2_pool = options.pop('conv2_pool',2)\n _feature_pool_type = options.pop('pool_type','max')\n _activation = options.pop('activation','none')\n _lstm_hidden_size = options.pop('lstm_hidden_size',256)\n _lstm_drop_rate = options.pop('lstm_drop_rate',0.0)\n _lstm_layer = options.pop('lstm_layer',2)\n\n conv1 = partial(Conv1dAuto,kernel_size=_conv1_size,stride=1)\n self.pool1 = nn.MaxPool1d(kernel_size=_conv1_pool,stride=_conv1_pool)\n conv2 = partial(Conv2dAuto,kernel_size=(1,_conv2_size),stride = 1)\n self.lstm = nn.LSTM(input_size=_conv1_out_channel,\n hidden_size = _lstm_hidden_size,\n dropout=_lstm_drop_rate,\n num_layers =_lstm_layer,\n batch_first = True)\n self.fc = nn.Linear(in_features=_lstm_hidden_size,\n out_features=num_classes)\n if len(options) >0:\n extra = ', '.join('\"%s\"' % k for k in list(options.keys()))\n raise ValueError('Unrecognized arguments in options%s' % extra)\n \n #self.conv = conv_bn(conv=conv1,in_channels=in_channels,out_channels=_conv1_out_channel,bias=False)\n self.conv1 = nn.Conv1d(in_channels 
=in_channels, \n out_channels = _conv1_out_channel,\n kernel_size=_conv1_size,stride=1)\n self.conv1_norm = nn.BatchNorm1d(_conv1_out_channel)\n self.activation = activation_func(_activation)\n self.softmax = nn.Softmax(dim=1)\n self.conv_layer = nn.Sequential(\n conv_bn(conv=conv1,in_channels=in_channels,out_channels=_conv1_out_channel,bias=False),\n #pool_fn(_feature_pool_type,kernel_size=(1,_conv1_pool),stride=(1,_conv1_pool)),\n self.pool1,\n activation_func(_activation),\n )\n \n self.lstm_layer = nn.Sequential(\n self.lstm \n )\n self.fc_layer = nn.Sequential(\n self.fc \n )\n def forward(self,x):\n #x = self.conv_layer(x)\n x = self.conv1(x)\n x = self.conv1_norm(x)\n x = self.activation(x)\n x =self.pool1(x)\n #print(x.shape)\n x = x.permute(0,2,1)\n x,_ = self.lstm_layer(x)\n x = self.fc_layer(x[:,-1,:])\n #print(x.shape)\n x = self.softmax(x)\n #print(x)\n return x \n \n\n\nclass NativeEEGLSTM(nn.Module):\n '''\n Targe time step:\n '''\n def __init__(self,input_size, hidden_size,num_layers,num_classes,device='cuda', *args, **kwargs):\n super().__init__()\n self.device = device\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size,hidden_size,num_layers,batch_first=True,dropout=0.0)\n self.fc1 = nn.Linear(hidden_size,num_classes)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self,x):\n x = x.permute(0,2,1)\n h0 = torch.zeros(self.num_layers,x.size(0),self.hidden_size).to(self.device)\n c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(self.device)\n state = (h0,c0)\n out, state = self.lstm(x,state) \n out = self.fc1(out[:,-1,:])\n #print(out[:,-1,:].shape)\n #print(out.shape)\n out = self.softmax(out)\n return out \n\nclass EEGLSTM(nn.Module):\n def __init__(self,input_size, hidden_size,num_layers,num_classes,device='cuda', *args, **kwargs):\n \"\"\"\n Input_size: the \n \"\"\"\n super().__init__()\n self.device = device\n #self.conv1 = nn.Conv2d((1,40),padding='none')\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size,hidden_size,num_layers,batch_first=True)\n self.fc1 = nn.Linear(hidden_size,num_classes)\n self.softmax = nn.Softmax(dim=1)\n #self.fc2 = nn.Linear()\n def forward(self,x):\n #x = x.permute(0,2,1)\n x = x.view(-1,100,22*10)\n #x = x.permute(0,2,1)\n #print(x.size(0))\n #print(x.shape)\n\n h0 = torch.zeros(self.num_layers,x.size(0),self.hidden_size).to(self.device)\n c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(self.device)\n state = (h0,c0)\n out, state = self.lstm(x,state) \n out = self.fc1(out[:,-1,:])\n #print(out[:,-1,:].shape)\n #print(out.shape)\n #out = self.softmax(out)\n return out ","sub_path":"eeg_net/eeg_rnn.py","file_name":"eeg_rnn.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"327863007","text":"from RedditBot import bot, utils\n\nimport sys\nimport __builtin__\n\n\n@bot.command\ndef eval(context):\n '''.eval '''\n if not utils.isadmin(context.line['prefix'], bot):\n return\n if context.args:\n try:\n return str(__builtin__.eval(context.args))\n except:\n return repr(sys.exc_info()[1])\n else:\n return eval.__doc__\n","sub_path":"RedditBot/plugins/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"281952521","text":"from django.test import TestCase\nfrom django.http import HttpRequest\n\nimport 
datetime as D\nfrom wafer.schedule.admin import SlotAdmin\nfrom wafer.schedule.models import Day, Slot\n\n\nclass DummyForm(object):\n\n def __init__(self):\n self.cleaned_data = {}\n\n\ndef make_dummy_form(additional):\n \"\"\"Fake a form object for the tests\"\"\"\n form = DummyForm()\n form.cleaned_data['additional'] = additional\n return form\n\n\n# Tests the custom save_model logic on SlotAdmin\nclass SlotAdminTests(TestCase):\n\n def setUp(self):\n \"\"\"Create some Venues and Days for use in the actual tests.\"\"\"\n self.day = Day.objects.create(date=D.date(2013, 9, 22))\n self.admin = SlotAdmin(Slot, None)\n\n def test_save_model_single_new(self):\n \"\"\"Test save_model creating a new slot, but no additional slots\"\"\"\n slot = Slot(day=self.day, start_time=D.time(11, 0, 0),\n end_time=D.time(11, 30, 0))\n # check that it's not saved in the database yet\n self.assertEqual(Slot.objects.count(), 0)\n request = HttpRequest()\n dummy = make_dummy_form(0)\n self.admin.save_model(request, slot, dummy, False)\n # check that it's now been saved in the database\n self.assertEqual(Slot.objects.count(), 1)\n slot2 = Slot.objects.filter(start_time=D.time(11, 0, 0)).get()\n self.assertEqual(slot, slot2)\n\n def test_save_model_change_slot(self):\n \"\"\"Test save_model changing a slot\"\"\"\n slot = Slot(day=self.day, start_time=D.time(11, 0, 0),\n end_time=D.time(12, 30, 0))\n # end_time is chosen as 12:30 so it stays valid through all the\n # subsequent fiddling\n slot.save()\n # check that it's saved in the database\n self.assertEqual(Slot.objects.count(), 1)\n request = HttpRequest()\n dummy = make_dummy_form(0)\n slot.start_time = D.time(12, 0, 0)\n self.assertEqual(\n Slot.objects.filter(start_time=D.time(11, 0, 0)).count(), 1)\n self.admin.save_model(request, slot, dummy, True)\n # Check that the database has changed\n self.assertEqual(\n Slot.objects.filter(start_time=D.time(11, 0, 0)).count(), 0)\n self.assertEqual(Slot.objects.count(), 1)\n slot2 = Slot.objects.filter(start_time=D.time(12, 0, 0)).get()\n self.assertEqual(slot, slot2)\n\n # Check that setting additional has no influence on the change path\n dummy = make_dummy_form(3)\n slot.start_time = D.time(11, 0, 0)\n self.assertEqual(\n Slot.objects.filter(start_time=D.time(11, 0, 0)).count(), 0)\n self.admin.save_model(request, slot, dummy, True)\n # Still only 1 object\n self.assertEqual(Slot.objects.count(), 1)\n # And it has been updated\n self.assertEqual(\n Slot.objects.filter(start_time=D.time(12, 0, 0)).count(), 0)\n self.assertEqual(\n Slot.objects.filter(start_time=D.time(11, 0, 0)).count(), 1)\n\n def test_save_model_new_additional(self):\n \"\"\"Test save_model changing a new slot with some additional slots\"\"\"\n slot = Slot(day=self.day, start_time=D.time(11, 0, 0),\n end_time=D.time(11, 30, 0))\n # check that it's not saved in the database\n self.assertEqual(Slot.objects.count(), 0)\n request = HttpRequest()\n dummy = make_dummy_form(3)\n self.admin.save_model(request, slot, dummy, False)\n self.assertEqual(Slot.objects.count(), 4)\n\n # check the hierachy is created correctly\n slot1 = Slot.objects.filter(previous_slot=slot).get()\n self.assertEqual(slot1.get_start_time(), slot.end_time)\n self.assertEqual(slot1.end_time, D.time(12, 0, 0))\n slot2 = Slot.objects.filter(previous_slot=slot1).get()\n self.assertEqual(slot2.get_start_time(), slot1.end_time)\n self.assertEqual(slot2.end_time, D.time(12, 30, 0))\n self.assertEqual(slot2.day, slot.day)\n slot3 = Slot.objects.filter(previous_slot=slot2).get()\n 
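# slot3 chains off slot2, so its start time must equal slot2's end time\n        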
self.assertEqual(slot3.get_start_time(), slot2.end_time)\n self.assertEqual(slot3.end_time, D.time(13, 00, 0))\n self.assertEqual(slot3.day, slot.day)\n\n # repeat checks with a different length of slot\n slot = Slot(day=self.day, previous_slot=slot3,\n end_time=D.time(14, 30, 0))\n dummy = make_dummy_form(4)\n self.admin.save_model(request, slot, dummy, False)\n self.assertEqual(Slot.objects.count(), 9)\n slot1 = Slot.objects.filter(previous_slot=slot).get()\n self.assertEqual(slot1.get_start_time(), slot.end_time)\n self.assertEqual(slot1.end_time, D.time(16, 0, 0))\n slot2 = Slot.objects.filter(previous_slot=slot1).get()\n self.assertEqual(slot2.get_start_time(), slot1.end_time)\n self.assertEqual(slot2.end_time, D.time(17, 30, 0))\n self.assertEqual(slot2.day, slot.day)\n slot3 = Slot.objects.filter(previous_slot=slot2).get()\n self.assertEqual(slot3.get_start_time(), slot2.end_time)\n self.assertEqual(slot3.end_time, D.time(19, 00, 0))\n self.assertEqual(slot3.day, slot.day)\n slot4 = Slot.objects.filter(previous_slot=slot3).get()\n self.assertEqual(slot4.get_start_time(), slot3.end_time)\n self.assertEqual(slot4.end_time, D.time(20, 30, 0))\n self.assertEqual(slot4.day, slot.day)\n","sub_path":"wafer/schedule/tests/test_admin.py","file_name":"test_admin.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"110719158","text":"#!/bin/python\n\n\nfrom workflow import Workflow, ICON_WEB\nfrom functions import excute\nimport sys\ncheck_auto_proxy_url_cmd = \"networksetup -getautoproxyurl Wi-Fi\"\n\ndef is_auto_proxy_on():\n out, err = excute(check_auto_proxy_url_cmd).communicate()\n if (not err) and (\"Yes\" in out):\n return True\n return False\n\ndef main(wf):\n proxy_list = sys.argv[1:]\n \n if is_auto_proxy_on():\n wf.add_item(title=\"turn it off\", arg=\"off\", valid = True, icon=ICON_WEB)\n for proxy in proxy_list:\n wf.add_item(title=proxy, arg=proxy, valid=True, icon=ICON_WEB)\n\n\n wf.send_feedback()\n\nif __name__ == '__main__':\n wf = Workflow()\n sys.exit(wf.run(main))\n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"382003816","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom models import cars\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Create your views here.\n\ndef login(request):\n if request.user.is_authenticated():\n return (\" \" + request.user.username)\n else:\n return (\"
You arent logged Login\")\n\ndef all(request):\n    cars_list = cars.objects.all()\n    out = \"\\n\"\n    for fila in cars_list:\n        out += \"• \" + fila.model + \"\\n\"\n    out += \"$]$', '_comms_return_char': '\\\\n', '_comms_ansi': False, \"\n        \"'ssh_config_file': '', 'ssh_known_hosts_file': '', 'lib_auth_exception': <class 'scrapli.exceptions.ScrapliAuthenticationFailed'>, '_isauthenticated': False, \"\n        \"'transport_options': {}, 'open_cmd': ['ssh', 'localhost', '-p', '22', '-o', \"\n        \"'ConnectTimeout=5', '-o', 'ServerAliveInterval=5', '-o', 'StrictHostKeyChecking=yes', '-F', \"\n        \"'/dev/null'], '_stdin_fd': -1, '_stdout_fd': -1}\"\n    )\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"systemssh not supported on windows\")\ndef test_creation():\n    conn = SystemSSHTransport(\"localhost\")\n    assert conn.host == \"localhost\"\n    assert conn.port == 22\n    assert conn._isauthenticated is False\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"systemssh not supported on windows\")\ndef test_build_open_cmd():\n    conn = SystemSSHTransport(\"localhost\")\n    assert conn.open_cmd == [\n        \"ssh\",\n        \"localhost\",\n        \"-p\",\n        \"22\",\n        \"-o\",\n        \"ConnectTimeout=5\",\n        \"-o\",\n        \"ServerAliveInterval=5\",\n        \"-o\",\n        \"StrictHostKeyChecking=yes\",\n        \"-F\",\n        \"/dev/null\",\n    ]\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"systemssh not supported on windows\")\n@pytest.mark.parametrize(\n    \"user_options\",\n    [[\"oKexAlgorithms=+diffie-hellman-group1-sha1\"], \"oKexAlgorithms=+diffie-hellman-group1-sha1\",],\n    ids=[\"user options list\", \"user options string\",],\n)\ndef test_build_open_cmd_user_options(user_options):\n    conn = SystemSSHTransport(\"localhost\", transport_options={\"open_cmd\": user_options})\n    assert conn.open_cmd == [\n        \"ssh\",\n        \"localhost\",\n        \"-p\",\n        \"22\",\n        \"-o\",\n        \"ConnectTimeout=5\",\n        \"-o\",\n        \"ServerAliveInterval=5\",\n        \"-o\",\n        \"StrictHostKeyChecking=yes\",\n        \"-F\",\n        \"/dev/null\",\n        \"oKexAlgorithms=+diffie-hellman-group1-sha1\",\n    ]\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"systemssh not supported on windows\")\n@pytest.mark.parametrize(\n    \"eof_msg\",\n    [\n        (b\"Host key verification failed\", \"Host key verification failed for host localhost\",),\n        (b\"Operation timed out\", \"Timed out connecting to host localhost\",),\n        (b\"Connection timed out\", \"Timed out connecting to host localhost\",),\n        (b\"No route to host\", \"No route to host localhost\",),\n        (b\"no matching cipher found\", \"No matching cipher found for host localhost\",),\n        (\n            b\"no matching cipher found, their offer: aes128-cbc,aes256-cbc\",\n            \"No matching cipher found for host localhost, their offer: aes128-cbc,aes256-cbc\",\n        ),\n        (\n            b\"blah blah blah\",\n            \"Failed to open connection to host localhost. 
Do you need to disable `auth_strict_key`?\",\n ),\n ],\n ids=[\n \"host key verification\",\n \"operation time out\",\n \"connection time out\",\n \"no route to host\",\n \"no matching cipher\",\n \"no matching cipher found ciphers\",\n \"unknown reason\",\n ],\n)\ndef test_pty_authentication_error_messages(eof_msg):\n conn = SystemSSHTransport(\"localhost\")\n error_msg = eof_msg[0]\n expected_msg = eof_msg[1]\n actual_msg = conn._pty_authentication_eof_handler(error_msg)\n assert actual_msg == expected_msg\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"systemssh not supported on windows\")\ndef test_set_timeout():\n conn = SystemSSHTransport(\"localhost\")\n assert conn.timeout_transport == 5\n conn.set_timeout(1000)\n assert conn.timeout_transport == 1000\n conn.timeout_transport = 9999\n conn.set_timeout()\n assert conn.timeout_transport == 9999\n","sub_path":"tests/unit/transport/test_systemssh.py","file_name":"test_systemssh.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"100116791","text":"import mock\nimport string\nimport unittest\nimport random\nfrom pprint import pprint\nfrom bitshares import BitShares\nfrom bitshares.account import Account\nfrom bitsharesbase.operationids import getOperationNameForId\nfrom bitshares.amount import Amount\nfrom bitsharesbase.account import PrivateKey\nfrom bitsharesbase.asset_permissions import todict\nfrom bitshares.instance import set_shared_bitshares_instance\nfrom .fixtures import fixture_data, bitshares\n\nfrom graphenebase.signedtransactions import *\nfrom bitsharesbase.operations import *\nfrom graphenebase.base58 import Base58\n\nclass Testcases(unittest.TestCase):\n\n def setUp(self):\n fixture_data()\n\n # def test_connect(self):\n # bitshares.connect()\n\n # def test_set_default_account(self):\n # bitshares.set_default_account(\"init0\")\n\n # def test_info(self):\n # info = bitshares.info()\n # for key in ['current_witness',\n # 'head_block_id',\n # 'head_block_number',\n # 'id',\n # 'last_irreversible_block_num',\n # 'next_maintenance_time',\n # 'recently_missed_count',\n # 'time']:\n # self.assertTrue(key in info)\n\n # def test_finalizeOps(self):\n # tx1 = bitshares.new_tx()\n # tx2 = bitshares.new_tx()\n # bitshares.transfer(\"init1\", 1, \"BTS\", append_to=tx1)\n # bitshares.transfer(\"init1\", 2, \"BTS\", append_to=tx2)\n # bitshares.transfer(\"init1\", 3, \"BTS\", append_to=tx1)\n # tx1 = tx1.json()\n # tx2 = tx2.json()\n # ops1 = tx1[\"operations\"]\n # ops2 = tx2[\"operations\"]\n # self.assertEqual(len(ops1), 2)\n # self.assertEqual(len(ops2), 1)\n\n # def test_transfer(self):\n # import json\n\n # tx = bitshares.transfer(\n # \"1.2.101\", 1.33, \"BTS\", memo=\"Foobar\", account=\"init0\")\n\n # # # jtx = str(tx).replace(\"'\", \"\\\"\")\n # # jtx = json.loads( jtx )\n # # print( json.dumps( jtx, indent=4 ) )\n\n\n # self.assertEqual(\n # getOperationNameForId(tx[\"operations\"][0][0]),\n # \"transfer\"\n # )\n # op = tx[\"operations\"][0][1]\n # self.assertIn(\"memo\", op)\n # self.assertEqual(op[\"from\"], \"1.2.100\")\n # self.assertEqual(op[\"to\"], \"1.2.101\")\n # amount = Amount(op[\"amount\"])\n # self.assertEqual(float(amount), 1.33)\n\n\n # jtx = str(tx).replace(\"'\", \"\\\"\")\n # jtx = json.loads( jtx )\n # print( json.dumps( jtx, indent=4 ) )\n\n # def test_my_transfer(self):\n\n # top = \\\n # {\n # \"expiration\": \"2020-01-29T12:55:22\", # PointInTime??\n # \"ref_block_num\": 64476, # 
uint16\n # \"ref_block_prefix\": 1034601640, # uint32\n # \"operations\": [\n # Transfer(\n # **{\n # \"fee\": {\n # \"amount\": 91204,\n # \"asset_id\": \"1.3.0\"\n # },\n # \"from\": \"1.2.100\",\n # \"to\": \"1.2.101\",\n # \"amount\": {\n # \"amount\": 133000,\n # \"asset_id\": \"1.3.0\"\n # },\n # \"memo\": {\n # \"from\": \"BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV\",\n # \"to\": \"BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV\",\n # \"nonce\": 17580857522633640572,\n # \"message\": \"3b37e7316b5eecf206d8dd24015ef5f3\",\n # \"prefix\": \"BTS\"\n # },\n # \"extensions\": []\n # }\n # )\n # ],\n # \"extensions\": [], # set\n # }\n\n # tx = Signed_Transaction( **top )\n\n # wifs = [\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"]\n # chain = {\n # 'chain_id': '4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8',\n # 'core_symbol': 'BTS',\n # 'prefix': 'BTS'\n # }\n\n # import json\n\n # sig = tx.sign( wifs, chain )\n # j = json.loads(str(sig))\n # print( \"sig\\n\", json.dumps(j, indent=3) )\n\n # pub_key = PublicKey(\"02c0ded2bc1f1305fb0faac5e6c03ee3a1924234985427b6167ca569d13df435cf\", prefix=\"BTS\")\n\n # pub_keys = [pub_key]\n # verified = tx.verify( pub_keys, chain )\n # print( \"Verified:\", verified[0] == hexlify(bytes(pub_key)).decode() )\n\n # def test_my_transfer_without_memo(self):\n\n # top = \\\n # {\n # \"expiration\": \"2020-01-29T12:55:22\", # PointInTime??\n # \"ref_block_num\": 64476, # uint16\n # \"ref_block_prefix\": 1034601640, # uint32\n # \"operations\": [\n # Transfer(\n # **{\n # \"fee\": {\n # \"amount\": 91204,\n # \"asset_id\": \"1.3.0\"\n # },\n # \"from\": \"1.2.100\",\n # \"to\": \"1.2.101\",\n # \"amount\": {\n # \"amount\": 133000,\n # \"asset_id\": \"1.3.0\"\n # },\n # \"extensions\": []\n # }\n # )\n # ],\n # \"extensions\": [], # set\n # }\n\n # tx = Signed_Transaction( **top )\n\n # wifs = [\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"]\n # chain = {\n # 'chain_id': '4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8',\n # 'core_symbol': 'BTS',\n # 'prefix': 'BTS'\n # }\n\n # sig = tx.sign( wifs, chain )\n\n # def test_my_limit_order_create(self):\n # loc = \\\n # {\n # \"expiration\": \"2020-01-29T12:55:22\", # PointInTime??\n # \"ref_block_num\": 64476, # uint16\n # \"ref_block_prefix\": 1034601640, # uint32\n # \"operations\": [\n # Limit_order_create(\n # **{\n # \"fee\": {\"amount\": 100, \"asset_id\": \"1.3.0\"},\n # \"seller\": \"1.2.29\",\n # \"amount_to_sell\": {\"amount\": 100000, \"asset_id\": \"1.3.0\"},\n # \"min_to_receive\": {\"amount\": 10000, \"asset_id\": \"1.3.105\"},\n # \"expiration\": \"2016-05-18T09:22:05\",\n # \"fill_or_kill\": False,\n # \"memo\": {},\n # \"extensions\": [],\n # }\n # )\n # ],\n # \"extensions\": [], # set\n # }\n\n # tx = Signed_Transaction(**loc)\n # wifs = [\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"]\n # chain = {\n # 'chain_id': '4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8',\n # 'core_symbol': 'BTS',\n # 'prefix': 'BTS'\n # }\n\n # sig = tx.sign( wifs, chain )\n\n # def test_my_limit_order_cancel(self):\n # loc = \\\n # {\n # \"expiration\": \"2020-01-29T12:55:22\", # PointInTime??\n # \"ref_block_num\": 64476, # uint16\n # \"ref_block_prefix\": 1034601640, # uint32\n # \"operations\": [\n # Limit_order_cancel(**{\n # \"fee\": {\"amount\": 0, \"asset_id\": \"1.3.0\"},\n # \"fee_paying_account\": \"1.2.104\",\n # \"order\": \"1.7.51840\",\n # \"extensions\": [],\n # })\n # ],\n # \"extensions\": [], # 
set\n # }\n\n # tx = Signed_Transaction(**loc)\n # wifs = [\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"]\n # chain = {\n # 'chain_id': '4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8',\n # 'core_symbol': 'BTS',\n # 'prefix': 'BTS'\n # }\n\n # sig = tx.sign( wifs, chain )\n\n def test_account_create(self):\n acc = \\\n {\n \"expiration\": \"2020-01-29T12:55:22\", # PointInTime??\n \"ref_block_num\": 64476, # uint16\n \"ref_block_prefix\": 1034601640, # uint32\n \"operations\": [\n Account_create(**{\n \"fee\": {\"amount\": 1467634,\"asset_id\": \"1.3.0\"},\n \"registrar\": \"1.2.33\",\n \"referrer\": \"1.2.27\",\n \"referrer_percent\": 3,\n \"name\": \"foobar-f124\",\n \"owner\": {\n \"weight_threshold\": 1,\n \"account_auths\": [],\n 'key_auths': [\n ['BTS6pbVDAjRFiw6fkiKYCrkz7PFeL7XNAfefrsREwg8MKpJ9VYV9x',1],\n ['BTS6zLNtyFVToBsBZDsgMhgjpwysYVbsQD6YhP3kRkQhANUB4w7Qp',1]\n ],\n \"address_auths\": []\n },\n \"active\": {\n \"weight_threshold\": 1,\n \"account_auths\": [],\n 'key_auths': [\n ['BTS6pbVDAjRFiw6fkiKYCrkz7PFeL7XNAfefrsREwg8MKpJ9VYV9x', 1],\n ['BTS6zLNtyFVToBsBZDsgMhgjpwysYVbsQD6YhP3kRkQhANUB4w7Qp', 1],\n ['BTS8CemMDjdUWSV5wKotEimhK6c4dY7p2PdzC2qM1HpAP8aLtZfE7', 1]\n ],\n \"address_auths\": []\n },\n \"options\": {\n \"memo_key\": \"BTS5TPTziKkLexhVKsQKtSpo4bAv5RnB8oXcG4sMHEwCcTf3r7dqE\",\n \"voting_account\": \"1.2.5\",\n \"num_witness\": 0,\n \"num_committee\": 0,\n \"votes\": [\"1:0\"],\n \"extensions\": []\n },\n \"prefix\": \"BTS\",\n \"extensions\": []\n })\n ],\n \"extensions\": [], # set\n }\n\n tx = Signed_Transaction(**acc)\n wifs = [\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"]\n chain = {\n 'chain_id': '4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8',\n 'core_symbol': 'BTS',\n 'prefix': 'BTS'\n }\n\n sig = tx.sign( wifs, chain )\n\n # def test_create_account(self):\n # name = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))\n # key1 = PrivateKey()\n # key2 = PrivateKey()\n # key3 = PrivateKey()\n # key4 = PrivateKey()\n # tx = bitshares.create_account(\n # name,\n # registrar=\"init0\", # 1.2.100\n # referrer=\"init1\", # 1.2.101\n # referrer_percent=33,\n # owner_key=format(key1.pubkey, \"BTS\"),\n # active_key=format(key2.pubkey, \"BTS\"),\n # memo_key=format(key3.pubkey, \"BTS\"),\n # additional_owner_keys=[format(key4.pubkey, \"BTS\")],\n # additional_active_keys=[format(key4.pubkey, \"BTS\")],\n # additional_owner_accounts=[\"committee-account\"], # 1.2.0\n # additional_active_accounts=[\"committee-account\"],\n # proxy_account=\"init0\",\n # storekeys=False\n # )\n # self.assertEqual(\n # getOperationNameForId(tx[\"operations\"][0][0]),\n # \"account_create\"\n # )\n # op = tx[\"operations\"][0][1]\n # role = \"active\"\n # self.assertIn(\n # format(key4.pubkey, \"BTS\"),\n # [x[0] for x in op[role][\"key_auths\"]])\n # self.assertIn(\n # format(key4.pubkey, \"BTS\"),\n # [x[0] for x in op[role][\"key_auths\"]])\n # self.assertIn(\n # \"1.2.0\",\n # [x[0] for x in op[role][\"account_auths\"]])\n # role = \"owner\"\n # self.assertIn(\n # format(key4.pubkey, \"BTS\"),\n # [x[0] for x in op[role][\"key_auths\"]])\n # self.assertIn(\n # format(key4.pubkey, \"BTS\"),\n # [x[0] for x in op[role][\"key_auths\"]])\n # self.assertIn(\n # \"1.2.0\",\n # [x[0] for x in op[role][\"account_auths\"]])\n # self.assertEqual(\n # op[\"options\"][\"voting_account\"],\n # \"1.2.100\")\n # self.assertEqual(\n # op[\"registrar\"],\n # \"1.2.100\")\n # self.assertEqual(\n # op[\"referrer\"],\n # \"1.2.101\")\n 
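# (referrer_percent is passed as 33 percent but stored in hundredths of a percent, hence the 33 * 100 below)\n        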
# self.assertEqual(\n # op[\"referrer_percent\"],\n # 33 * 100)\n\n # def test_create_asset(self):\n # symbol = \"FOOBAR\"\n # precision = 7\n # max_supply = 100000\n # description = \"Test asset\"\n # is_bitasset = True\n # market_fee_percent = 0.1\n # max_market_fee = 10\n # blacklist_authorities = [\"init1\"]\n # blacklist_authorities_ids = [Account(a)[\"id\"] for a in blacklist_authorities]\n # blacklist_markets = [\"BTS\"]\n # blacklist_markets_ids = [\"1.3.0\"]\n # permissions = {\n # \"charge_market_fee\": True,\n # \"white_list\": True,\n # \"override_authority\": True,\n # \"transfer_restricted\": True,\n # \"disable_force_settle\": True,\n # \"global_settle\": True,\n # \"disable_confidential\": True,\n # \"witness_fed_asset\": True,\n # \"committee_fed_asset\": True,\n # }\n # flags = {\n # \"charge_market_fee\": False,\n # \"white_list\": False,\n # \"override_authority\": False,\n # \"transfer_restricted\": False,\n # \"disable_force_settle\": False,\n # \"global_settle\": False,\n # \"disable_confidential\": False,\n # \"witness_fed_asset\": False,\n # \"committee_fed_asset\": False,\n # }\n # tx = bitshares.create_asset(\n # symbol,\n # precision,\n # max_supply,\n # market_fee_percent=market_fee_percent,\n # max_market_fee=max_market_fee,\n # description=description,\n # is_bitasset=is_bitasset,\n # blacklist_authorities=blacklist_authorities,\n # blacklist_markets=blacklist_markets,\n # permissions=permissions,\n # flags=flags,\n # )\n # self.assertEqual(getOperationNameForId(tx[\"operations\"][0][0]), \"asset_create\")\n # op = tx[\"operations\"][0][1]\n # self.assertEqual(op[\"issuer\"], \"1.2.100\")\n # self.assertEqual(op[\"symbol\"], symbol)\n # self.assertEqual(op[\"precision\"], precision)\n # self.assertEqual(\n # op[\"common_options\"][\"max_supply\"], int(max_supply * 10 ** precision)\n # )\n # self.assertEqual(\n # op[\"common_options\"][\"market_fee_percent\"], int(market_fee_percent * 100)\n # )\n # self.assertEqual(\n # op[\"common_options\"][\"max_market_fee\"],\n # int(max_market_fee * 10 ** precision),\n # )\n # self.assertEqual(op[\"common_options\"][\"description\"], description)\n # self.assertEqual(\n # op[\"common_options\"][\"blacklist_authorities\"], blacklist_authorities_ids\n # )\n # self.assertEqual(\n # op[\"common_options\"][\"blacklist_markets\"], blacklist_markets_ids\n # )\n # self.assertEqual(\n # todict(op[\"common_options\"][\"issuer_permissions\"]), permissions\n # )\n # self.assertEqual(todict(op[\"common_options\"][\"flags\"]), flags)\n\n # def test_weight_threshold(self):\n\n # auth = {'account_auths': [['1.2.0', '1']],\n # 'extensions': [],\n # 'key_auths': [\n # ['BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n', 1],\n # ['BTS7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv', 1]],\n # 'weight_threshold': 3} # threshold fine\n # bitshares._test_weights_treshold(auth)\n # auth = {'account_auths': [['1.2.0', '1']],\n # 'extensions': [],\n # 'key_auths': [\n # ['BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n', 1],\n # ['BTS7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv', 1]],\n # 'weight_threshold': 4} # too high\n\n # with self.assertRaises(ValueError):\n # bitshares._test_weights_treshold(auth)\n\n # def test_allow(self):\n # tx = bitshares.allow(\n # \"BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n\",\n # weight=1,\n # threshold=1,\n # permission=\"owner\"\n # )\n # self.assertEqual(\n # getOperationNameForId(tx[\"operations\"][0][0]),\n # \"account_update\"\n # )\n # op = tx[\"operations\"][0][1]\n # 
self.assertIn(\"owner\", op)\n # self.assertIn(\n # [\"BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n\", '1'],\n # op[\"owner\"][\"key_auths\"])\n # self.assertEqual(op[\"owner\"][\"weight_threshold\"], 1)\n\n # def test_disallow(self):\n # with self.assertRaisesRegex(ValueError, \".*Changes nothing.*\"):\n # bitshares.disallow(\n # \"BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n\",\n # weight=1,\n # threshold=1,\n # permission=\"owner\"\n # )\n # with self.assertRaisesRegex(ValueError, \"Cannot have threshold of 0\"):\n # bitshares.disallow(\n # \"BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV\",\n # weight=1,\n # threshold=1,\n # permission=\"owner\"\n # )\n # bitshares.disallow(\n # \"BTS5i8bEmtnN4fP4jAsBe17z9CCuQcHLkRyTuRZXYZeN2kVCL1sXa\",\n # weight=1,\n # threshold=1,\n # permission=\"active\"\n # )\n\n # def test_update_memo_key(self):\n # tx = bitshares.update_memo_key(\"BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n\")\n # self.assertEqual(\n # getOperationNameForId(tx[\"operations\"][0][0]),\n # \"account_update\"\n # )\n # op = tx[\"operations\"][0][1]\n # self.assertEqual(\n # op[\"new_options\"][\"memo_key\"],\n # \"BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n\")\n\n # def test_approvewitness(self):\n # tx = bitshares.approvewitness(\"1.6.1\")\n # self.assertEqual(\n # getOperationNameForId(tx[\"operations\"][0][0]),\n # \"account_update\"\n # )\n # op = tx[\"operations\"][0][1]\n # self.assertIn(\n # \"1:0\",\n # op[\"new_options\"][\"votes\"])\n\n # def test_approvecommittee(self):\n # tx = bitshares.approvecommittee(\"1.5.0\")\n # self.assertEqual(\n # getOperationNameForId(tx[\"operations\"][0][0]),\n # \"account_update\"\n # )\n # op = tx[\"operations\"][0][1]\n # self.assertIn(\n # \"0:11\",\n # op[\"new_options\"][\"votes\"])\n\n\n\n# tx = bitshares.transfer(\n# \"1.2.101\", 1.33, \"BTS\", memo=\"Foobar\", account=\"init0\")\n# # self.assertEqual(\n# # getOperationNameForId(tx[\"operations\"][0][0]),\n# # \"transfer\"\n# # )\n# op = tx[\"operations\"][0][1]\n# # self.assertIn(\"memo\", op)\n# # self.assertEqual(op[\"from\"], \"1.2.100\")\n# # self.assertEqual(op[\"to\"], \"1.2.101\")\n# amount = Amount(op[\"amount\"])\n# # self.assertEqual(float(amount), 1.33)\n# print( tx )\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_bitshares.py","file_name":"test_bitshares.py","file_ext":"py","file_size_in_byte":19586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"168775477","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(color_codes=True)\n\n# Cyano\ntimes_cyano = [3*60 + 35, 255]\n# Ralstonia\ntimes_ralstonia = [3*60 + 23, 40] # MOCKUP\n\n# Titles\ntitles = ['Cyanobacteria', 'Ralstonia']\n\ndata = {'Time': {'Data': [times_cyano, times_ralstonia],\n 'ylabel': 'Minutes',\n 'file': 'times-uppmax.png'}}\n# Labels\nprograms = ['Quandenser pipeline\\n(parallel)', 'Waiting time']\ncolors = [(45,55,65,0), (160,160,160,0)]\ncolors = [(r/255, g/255, b/255, a) for (r,g,b,a) in colors]\n\nfor key in data.keys():\n fig, axes = plt.subplots(nrows=1, ncols=2,figsize=(15,7))\n plt.subplots_adjust(wspace=0.2)\n for index, d in enumerate(data[key]['Data']):\n d = pd.DataFrame({' ': programs, key: d})\n plt.subplot(1,2,index+1)\n plt.title(titles[index] + ' ' + key)\n barplot_fig = sns.barplot(x=' ', y=key, data=d, order=programs, palette=colors)\n plt.ylabel(data[key]['ylabel'])\n for item in barplot_fig.get_xticklabels():\n 
item.set_rotation(20)\n    plt.savefig(data[key]['file'])\n    plt.close()","sub_path":"doc/thesis/results/plots_uppmax.py","file_name":"plots_uppmax.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"63384355","text":"# Problem:\n# Given two strings text1 and text2, return the length of their longest common subsequence.\n#\n# A subsequence of a string is a new string built from the original by deleting some characters (possibly none) without changing the relative order of the remaining characters.\n# For example, \"ace\" is a subsequence of \"abcde\", while \"aec\" is not. A common subsequence of two strings is a subsequence shared by both.\n#\n# If the two strings have no common subsequence, return 0.\n\"\"\"\nApproach:\nFor dynamic-programming problems over two strings, the standard move is to define a DP table like the one below, because the state dp[i][j] can then be derived from earlier states.\nDefinition of the 2-d array: dp[i][j] is the length of the longest common subsequence of the first i items of s1 and the first j items of s2 (indices 0..j-1).\nThe decision is whether s1[i-1] == s2[j-1]:\nif they match, the transition is dp[i][j] = dp[i-1][j-1]+1\ninitialization: dp[0][...] = dp[...][0] = 0\n\"\"\"\n\"\"\"subsequence - 2-d array\"\"\"\n\n\nimport sys\nclass Solution:\n    def func(self, text1, text2):\n        dp = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]\n        for s1_seq in range(1, len(text1) + 1):\n            for s2_seq in range(1, len(text2) + 1):\n                if text1[s1_seq - 1] == text2[s2_seq - 1]:\n                    dp[s1_seq][s2_seq] = dp[s1_seq-1][s2_seq-1] + 1\n                else:\n                    dp[s1_seq][s2_seq] = max(dp[s1_seq-1][s2_seq], dp[s1_seq][s2_seq-1])\n        return dp[len(text1)][len(text2)]\n\na = Solution()\nprint(a.func([1,2,3,4,7],[2,3,4,5,7]))","sub_path":"leetcode/最长共同子序列(动态).py","file_name":"最长共同子序列(动态).py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"625870567","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nLocalization pattern classification of RNA molecules in 2-d.\n\"\"\"\n\nimport os\nimport argparse\nimport time\n\nimport numpy as np\n\nimport bigfish.stack as stack\nimport bigfish.classification as classification\n\n# TODO build tensorflow from source to avoid the next line\n# Your CPU supports instructions that this TensorFlow binary was not compiled\n# to use: AVX2 FMA\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = \"2\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\n\nif __name__ == '__main__':\n    print()\n    print(\"Running {0} file...\". 
format(os.path.basename(__file__)), \"\\n\")\n start_time = time.time()\n\n # parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path_input\",\n help=\"Path of the input data.\",\n type=str)\n parser.add_argument(\"log_directory\",\n help=\"Path of the log directory.\",\n type=str)\n parser.add_argument(\"--features\",\n help=\"Features used ('normal', 'distance' or \"\n \"'surface').\",\n type=str,\n default=\"normal\")\n parser.add_argument(\"--classes\",\n help=\"Set of classes to predict.\",\n type=str,\n default=\"all\")\n parser.add_argument(\"--batch_size\",\n help=\"Size of a batch.\",\n type=int,\n default=16)\n parser.add_argument(\"--nb_epochs\",\n help=\"Number of epochs to train the model.\",\n type=int,\n default=10)\n parser.add_argument(\"--nb_workers\",\n help=\"Number of workers to use.\",\n type=int,\n default=1)\n parser.add_argument(\"--multiprocessing\",\n help=\"Use multiprocessing.\",\n type=bool,\n default=False)\n args = parser.parse_args()\n\n # parameters\n input_shape = (224, 224)\n\n print(\"------------------------\")\n print(\"Input data: {0}\".format(args.path_input))\n print(\"Output logs: {0}\".format(args.log_directory), \"\\n\")\n\n print(\"------------------------\")\n print(\"Input shape: {0}\".format(input_shape))\n print(\"Features: {0}\".format(args.features))\n print(\"Batch size: {0}\".format(args.batch_size))\n print(\"Number of epochs: {0}\".format(args.nb_epochs))\n print(\"Number of workers: {0}\".format(args.nb_workers))\n print(\"Multiprocessing: {0}\".format(args.multiprocessing), \"\\n\")\n\n print(\"--- PREPROCESSING ---\", \"\\n\")\n\n # load data\n df = stack.read_pickle(args.path_input)\n print(\"Shape input dataframe (before preparation): {0}\".format(df.shape))\n\n # prepare data\n df, encoder, classes = stack.encode_labels(df,\n column_name=\"pattern_name\",\n classes_to_analyse=args.classes)\n nb_classes = len(classes)\n df = stack.filter_data(df, proportion_to_exclude=0.2)\n df = stack.balance_data(df, column_to_balance=\"pattern_name\")\n print(\"Number of classes: {0}\".format(nb_classes))\n print(\"Classes: {0}\".format(classes))\n print(\"Shape input dataframe (after preparation): {0}\".format(df.shape))\n print()\n\n # split data\n df_train, df_validation, df_test = stack.split_from_background(\n data=df,\n p_validation=0.2,\n p_test=0.2,\n logdir=args.log_directory)\n print(\"Split train|validation|test: {0}|{1}|{2}\"\n .format(df_train.shape[0], df_validation.shape[0], df_test.shape[0]))\n\n # build train generator\n train_generator = stack.Generator(\n data=df_train,\n method=args.features,\n batch_size=args.batch_size,\n input_shape=input_shape,\n augmentation=True,\n with_label=True,\n nb_classes=nb_classes,\n nb_epoch_max=None,\n shuffle=True,\n precompute_features=True)\n print(\"Number of train batches per epoch: {0}\"\n .format(train_generator.nb_batch_per_epoch))\n\n # build validation generator\n validation_generator = stack.Generator(\n data=df_validation,\n method=args.features,\n batch_size=args.batch_size,\n input_shape=input_shape,\n augmentation=False,\n with_label=True,\n nb_classes=nb_classes,\n nb_epoch_max=None,\n shuffle=True,\n precompute_features=True)\n print(\"Number of validation batches per epoch: {0}\"\n .format(validation_generator.nb_batch_per_epoch))\n\n # build test generator\n test_generator = stack.Generator(\n data=df_test,\n method=args.features,\n batch_size=args.batch_size,\n input_shape=input_shape,\n augmentation=False,\n with_label=True,\n 
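# the test split keeps augmentation and shuffling off, so predictions can be\n        # matched back to the rows of df_test\n        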
nb_classes=nb_classes,\n nb_epoch_max=None,\n shuffle=False,\n precompute_features=True)\n print(\"Number of test batches per epoch: {0}\"\n .format(test_generator.nb_batch_per_epoch))\n print()\n\n print(\"--- TRAINING ---\", \"\\n\")\n\n # build and fit model\n model = classification.SqueezeNet0(\n nb_classes=nb_classes,\n bypass=True,\n optimizer=\"adam\",\n logdir=args.log_directory)\n print(\"Model trained: {0}\".format(model.trained))\n model.print_model()\n model.fit_generator(train_generator, validation_generator, args.nb_epochs,\n args.nb_workers, args.multiprocessing)\n model.save_training_history()\n print(\"Model trained: {0}\".format(model.trained))\n print()\n\n print(\"--- EVALUATION ---\", \"\\n\")\n\n # evaluate model with train data\n train_generator.reset()\n loss, accuracy = model.evaluate_generator(train_generator,\n args.nb_workers,\n args.multiprocessing,\n verbose=0)\n print(\"Loss train: {0:.3f} | Accuracy train: {1:.3f}\"\n .format(loss, 100 * accuracy))\n\n # evaluate model with validation data\n validation_generator.reset()\n loss, accuracy = model.evaluate_generator(validation_generator,\n args.nb_workers,\n args.multiprocessing,\n verbose=0)\n print(\"Loss validation: {0:.3f} | Accuracy validation: {1:.3f}\"\n .format(loss, 100 * accuracy))\n\n # evaluate model with test data\n loss, accuracy = model.evaluate_generator(test_generator,\n args.nb_workers,\n args.multiprocessing,\n verbose=0)\n print(\"Loss test: {0:.3f} | Accuracy test: {1:.3f}\"\n .format(loss, 100 * accuracy), \"\\n\")\n\n print(\"--- PREDICTION ---\", \"\\n\")\n\n # make predictions on the testing dataset\n test_generator.reset()\n predictions, probabilities = model.predict_generator(test_generator, True)\n path = os.path.join(args.log_directory, \"test_predictions.npz\")\n np.savez(path, predictions=predictions, probabilities=probabilities)\n\n end_time = time.time()\n duration = int(round((end_time - start_time) / 60))\n print(\"Duration: {0} minutes.\".format(duration))\n","sub_path":"python_scripts/2d_pattern_classification.py","file_name":"2d_pattern_classification.py","file_ext":"py","file_size_in_byte":7408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"176590179","text":"\"\"\" PyTorch Network Training Demo\nTrains a simple CNN to make velocity fields incompressible.\nThis script runs for a certain number of steps before saving the trained network and halting.\n\"\"\"\nfrom phi.torch.flow import *\n\n\n# TORCH_BACKEND.set_default_device('GPU')\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = torch.nn.Conv2d(2, 4, 3, padding=1)\n self.conv2 = torch.nn.Conv2d(4, 4, 3, padding=1)\n self.conv3 = torch.nn.Conv2d(4, 2, 3, padding=1)\n\n def forward(self, x):\n x = torchf.relu(self.conv1(x))\n x = torchf.relu(self.conv2(x))\n x = self.conv3(x)\n return x\n\n\nnet = Net().to(TORCH_BACKEND.get_default_device().ref)\noptimizer = optim.Adam(net.parameters(), lr=1e-3)\n\n# Initialize variables for ModuleViewer\nDOMAIN = Domain(x=64, y=64)\nprediction = DOMAIN.vector_grid(0)\nprediction_div = DOMAIN.scalar_grid(0)\n\napp = ModuleViewer()\nfor step in app.range(100, warmup=1):\n # Load or generate training data\n data = DOMAIN.vector_grid(Noise(batch=8, vector=2))\n # Initialize optimizer\n optimizer.zero_grad()\n # Prediction\n pred_tensor = net(data.values.native('batch,vector,x,y'))\n prediction = DOMAIN.vector_grid(math.wrap(pred_tensor, 'batch,vector,x,y'))\n # Simulation\n 
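# divergence of the predicted velocity field; an incompressible field has\n        # zero divergence everywhere, so the loss below drives this term to zero\n        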
prediction_div = field.divergence(prediction)\n # Define loss\n loss = field.l2_loss(prediction_div) + field.l2_loss(prediction - data)\n app.info(f\"{step} \\tDiv: {field.mean(abs(prediction_div))} \\tDist: {math.vec_abs(field.mean(abs(prediction - data)))}\")\n # Compute gradients and update weights\n loss.native().backward()\n optimizer.step()\n\ntorch.save(net.state_dict(), 'torch_net.pth')\napp.info(\"Network saved.\")\n\n# To load the network: net.load_state_dict(torch.load('torch_net.pth'))\n","sub_path":"demos/network_training_pytorch.py","file_name":"network_training_pytorch.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"255779548","text":"from sqlalchemy import ForeignKey, Column, Integer, String, DateTime, Float\nfrom sqlalchemy.ext.declarative import declarative_base\nimport datetime\nBase = declarative_base()\n\n\nclass User(Base):\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True)\n user_id = Column(String)\n\nclass Log(Base):\n __tablename__ = 'logs'\n\n id = Column(Integer, primary_key=True)\n user_id = Column(String)\n text = Column(String)\n date = Column(String)","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"540111394","text":"import os\nimport re\nimport random\nimport numpy as np\nimport pickle\nimport sys\nimport torch\nfrom utils import read_word_embeds\nfrom torch.utils.data import Dataset, DataLoader\n\nclass TextDataset(Dataset):\n def __init__(self, dataset, prc='', test=False, wo_unlabel=False):\n _extend = '.without_unlabel' if wo_unlabel else ''\n if len(prc) > 0:\n prc = '.' 
+ prc\n        else:\n            assert(not wo_unlabel)\n        # Train files\n        train_label_file = 'data/%s/label_train%s.txt' % (dataset, prc)\n        train_label_file = os.path.join('..', train_label_file)\n        train_text_file = 'data/%s/text_train.txt' % (dataset)\n        train_text_file = os.path.join('..', train_text_file)\n        # Test files\n        test_label_file = 'data/%s/label_test.txt' % (dataset)\n        test_label_file = os.path.join('..', test_label_file)\n        test_text_file = 'data/%s/text_test.txt' % (dataset)\n        test_text_file = os.path.join('..', test_text_file)\n        # Word embeddings\n        emb_file = '%s_workspace%s/word.emb' % (dataset, prc+_extend)\n        emb_file = os.path.join('..', emb_file)\n\n        # Unused directories\n        sent_ebd_file = '%s_workspace%s/text.emb' % (dataset, prc)\n        sent_ebd_file = os.path.join('..', sent_ebd_file)\n        all_text_file = 'data/%s/text_all.txt' % (dataset)\n        all_text_file = os.path.join('..', all_text_file)\n\n        self.voc, self.emb = read_word_embeds(emb_file)\n        _temp = np.zeros((1,self.emb.shape[1]),dtype=self.emb.dtype)\n        # Add two more embeddings at the front and tail of\n        # word embedding for padding and UNK respectively.\n        self.emb = np.concatenate((_temp, self.emb, _temp.copy()))\n        self.dicts = {self.voc[i]:i+1 for i in range(len(self.voc))}\n        self.text_data = []\n        self.label_data = []\n        self.num_class = 1\n        # Switch between train dataset and test dataset\n        text_file = test_text_file if test else train_text_file\n        label_file = test_label_file if test else train_label_file\n        with open(text_file,'r',encoding='utf-8') as reader1, open(label_file) as reader2:\n            for line1, line2 in zip(reader1, reader2):\n                words = line1.strip().split()\n                _data = [self.dicts.get(word) for word in words]\n                line_data = [self.emb.shape[0]-1 if v is None else v for v in _data]\n                self.text_data.append(np.array(line_data,dtype=np.int64))\n                self.label_data.append(int(line2.strip()))\n        self.label_data = np.array(self.label_data, dtype=np.int64)\n        # Make labels start from 0\n        if np.min(self.label_data) != 0:\n            self.label_data -= np.min(self.label_data)\n        self.num_class = np.max(self.label_data)+1\n        text_len = map(lambda x:len(x), self.text_data)\n        sent_len = min(max(text_len), 300)\n        for v in self.text_data:\n            v.resize(sent_len, refcheck=False)\n        self.text_data = np.array(self.text_data)\n\n    def __len__(self):\n        return len(self.text_data)\n\n    def __getitem__(self, idx):\n        if torch.is_tensor(idx):\n            idx = idx.tolist()\n        return self.text_data[idx], self.label_data[idx]\n\n    def get_dict(self):\n        return self.dicts\n\n    def get_emb(self):\n        return self.emb\n","sub_path":"TextCNN/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"545677157","text":"#!/usr/bin/env python3\n\n'Bar chart'\n\nimport logging\nimport pandas as pd\nfrom pyecharts import Bar\n\nlogging.basicConfig(level=logging.INFO)\n\n# read in the data used to render the chart\ndf=pd.read_csv('city_yj.csv',encoding='gb18030')\n# sort by the dining-travelogue count, then slice off the top 20\ndf=df.sort_values(by=\"餐饮游记\",ascending=False)[0:20]\nlogging.info(df)\n# restore the original ordering\n# df=df.sort_values(by=\"城市\")\n\n# title_top: top position of the title (percentages allowed); width: chart width; height: chart height\n# title_pos controls the x position of the title, percentages allowed\nbar=Bar('餐饮类标签排名',title_top=\"10\",width=800,height=500)\n# is_splitline_show: show the ruler lines of the x axis\n# xaxis_rotate: unknown\n# legend_top: top position of the legend (percentages allowed)\n# legend_pos controls the x position of the legend, percentages allowed\n# is_stack: unknown\n# xaxis_interval: how many \"city\" entries to skip between x-axis labels\n# yaxix_min: unknown\nbar.add('游记数量',df['城市'],df['餐饮游记'],\n\tis_splitline_show=True,xaxis_rotate=30,legend_top=\"10\",\n\tis_stack=True,xaxis_interval=0,yaxix_min=4.2\n\t)\nbar.render('bar.html')","sub_path":"python-sample/vendor/pyecharts/_bar.py","file_name":"_bar.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"8336318","text":"from freenect import sync_get_depth as get_depth\nimport numpy as np\nimport cv2\nimport math\nimport os\nimport pygame\nimport time\nfrom timeit import default_timer as timer\nimport datetime\nfrom multiprocessing import Process, current_process, cpu_count, Pool, Lock, Queue\n# import webbrowser\nfrom pynput.keyboard import Key, Controller as keyboard_con\nfrom pynput.mouse import Button, Controller as mouse_con\n# from Queue import Queue as queue\n# from threading import Thread, Lock\n\nBLACK = (0,0,0)\nRED = (255,0,0)\nGREEN = (0,255,0)\nPURPLE = (255,0,255)\nBLUE = (0,0,255)\nWHITE = (255,255,255)\nYELLOW = (255,255,0)\n\nclass BlobAnalysis:\n    def __init__(self,contour):\n        self.contour = contour\n        self.contour_s = np.vstack(contour).squeeze()\n        self.contour_point = self.get_contour_point()\n        self.centroid = self.get_centroid()\n        self.convex_hull = self.get_convex_hull()\n        self.approx_hull_count = self.get_approx_hull_count()\n        self.id = -1\n        self.area = cv2.contourArea(self.contour)\n        self.deflect_count_90 = self.get_deflect_count(90)\n        self.isHand = self.check_isHand()\n        self.isGrab = self.isGrab()\n    \n    def set_id(self,i):\n        self.id = i\n    \n    def get_contour_point(self):\n        return np.array(self.contour_s).tolist()\n    \n    def get_centroid(self):\n        m = cv2.moments(self.contour)\n        cX = int(m['m10'] / m['m00'])\n        cY = int(m['m01'] / m['m00'])\n        return (cX, cY)\n    \n    def get_convex_hull(self):\n        convexHull = cv2.convexHull(self.contour)\n        epsilon = 0.015*cv2.arcLength(convexHull,True)\n        approx = cv2.approxPolyDP(convexHull,epsilon,True)\n        approx = np.vstack(approx).squeeze()\n        return np.array(approx).tolist()\n    \n    def get_approx_hull_count(self):\n        approx = self.convex_hull\n        return len(approx)\n    \n    def get_deflect_count(self,max_angle):\n        count = 0\n        hull = cv2.convexHull(self.contour,returnPoints = False)\n        defects = cv2.convexityDefects(self.contour, hull)\n        for i in range(defects.shape[0]):\n            s,e,f,d = defects[i,0]\n            start = self.contour_s[s]\n            end = self.contour_s[e]\n            far = self.contour_s[f]\n            a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)\n            b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)\n            c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)\n            angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57\n            if angle <= max_angle:\n                count += 1\n        return count\n    \n    def check_isHand(self):\n        if self.deflect_count_90 == 4 :\n            return True\n        else:\n            return False\n    \n    def set_isHand(self,logic):\n        self.isHand = logic\n\n    def set_isGrab(self,logic):\n        self.isGrab = logic\n    \n    def isGrab(self):\n\n        # contourArea = int(cv2.contourArea(cv2.convexHull(self.contour)))\n        # oldArea = int(self.area)\n        # # print(\"This is new area \",(0.7*contourArea), \"This is old area\", oldArea)\n        # if oldArea > int(0.7*contourArea) and oldArea < 1200 and oldArea > 600:\n        #     #print(\"Hand is Grab\")\n        #     return True\n        # else:\n        #     return False\n\n        if self.deflect_count_90 == 0 :\n            return True\n        else:\n            return False\n\n    def isNear(self,ref):\n        (x1,y1) = self.centroid\n        (x2,y2) = ref.centroid\n        dist = math.hypot(x2 - x1, y2 - y1)\n        if dist < 50:\n            return True\n        else:\n            return False\n    \n    
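# isSame is the tracker's identity test: two blobs count as the same hand\n    # when their centroids lie within 50 px (isNear) and their areas pass the\n    # loose ratio check below\n    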
def isSame(self,ref):\n if self.isNear(ref):\n if self.area < 1.5*ref.area or self.area < 0.1*ref.area:\n return True\n else:\n return False\n else:\n return False\n\ndef blobs_track(blobs,blob,i,n):\n # global blobs\n global blobs_movement\n if blobs_buffer[n] == []:\n if n+1 < buffer_size:\n blobs_track(blobs,blob,i,n+1)\n else:\n blob.set_id(i)\n blobs_movement[i] = [blob.centroid,blob.centroid]\n else:\n for j in range(len(blobs_buffer[n])):\n if blob.isSame(blobs_buffer[n][j]):\n new_id = blobs_buffer[n][j].id\n exist = False\n for k in blobs:\n if k.id == new_id:\n exist = True\n if exist:\n if n+1 < buffer_size:\n blobs_track(blobs,blob,i,n+1)\n else:\n new_id = 1\n for k in range(buffer_size):\n if max(old_id[k]or[0])+1 > new_id:\n new_id = max(old_id[k])+1\n blob.set_id(new_id)\n old_id[0].append(new_id)\n blobs_movement[new_id] = [blob.centroid,blob.centroid]\n else:\n \n blob.set_id(new_id)\n blobs_movement[new_id].append(blob.centroid)\n blobs_movement[new_id] = blobs_movement[new_id][-20:]\n\n if blob.id != -1:\n break\n if blob.id == -1:\n if n+1 < buffer_size:\n blobs_track(blobs,blob,i,n+1)\n else:\n new_id = 1\n for k in range(buffer_size):\n if max(old_id[k]or[0])+1 > new_id:\n new_id = max(old_id[k])+1\n blob.set_id(new_id)\n old_id[0].append(new_id)\n blobs_movement[new_id] = [blob.centroid,blob.centroid]\n return blob\n\ndef check_gesture(fps):\n global blobs\n global blobs_movement\n n_blobs = len(blobs)\n id_hand = []\n id_grab = []\n for blob in blobs:\n if blob.isHand:\n id_hand.append(blob.id)\n id_grab = []\n elif blob.isGrab:\n id_grab.append(blob.id)\n id_hand = []\n n_hand = len(id_hand)\n n_grab = len(id_grab)\n\n if n_hand == 0 and n_grab == 0:\n return \"undefined action\"\n\n elif n_hand == 1:\n n_frames = int(fps)+1\n vector_hand = blobs_movement[id_hand[0]][-n_frames:]\n weight = {\"swipe up\":0,\"swipe down\":0,\"swipe left\":0,\"swipe right\":0}\n\n for i in range(len(vector_hand)):\n if i == 0 :\n (x0,y0) = vector_hand[0]\n else:\n (x1,y1) = vector_hand[i]\n radian = math.atan2(y1-y0,x1-x0)\n degree = math.degrees(radian)\n dist = math.hypot(x1-x0,y1-y0)\n if dist > 8:\n if degree>-135 and degree<-45:\n weight[\"swipe up\"] += 1\n if degree>45 and degree<135:\n weight[\"swipe down\"] += 1\n if degree>135 or degree<-135:\n weight[\"swipe left\"] += 1\n if degree>-45 and degree<45:\n weight[\"swipe right\"] += 1\n (x0,y0) = (x1,y1)\n ans = \"\" + max(weight, key=weight.get)\n p70 = (7/10)*(n_frames-1)\n if weight[ans] > p70 :\n return ans\n else :\n return \"undefined action\"\n\n elif n_grab == 1:\n n_frames = int(fps)+1\n vector_grab = blobs_movement[id_grab[0]][-n_frames:]\n weight = {\"grab up\":0,\"grab down\":0,\"grab left\":0,\"grab right\":0}\n for i in range(len(vector_grab)):\n if i == 0 :\n (x0,y0) = vector_grab[0]\n else:\n (x1,y1) = vector_grab[i]\n radian = math.atan2(y1-y0,x1-x0)\n degree = math.degrees(radian)\n dist = math.hypot(x1-x0,y1-y0)\n if dist > 8:\n if degree>-135 and degree<-45:\n weight[\"grab up\"] += 1\n if degree>45 and degree<135:\n weight[\"grab down\"] += 1\n if degree>135 or degree<-135:\n weight[\"grab left\"] += 1\n if degree>-45 and degree<45:\n weight[\"grab right\"] += 1\n (x0,y0) = (x1,y1)\n ans = \"\" + max(weight, key=weight.get)\n p70 = (7/10)*(n_frames-1)\n if weight[ans] > p70 :\n return ans\n else :\n return \"undefined action\"\n\nblobs = []\nbuffer_size = 3\nblobs_buffer = [[]] * buffer_size\nold_id = [[]] * buffer_size\nblobs_movement = {}\nfps = 0\nstate = 0\nlast_gesture = \"undefined action\"\nt0 = 
time.time()\ntt0 = t0\ntt1 = t0\nused_time = 0\nc = 0\nstart_time = timer()\nkeyboard = keyboard_con()\nmouse = mouse_con()\nundefined_count = 0 # use to count undefined action\nstate_gesture = \"\" # use to save state of gesture to check\n\ndef get_contours_new(q1,q2,lock):\n\n while True:\n # update_id(lock)\n # print(\"Process 1 start \\n\")\n start = time.clock()\n global xsize,ysize\n (depth,_) = get_depth()\n # for i in range(100):\n depth = depth.astype(np.float32)\n depth = cv2.flip(depth, 1)\n depth = cv2.resize(depth,(xsize,ysize))\n depth = cv2.GaussianBlur(depth, (5,5), 0)\n\n depth = cv2.erode(depth, None, iterations=1)\n depth = cv2.dilate(depth, None, iterations=1)\n min_hand_depth = np.amin(depth)-10\n hand_depth = 80\n max_hand_depth = min_hand_depth + hand_depth\n if max_hand_depth > 700 :\n max_hand_depth = 700\n (_,BW) = cv2.threshold(depth, max_hand_depth, min_hand_depth, cv2.THRESH_BINARY_INV)\n BW = cv2.convertScaleAbs(BW)\n #BW = cv2.resize(BW,(xsize,ysize))\n _,cs,_ = cv2.findContours(BW,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n cs_f = []\n for i in range(len(cs)):\n if cv2.contourArea(cs[i]) > 500:\n cs_f.append(cs[i])\n del depth,BW\n clock_time = time.clock()-start\n lock.acquire()\n q1.put(cs_f)\n q2.put(clock_time)\n lock.release()\n # print(\"image processing time : %s \\n\" %(clock_time))\n # return cs_f\n\ndef update_id():\n\n # while True:\n # start = timer()\n # lock.acquire()\n global blobs\n # blobs = q_blobs.get()\n blobs = []\n global blobs_buffer\n global old_id\n # print(\"start update id\")\n for i in range(buffer_size):\n old_id[i] = []\n for j in range(len(blobs_buffer[i])):\n old_id[i].append(blobs_buffer[i][j].id)\n # lock.release()\n # q_blobs.put(blobs)\n # print(\"update id time : %s \\n\" %(timer()-start))\n\n # return blobs\n\n# def blob_tracking(cs,blobs,state):\ndef blob_tracking(q1,q2,lock):\n\n while True:\n # print(\"Process 2 start \\n\")\n start = time.clock()\n global blobs\n # blobs = update_id(blobs)\n # blobs = q_blobs.get()\n\n global blobs_buffer\n\n # blobs = update_id(blobs)\n update_id()\n\n # lock.acquire()\n cs = q1.get()\n # lock.release()\n # print(\"start blob tracking\")\n\n # lock.acquire()\n for i in range(len(cs)):\n blob = BlobAnalysis(cs[i])\n blob = blobs_track(blobs,blob,i,0)\n blobs.append(blob)\n\n # print(blobs)\n\n for i in range(buffer_size):\n if i == 0:\n blobs_buffer[i] = blobs\n else:\n blobs_buffer[i] = blobs_buffer[i-1]\n\n # lock.release()\n\n # state = keymap_new(blobs,state)\n img_pro_time = q2.get()\n get_gesture(start,img_pro_time)\n\n # blob_time = time.time()-start\n # img_pro_time = q2.get()\n # print(\"blob tracking time : %s \\n\" %(blob_time))\n\n # full_process_time = img_pro_time+blob_time\n\n # print(\"full process time : %s \\n\" %(full_process_time))\n\n # print(\"write row\")\n # file = open(\"time pipeline thread.csv\",\"a\")\n # file.write(str(datetime.datetime.now()) + \",\" + str(full_process_time) + \"\\n\")\n # file.close()\n\n # return blobs,state\n\ndef show_page():\n # os.system(\"sleep 5\")\n os.system(\"chromium-browser -no-sandbox --app=file:///home/pi/Desktop/kinect_hand_detection_and_tracking_2/src/index.html\")\n # webbrowser.open('file://' + os.path.realpath(\"/home/scarletdragon/Desktop/kinect_hand_detection_and_tracking_2/src/index.html\"))\n # keyboard.press(Key.f11)\n # keyboard.release(Key.f11)\n\n# def keymap_new(blobs,state):\n\ndef key_mapping(gesture):\n\n if gesture == \"swipe up\":\n mouse.scroll(0 , -5)\n if gesture == \"swipe down\":\n mouse.scroll(0 , 
5)\n if gesture == \"swipe left\":\n keyboard.press(Key.right)\n keyboard.release(Key.right)\n if gesture == \"swipe right\":\n keyboard.press(Key.left)\n keyboard.release(Key.left)\n\n if gesture == \"grab down\":\n keyboard.press('x')\n keyboard.release('x')\n # print(\"Grab up\")\n if gesture == \"grab left\" :\n keyboard.press('z')\n keyboard.release('z')\n # print(\"Grab left\")\n if gesture == \"grab right\" :\n keyboard.press('c')\n keyboard.release('c') \n # print(\"Grab right\")\n\ndef get_gesture(start,img_pro_time):\n\n global fps\n global last_gesture\n global t0,tt0,tt1,used_time,c,start_time\n global state\n global undefined_count\n global state_gesture\n\n if state == 0:\n t0 = time.time()\n tt0 = t0\n tt1 = t0\n used_time = 0\n c = 0\n start_time = timer()\n\n undefined_count = 0 # use to count undefined action\n state_gesture = \"\" # use to save state of gesture to check\n\n # print(\"start state\")\n state = 1\n\n # t0 = time.clock()\n gesture = check_gesture(fps)\n t1 = time.time()\n tc0 = time.clock()\n fps = 1/(t1-t0)\n print(\"FPS : %s\" %fps)\n # print(\"Gesture : %s\" %gesture)\n # lock.release()\n\n if gesture == \"undefined action\":\n\n end_time = timer()\n diff_time = end_time - start_time\n # print(\"Different time : %s\" %diff_time) \n\n if diff_time >= 1:\n state_gesture = \"undefined action\"\n start_time = end_time\n\n last_gesture = \"undefined action\"\n\n # if undefined_count == 7: # if found 7 undefined actions\n # undefined_count = 0 # reset count\n # state_gesture = \"undefined action\" # set state to undefined\n\n # else:\n # undefined_count += 1 # update count\n\n last_gesture = \"undefined action\" # set last gesture to undefined\n \n elif last_gesture != gesture: # if latest gesture is not same as last gesture\n\n if last_gesture == \"undefined action\": # if last gesture is undefined\n\n if state_gesture != gesture: # check previous state before undefined if same as latest not do anything\n key_mapping(gesture) # if not map gesture\n \n else:\n key_mapping(gesture) # if last gesture is other gesture then map gesture\n\n last_gesture = gesture # set last gesture with latest gesture\n state_gesture = gesture # set state with latest gesture\n # undefined_count = 0 # reset count\n\n end_time = timer()\n diff_time = end_time - start_time\n # print(\"Action time : %s\" %diff_time)\n start_time = end_time\n \n tt1 = time.time()\n # tc1 = time.clock()\n # used_time = used_time + (tc1 - tc0)\n # tc0 = tc1\n t0 = t1\n # c = c + 1\n\n blob_time = time.clock()-start\n # print(\"blob tracking time : %s \\n\" %(blob_time))\n\n full_process_time = img_pro_time+blob_time\n print(\"full process time : %s \\n\" %(full_process_time))\n\n if(tt1>tt0+1):\n # print(\"write row\")\n # percent = used_time*100/(tt1 - tt0)\n # fps_a = c / used_time\n file = open(\"clock time pipeline rpi.csv\",\"a\")\n file.write(str(datetime.datetime.now()) + \",\" + str(gesture) + \",\" + str(last_gesture) + \",\" + str(state_gesture) + \",\" + str(full_process_time) + \",\" + str(tt1 - tt0) + \"\\n\")\n # file.write(str(datetime.datetime.now()) + \",\" + str(fps_a) + \",\" + str(percent) + \",\" + str(used_time) + \",\" + str(tt1 - tt0) + \"\\n\")\n file.close()\n # used_time = 0\n # c = 0\n tt0 = t0\n\n # lock.release()\n # print(\"keymap time : %s \\n\" %(timer()-start))\n\n # return state\n\nxsize,ysize = 280,210\n# xsize,ysize = 640,480\n\nif __name__ == \"__main__\":\n\n lock = Lock()\n\n q1 = Queue()\n q2 = Queue()\n\n show_page()\n\n p0 = 
Process(target=get_contours_new,args=(q1,q2,lock,))\n p1 = Process(target=blob_tracking,args=(q1,q2,lock,))\n\n p0.start()\n p1.start()\n\n p0.join()\n p1.join()\n\n","sub_path":"keymap_modify_new.py","file_name":"keymap_modify_new.py","file_ext":"py","file_size_in_byte":16894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"538838356","text":"import time\nfrom typing import List, Tuple, Dict\nfrom collections import Counter\nfrom functools import partial\n\nimport networkx as nx\n\nfrom .utils import graph_factory\n\n\ndef celery_test(seconds: int = 1): # pragma: no cover\n start = time.time()\n print(\"entering long task..\")\n time.sleep(seconds)\n end = time.time()\n\n secs = end - start\n\n # raise ValueError('test fail')\n # make this work right\n\n return {\"message\": f\"task took {secs:.1f} seconds.\"}\n\n\ndef find_cycle(G: nx.Graph, **kwargs) -> List:\n \"\"\"Wraps networkx.find_cycle to return empty list\n if no cycle is found.\n \"\"\"\n try:\n return list(nx.find_cycle(G, **kwargs))\n\n except nx.exception.NetworkXNoCycle:\n return []\n\n\ndef validate_network(G: nx.Graph, **kwargs) -> Tuple[List, List, List, List]:\n \"\"\"Checks if there is a cycle, and prints a helpful\n message if there is.\n \"\"\"\n _partial_sort = partial(sorted, key=lambda x: str(x))\n\n # force cycles to be ordered so that we can test against them\n simplecycles = list(map(_partial_sort, nx.simple_cycles(G)))\n\n findcycles = find_cycle(G, **kwargs)\n\n multiple_outs = [(k, v) for k, v in G.out_degree() if v > 1]\n\n duplicate_edges = []\n if len(G.edges()) != len(set(G.edges())):\n duplicate_edges = [k for k, v in Counter(G.edges()).items() if v > 1]\n\n return simplecycles, findcycles, multiple_outs, duplicate_edges\n\n\ndef is_valid(G):\n return all([len(_) == 0 for _ in validate_network(G)])\n\n\ndef validate_network_from_dict(graph: Dict) -> Dict:\n G = graph_factory(graph)\n res = validate_network(G)\n\n if all([len(_) == 0 for _ in res]):\n return {\"status\": \"valid\"}\n\n else:\n simplecycles, findcycles, multiple_outs, duplicate_edges = res\n return {\n \"status\": \"invalid\",\n \"node_cycles\": simplecycles,\n \"edge_cycles\": findcycles,\n \"multiple_out_edges\": multiple_outs,\n \"duplicate_edges\": duplicate_edges,\n }\n","sub_path":"nereid/nereid/network/network_validate.py","file_name":"network_validate.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"495207968","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n 脚本名:\nCreated on 2019--\n@author:David Yisun\n@group:data\n\"\"\"\nfrom optparse import OptionParser\n\n# 外部参数\ndef get_para():\n usage = 'Volvo projects initialize'\n parser = OptionParser(usage=usage)\n parser.add_option('--a', action='store_false', dest='build_mysql', default=True)\n parser.add_option('--proxy_ip_init', action='store_true', dest='proxy_ip_init')\n parser.add_option('--vehicle_types_init', action='store_true', dest='vehicle_types_init')\n parser.add_option('--source', action='store', dest='source', type='string', default='*', help='the name of network station that will be spidered, sep by \",\" ')\n parser.add_option('--vehicle_types', action='store', dest='vehicle_types', type='string', default='*', help='the vehicle_types that will be spidered, sep by \",\" ')\n parser.add_option('--part', action='store', dest='part', type='string', default='article,koubei,comment', help='the part of 
content that will be spidered, sep by \",\" ')\n\n    option, args = parser.parse_args()\n    res = {'build_mysql': option.build_mysql,\n           'proxy_ip_init': option.proxy_ip_init,\n           'vehicle_types_init': option.vehicle_types_init,\n           'source': option.source.split(',') if option.source != \"*\" else option.source,\n           'vehicle_types': option.vehicle_types.split(',') if option.vehicle_types != \"*\" else option.vehicle_types,\n           'part': option.part.split(',')}\n    return res\n\npara = get_para()\n\nprint(para)","sub_path":"Volvo_demo/demo/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"625403149","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\n@app.route('/bmi/<int:weight>/<int:height>')\ndef bmi(weight, height):\n    result = weight/((height/100)**2)\n    return render_template('bmi.html',bmi=result)\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"Fundamentals/WebModule/Web2/hw_solution/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"299431120","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@Title   :   Test prediction\n@File    :   pred_gen.py \n@Author  :   vincent\n@Time    :   2020/5/15 10:41 AM\n@Version :   1.0 \n'''\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nfrom model.config import cfg\nfrom model.test import im_detect\nfrom model.nms_wrapper import nms\n\nfrom utils.timer import Timer\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, cv2\nimport argparse\nimport glob\n\nfrom nets.vgg16 import vgg16\nfrom nets.resnet_v1 import resnetv1\nimport image_utils\n\nCLASSES = ('__background__',\n           'table')\n\n\ndef get_files(data_path):\n    \"\"\"\n    Collect the images in the directory and its subdirectories.\n    :param data_path:\n    :return:\n    \"\"\"\n    files = []\n    exts = ['jpg', 'png', 'jpeg', 'JPG','bmp']\n    for ext in exts:\n        # glob.glob returns all matching file names\n        # files one and two directory levels deep are both picked up\n        files.extend(glob.glob(os.path.join(data_path, '*.{}'.format(ext))))\n        files.extend(glob.glob(os.path.join(data_path, '*', '*.{}'.format(ext))))\n    return files\n\n\ndef vis_detections(im, class_name, dets, thresh=0.5):\n    \"\"\"Draw detected bounding boxes.\"\"\"\n    inds = np.where(dets[:, -1] >= thresh)[0]\n    if len(inds) == 0:\n        return\n\n    im = im[:, :, (2, 1, 0)]\n    fig, ax = plt.subplots(figsize=(12, 12))\n    ax.imshow(im, aspect='equal')\n    for i in inds:\n        bbox = dets[i, :4]\n        score = dets[i, -1]\n\n        ax.add_patch(\n            plt.Rectangle((bbox[0], bbox[1]),\n                          bbox[2] - bbox[0],\n                          bbox[3] - bbox[1], fill=False,\n                          edgecolor='red', linewidth=3.5)\n            )\n        ax.text(bbox[0], bbox[1] - 2,\n                '{:s} {:.3f}'.format(class_name, score),\n                bbox=dict(facecolor='blue', alpha=0.5),\n                fontsize=14, color='white')\n\n    ax.set_title(('{} detections with '\n                  'p({} | box) >= {:.1f}').format(class_name, class_name,\n                                                  thresh),\n                  fontsize=14)\n    plt.axis('off')\n    plt.tight_layout()\n    plt.draw()\n\n\ndef draw(im,box):\n    cv2.polylines(im, [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(255, 255, 0),\n                  thickness=2)\n\ndef demo(sess, net, im,im_name):\n    \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n    base_name = os.path.basename(im_name)\n\n    # Detect all object classes and regress object bounds\n    timer = Timer()\n    timer.tic()\n    scores, boxes = im_detect(sess, net, im)\n    timer.toc()\n    # TODO it seems to return 300 proposals for every image? then filter by score? drop overlapping boxes?\n    #\n    print(\"Image prediction results:\",boxes.shape,boxes[0],scores[0])\n    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n\n    # Visualize detections for each class\n    CONF_THRESH = 0.8 #\n    NMS_THRESH = 0.3 #\n    for cls_ind, cls in enumerate(CLASSES[1:]):\n        cls_ind += 1 # because we skipped background\n        #TODO 4-8? meant to pick out only this class's boxes, but the logic is not obvious\n        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n        # boxes should hold two points for each class, arranged in class order\n        # shape (300,4); this takes the last four columns\n        print(\"cls_boxes:\",cls_boxes.shape,cls_boxes[0])\n        cls_scores = scores[:, cls_ind]\n        # pick out the scores for this class?\n        print(\"cls_scores:\",cls_scores.shape,cls_scores[0])\n\n        # stack the boxes and scores into (300,5)\n        dets = np.hstack((cls_boxes,\n                          cls_scores[:, np.newaxis])).astype(np.float32)\n\n        #TODO NMS drops the boxes whose IOU is above the threshold; what does it return? presumably the surviving indices (0-300)\n        keep = nms(dets, NMS_THRESH)\n        print(\"keep:\",keep)\n        dets = dets[keep, :]\n        # print(\"dets:\",dets)\n        # print(\"cls:\",cls)\n        # boxes + scores that survive the IOU filtering\n        # TODO write output\n        for det in dets:\n            if det[-1] > 0.8:\n                box = det[:-1]\n                new_box=[box[0],box[1],box[2],box[1],box[2],box[3],box[0],box[3]]\n                print(\"Drawing box:\",new_box)\n                new_box = np.array(new_box)\n                draw(im,new_box)\n        cv2.imwrite(\"data/pred/output1/\"+str(cls)+base_name ,im)\n    # vis_detections(im, cls, dets, thresh=CONF_THRESH)\n\n\nif __name__ == '__main__':\n    cfg.TEST.HAS_RPN = True  # Use RPN for proposals\n\n    # model path\n    tfmodel = \"output/res101/gen_train/default/res101_faster_rcnn_iter_70000.ckpt\"\n\n    if not os.path.isfile(tfmodel + '.meta'):\n        raise IOError(('{:s} not found.\\nDid you download the proper networks from '\n                       'our server and place them properly?').format(tfmodel + '.meta'))\n\n    # set config\n    tfconfig = tf.ConfigProto(allow_soft_placement=True)\n    tfconfig.gpu_options.allow_growth=True\n\n    # init session\n    sess = tf.Session(config=tfconfig)\n\n    net = resnetv1(num_layers=101)\n\n    # # load model\n    # net.create_architecture(\"TEST\", imdb.num_classes, tag='default',\n    #                         anchor_scales=cfg.ANCHOR_SCALES,\n    #                         anchor_ratios=cfg.ANCHOR_RATIOS)\n\n    net.create_architecture(\"TEST\", 2,\n                          tag='default', anchor_scales=[8, 16, 32])\n    saver = tf.train.Saver()\n    saver.restore(sess, tfmodel)\n\n    print('Loaded network {:s}'.format(tfmodel))\n\n    input_path = \"data/pred/input\"\n    #TODO\n    im_names = get_files(input_path)\n    print(\"Loaded images:\",im_names)\n    for im_name in im_names:\n        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n        print('Demo for {}'.format(im_name))\n        # Load the demo image\n        im = cv2.imread(im_name)\n        im1, im2 = image_utils.split_two(im)\n        im_name = os.path.basename(im_name)\n        demo(sess, net, im1,\"1_\"+im_name)\n        demo(sess, net, im2,\"2_\"+im_name)\n","sub_path":"tools/pred_gen.py","file_name":"pred_gen.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"15847313","text":"## begin license ##\n#\n# \"Meresco Oai Common\" are utils to support \"Meresco Oai\".\n#\n# Copyright (C) 2007-2008 SURF Foundation. http://www.surf.nl\n# Copyright (C) 2007-2010 Seek You Too (CQ2) http://www.cq2.nl\n# Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl\n# Copyright (C) 2009 Delft University of Technology http://www.tudelft.nl\n# Copyright (C) 2009 Tilburg University http://www.uvt.nl\n# Copyright (C) 2012, 2015, 2018 Seecr (Seek You Too B.V.) 
http://seecr.nl\n# Copyright (C) 2012 Stichting Bibliotheek.nl (BNL) http://www.bibliotheek.nl\n# Copyright (C) 2015 Koninklijke Bibliotheek (KB) http://www.kb.nl\n#\n# This file is part of \"Meresco Oai Common\"\n#\n# \"Meresco Oai Common\" is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# \"Meresco Oai Common\" is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with \"Meresco Oai Common\"; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n#\n## end license ##\n\nfrom ._partition import Partition\n\ndef resumptionTokenFromString(s):\n try:\n return ResumptionToken.fromString(s)\n except ResumptionTokenException:\n return None\n\nclass ResumptionTokenException(Exception):\n pass\n\nclass ResumptionToken(object):\n SHORT = {\n 'm': 'metadataPrefix',\n 'c': 'continueAfter',\n 'f': 'from_',\n 'u': 'until',\n 's': 'set_',\n }\n ALL_SHORT = dict(p='partition', **SHORT)\n\n def __init__(self, metadataPrefix='', continueAfter='0', from_='', until='', set_='', partition=None):\n self.metadataPrefix = metadataPrefix\n self.continueAfter = continueAfter\n self.from_ = from_ or '' #blank out \"None\"\n self.until = until or ''\n self.set_ = set_ or ''\n self.partition = Partition.create(partition)\n\n def __str__(self):\n return '|'.join(\"%s%s\" % (key, value) for key, value in\n ((key, getattr(self, attr)) for key, attr in\n self.ALL_SHORT.items())\n if value is not None)\n\n def __repr__(self):\n return repr(str(self))\n\n def __eq__(self, other):\n return \\\n ResumptionToken == other.__class__ and \\\n self.metadataPrefix == other.metadataPrefix and \\\n self.continueAfter == other.continueAfter and \\\n self.from_ == other.from_ and \\\n self.until == other.until and \\\n self.set_ == other.set_ and \\\n self.partition == other.partition\n\n @classmethod\n def fromString(cls, s):\n resumptDict = dict(((part[0], part[1:]) for part in s.split('|') if part))\n if not set(cls.SHORT.keys()).issubset(set(resumptDict.keys())):\n raise ResumptionTokenException()\n return cls(**dict((cls.ALL_SHORT[k],v) for k,v in resumptDict.items()))\n\n__all__ = ['ResumptionToken', 'ResumptionTokenException', 'resumptionTokenFromString']\n","sub_path":"meresco/oaicommon/_resumptiontoken.py","file_name":"_resumptiontoken.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"20708737","text":"import atexit\r\nimport os\r\nimport pty\r\nimport select\r\nimport socket\r\nfrom concurrent.futures import ProcessPoolExecutor\r\nfrom contextlib import closing\r\nfrom pathlib import Path\r\n\r\nfrom madbg import client\r\nfrom madbg.consts import STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO\r\nfrom madbg.tty_utils import PTY\r\n\r\nJOIN_TIMEOUT = 5\r\nCONNECT_TIMEOUT = 5\r\nSCRIPTS_PATH = Path(__file__).parent / 'scripts'\r\n\r\n\r\ndef run_in_process(func, *args, **kwargs):\r\n return ProcessPoolExecutor(1).submit(func, *args, **kwargs)\r\n\r\n\r\ndef _run_script(script, start_with_ctty, args, kwargs):\r\n \"\"\"\r\n Meant to be 
called inside a python subprocess, do NOT call directly.\r\n \"\"\"\r\n enter_pty(start_with_ctty)\r\n result = script(*args, **kwargs)\r\n # Python-spawned subprocesses do not call exit funcs - https://stackoverflow.com/q/34506638/2907819\r\n atexit._run_exitfuncs()\r\n return result\r\n\r\n\r\ndef run_script_in_process(script, start_with_ctty, *args, **kwargs):\r\n return run_in_process(_run_script, script, start_with_ctty, args, kwargs)\r\n\r\n\r\ndef find_free_port() -> int:\r\n \"\"\" A suggested way of finding a free port on the local machine. Prone to race conditions. \"\"\"\r\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\r\n s.bind(('', 0))\r\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n return s.getsockname()[1]\r\n\r\n\r\ndef enter_pty(attach_as_ctty, connect_stdio_to_pty=True):\r\n \"\"\"\r\n To be used in a subprocess that wants to be run inside a pty.\r\n Enters a new session, opens a new pty and sets the pty to be its controlling tty.\r\n If connect_output_to_pty is True, the process's stdio will be redirected to the pty's\r\n slave interface.\r\n\r\n :return: The master fd for the pty.\r\n \"\"\"\r\n os.setsid()\r\n master_fd, slave_fd = pty.openpty()\r\n if attach_as_ctty:\r\n os.close(os.open(os.ttyname(slave_fd), os.O_RDWR)) # Set the PTY to be our CTTY\r\n if connect_stdio_to_pty:\r\n for fd_to_override in (STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO):\r\n os.dup2(slave_fd, fd_to_override)\r\n return master_fd, slave_fd\r\n\r\n\r\ndef run_client(port: int, debugger_input: bytes):\r\n \"\"\" Run client process and return client's tty output \"\"\"\r\n master_fd, slave_fd = enter_pty(True, connect_stdio_to_pty=False)\r\n os.write(master_fd, debugger_input)\r\n client.connect_to_debugger(port=port, timeout=CONNECT_TIMEOUT, in_fd=slave_fd, out_fd=slave_fd)\r\n data = b''\r\n while select.select([master_fd], [], [], 0)[0]:\r\n data += os.read(master_fd, 4096)\r\n PTY(master_fd, slave_fd).close()\r\n return data\r\n","sub_path":"tests/system/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"66461898","text":"\nimport acm\nfrom DealPackageDevKit import DealPackageDefinition, Date, Settings, Text\nfrom inspect import cleandoc\n\n@Settings(GraphApplicable=False, \n SheetApplicable=False)\nclass AttributeIntroductionDefinition(DealPackageDefinition):\n \"\"\"This Example introduces a date attribute, and meta data\"\"\"\n \n startDate = Date( defaultValue='0d',\n label='Start Date',\n validate='@StartDateValidate',\n transform='@PeriodToDateTransform',\n toolTip='@GenericToolTip',\n onChanged='@Print1|Print2' )\n\n doc = Text( defaultValue=cleandoc(__doc__),\n editable=False,\n height=80) \n\n # ####################### #\n # Interface Overrides #\n # ####################### #\n \n def CustomPanes(self):\n print ('CustomPanes')\n return [ \n {'General' : \"\"\"\n startDate;\n fill;\n hbox{DESCRIPTION;\n doc;\n );\t\n \"\"\"\n }\n ] \n \n def IsValid(self, exceptionAccumulator, aspect):\n exceptionAccumulator('This example is used for demonstration and can not be saved.')\n\n # ####################### #\n # Attribute Callbacks #\n # ####################### #\n\n def Print1(self, attributeName, old, new, userInputAttributeName):\n print ('This is printed from \"Print1\"')\n print (' Attribute name:', attributeName)\n print (' Old value:', old)\n print (' New value:', new)\n print (' User input attribute name:', 
userInputAttributeName)\n\n def Print2(self, attributeName, old, new, userInputAttributeName):\n print ('This is printed from \"Print2\"')\n\n def GenericToolTip(self, attributeName):\n return 'This parameter has name ' + attributeName\n\n def PeriodToDateTransform(self, attributeName, newDate):\n date = newDate\n if acm.Time().PeriodSymbolToDate(newDate):\n date = acm.Time().PeriodSymbolToDate(newDate)\n print ('TransformStartPeriodToDate', newDate, date)\n return date\n \n def StartDateValidate(self, attributeName, value):\n # raise exception here to prevent value from beeing applied to attribute\n print ('ValidateStartDate')\n","sub_path":"Extensions/Deal Package Examples/FPythonCode/AttributeIntroduction_DPE.py","file_name":"AttributeIntroduction_DPE.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"403436296","text":"import bpy\nfrom ..utils import get_preferences\n\n\ndef boomsmash_settings(self, context):\n prefs = get_preferences()\n # userpref = context.user_preferences\n # system = userpref.system\n scene = context.scene\n\n layout = self.layout\n row = layout.row()\n\n layout.row()\n box = layout.box()\n box.label(text=\"DDA Playblast Settings :\")\n\n row = box.row()\n row.label(text=\"Animator:\")\n row.prop(scene.tk_anim, \"boomsmash_animator_name\", text=\"\")\n row = box.row()\n col = box.column()\n row = col.row()\n row.prop(prefs, \"use_autoplay\")\n row.prop(prefs, \"use_custom_path\")\n row = col.row()\n row.prop(prefs, \"bypass_staging\")\n\n col = box.column()\n row = col.row()\n row.label(text=\"Custom Playblast Path:\")\n row.active = prefs.use_custom_path\n row.prop(prefs, \"custom_path\", text=\"\")\n col = box.column()\n col.operator(\"wm.save_userpref\")\n\n\ndef boomsmash_stage_settings(self, context):\n scn = bpy.context.scene\n layout = self.layout\n row = layout.row(align=True)\n # scene = bpy.context.scene\n # objects = scene.objects\n # cam_obj = [o for o in objects if o.type == \"CAMERA\"]\n\n # if cam_obj:\n # cam_obj = cam_obj[0]\n\n # view3d = context.space_data.region_3d\n # prefs = get_preferences()\n row.alert = True if scn.tk_anim.boomsmash_tk_select == \"tk4\" else False\n row.prop(scn.tk_anim, \"boomsmash_tk_select\", text=\"\")\n\n row.operator(\n \"tk_anim.render_boomsmash_stage\",\n text=\"Playblast\",\n icon=\"RENDER_ANIMATION\",\n )\n row.operator(\n \"render.play_rendered_anim\", text=\"PlayBlast View\", icon=\"PLAY\"\n )\n # row.prop(prefs, \"use_transparent\")\n","sub_path":"tk_anim/tk_anim_settings.py","file_name":"tk_anim_settings.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"274987863","text":"import parser\nparser = parser.parser()\n\ndef test_partition_raw01():\n \"\"\"Tests full and correctly formed message\"\"\"\n line = parser.partition_raw(\":name!user@host command middle :rest of the message\")\n assert line['name'] == 'name'\n assert line['user'] == 'user'\n assert line['host'] == 'host'\n assert line['command'] == 'command'\n assert line['middle'] == 'middle'\n assert line['trailing'] == 'rest of the message'\n\ndef test_partition_raw02():\n \"\"\"Tests ircnet style server message\"\"\"\n line = parser.partition_raw(\":irc.server.foo 042 username some attributes :human readable message\")\n assert line['name'] == 'irc.server.foo'\n assert line['user'] == None\n assert line['host'] == None\n assert line['command'] == '042'\n 
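# for server numerics the addressed nick lands in 'middle' and the remaining\n    # parameters stay in 'trailing'\n    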
assert line['middle'] == 'username'\n assert line['trailing'] == 'some attributes :human readable message'\n\ndef test_admin_input_incorrect_admin():\n \"\"\"Tests admin with incorrect name\"\"\"\n line = parser.partition_raw(\":foobar!foo@bar PRIVMSG SLPyBot :mummo123 foo\")\n response = parser.admin_input(line)\n assert response == [('send_data', 'PRIVMSG foobar :This operation is not allowed by you')]\n\ndef test_admin_input_no_command():\n \"\"\"Tests admin with no command\"\"\"\n line = parser.partition_raw(\":StealthyLoner!foo@bar PRIVMSG SLPyBot :mummo123\")\n response = parser.admin_input(line)\n assert response == [('send_data', 'PRIVMSG StealthyLoner :Please provide a command')] \n\ndef test_admin_input_incorrect_password():\n for n in reversed(xrange(5)):\n yield check_admin_input, n\n \ndef check_admin_input(n):\n \"\"\"Tests admin with incorrect password\"\"\"\n line = parser.partition_raw(\":StealthyLoner!foo@bar PRIVMSG SLPyBot :foo foo\")\n response = parser.admin_input(line)\n if n > 0:\n assert response[0] == ('send_data', 'PRIVMSG StealthyLoner :You have been configured as admin, but you did not provide the correct password')\n assert response[1] == ('send_data', 'PRIVMSG StealthyLoner :You have ' + str(n) + ' tries left.')\n else:\n assert response[0] == ('send_data', 'PRIVMSG StealthyLoner :Sorry :(')\n\ndef test_own_input_join():\n line = parser.partition_raw(\":SLPyBot!foo@bar JOIN :#test_channel\")\n response = parser.own_input(line)\n assert response == [('add_ircchannel', '#test_channel')]\n \ndef test_own_input_part():\n line = parser.partition_raw(\":SLPyBot!foo@bar PART #test_channel :part message\")\n response = parser.own_input(line)\n assert response == [('remove_ircchannel', '#test_channel')]\n\ndef test_own_input_blank():\n line = parser.partition_raw(\":SLPyBot!foo@bar PRIVMSG #channel :foobar\")\n response = parser.own_input(line)\n assert response == [None]\n\ndef test_server_input():\n line = parser.partition_raw(\":irc.foobar.net 001 foobar :Welcome\")\n response = parser.server_input(line)\n assert response == [('set_registered', True)]\n \n line = parser.partition_raw(\":irc.foobar.net 403 foobar :No such channel\")\n response = parser.server_input(line)\n assert response == [None]\n \n line = parser.partition_raw(\":irc.foobar.net 405 foobar :Too many channels\")\n response = parser.server_input(line)\n assert response == [None]\n \n line = parser.partition_raw(\":irc.foobar.net 471 foobar :Channel limit reached\")\n response = parser.server_input(line)\n assert response == [None]\n \n line = parser.partition_raw(\":irc.foobar.net 473 foobar :Channel is invite only\")\n response = parser.server_input(line)\n assert response == [None]\n \n line = parser.partition_raw(\":irc.foobar.net 474 foobar :You are banned from the channel\")\n response = parser.server_input(line)\n assert response == [None]\n \n line = parser.partition_raw(\":irc.foobar.net 432 foobar :foobar\")\n response = parser.server_input(line)\n assert response[0][0] == 'set_nick'\n\n line = parser.partition_raw(\":irc.foobar.net 433 foobar :foobar\")\n response = parser.server_input(line)\n assert response[0][0] == 'set_nick'\n \n line = parser.partition_raw(\":irc.foobar.net 421 foobar :Unknown command\")\n response = parser.server_input(line)\n assert response == [None]\n\n line = parser.partition_raw(\":irc.foobar.net 000 foobar :Numerical not in list\")\n response = parser.server_input(line)\n assert response == [None]\n\ndef test_notice_input_no_handler():\n \"\"\"Tests notice 
with no handlers\"\"\"\n line = parser.partition_raw(\":foo!foo@bar NOTICE foobar :foo\")\n response = parser.notice_input(line)\n assert response == []\n\ndef test_channel_input_no_handler():\n \"\"\"Tests channel input with no handlers\"\"\"\n line = parser.partition_raw(\":foo!foo@bar PRIVMSG foobar :foo\")\n response = parser.channel_input(line)\n assert response == []\n\ndef test_ctcp_input():\n \"\"\"Tests CTCP input with no handlers\"\"\"\n line = parser.partition_raw(\":foo!foo@bar PRIVMSG foobar :\u0001CTCP\u0001\")\n response = parser.ctcp_input(line)\n assert response == []\n\ndef test_input_line():\n # Server input\n response = parser.input_line(':irc.cs.hut.fi 433 foobar :foobar')\n assert response[0][0] == 'set_nick'\n # CTCP input\n response = parser.input_line(':foo!foo@bar PRIVMSG foobar :\u0001CTCP\u0001')\n assert response == []\n # Own input\n response = parser.input_line(':SLPyBot!foo@bar JOIN :#foobar')\n assert response == [('add_ircchannel', '#foobar')]\n # Admin input\n response = parser.input_line(':foobar!foo@bar PRIVMSG SLPyBot :foo foo foo')\n assert response == [('send_data', 'PRIVMSG foobar :This operation is not allowed by you')]\n # Notice input\n response = parser.input_line(':foo!foo@bar NOTICE SLPyBot :foobar')\n assert response == []\n # Channel input\n response = parser.input_line(':foo!foo@bar PRIVMSG #foobar :foo')\n assert response == []\n # Something else\n response = parser.input_line(':foo!foo@bar foobar foobar :foo')\n assert response == []\n","sub_path":"test_Parser.py","file_name":"test_Parser.py","file_ext":"py","file_size_in_byte":5922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"49818129","text":"import logging\n\nfrom aiohttp import web\nfrom aiohttp_middlewares.error import error_context\n\nfrom rororo import openapi_context, OperationTableDef\nfrom .data import GITHUB_REPOSITORIES\nfrom .decorators import login_required\nfrom .exceptions import ObjectDoesNotExist\n\n\nlogger = logging.getLogger(__name__)\noperations = OperationTableDef()\n\n\nasync def error(request: web.Request) -> web.Response:\n with error_context(request) as context:\n logger.error(context.message, exc_info=True)\n return web.json_response(context.data, status=context.status)\n\n\n@operations.register\n@login_required\nasync def list_repositories(request: web.Request) -> web.Response:\n with openapi_context(request) as context:\n username = context.parameters.header[\"X-GitHub-Username\"]\n return web.json_response(\n list((GITHUB_REPOSITORIES.get(username) or {}).values())\n )\n\n\n@operations.register\n@login_required\nasync def retrieve_repository(request: web.Request) -> web.Response:\n with openapi_context(request) as context:\n owner = context.parameters.path[\"owner\"]\n repository = (GITHUB_REPOSITORIES.get(owner) or {}).get(\n context.parameters.path[\"name\"]\n )\n\n if not repository:\n raise ObjectDoesNotExist(\"Repository\")\n\n return web.json_response(repository)\n","sub_path":"examples/hobotnica/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"329006899","text":"\"\"\"\nAuthors:\nRandy Heiland (heiland@iu.edu)\nAdam Morrow, Grant Waldrow, Drew Willis, Kim Crevecoeur\nDr. 
Paul Macklin (macklinp@iu.edu)\n\n--- Versions ---\n0.1 - initial version\n\"\"\"\n\nimport sys\n# import xml.etree.ElementTree as ET # https://docs.python.org/2/library/xml.etree.elementtree.html\nfrom PyQt5 import QtCore, QtGui\nfrom PyQt5.QtWidgets import QFrame,QApplication,QWidget,QTabWidget,QFormLayout,QLineEdit, QHBoxLayout,QVBoxLayout,QRadioButton,QLabel,QCheckBox,QComboBox,QScrollArea\n\nclass QHLine(QFrame):\n def __init__(self):\n super(QHLine, self).__init__()\n self.setFrameShape(QFrame.HLine)\n self.setFrameShadow(QFrame.Sunken)\n\n\nclass Config(QWidget):\n def __init__(self):\n super().__init__()\n # global self.config_params\n\n self.xml_root = None\n\n # self.tab = QWidget()\n # self.tabs.resize(200,5)\n \n #-------------------------------------------\n label_width = 110\n domain_value_width = 100\n value_width = 60\n label_height = 20\n units_width = 70\n\n self.scroll = QScrollArea() # might contain centralWidget\n\n self.config_params = QWidget()\n self.vbox = QVBoxLayout()\n self.vbox.addStretch(0)\n\n\n #============ Domain ================================\n label = QLabel(\"Domain (micron)\")\n label.setFixedHeight(label_height)\n label.setStyleSheet(\"background-color: orange\")\n label.setAlignment(QtCore.Qt.AlignCenter)\n self.vbox.addWidget(label)\n\n hbox = QHBoxLayout()\n\n label = QLabel(\"Xmin\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.xmin = QLineEdit()\n self.xmin.setFixedWidth(domain_value_width)\n self.xmin.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.xmin)\n\n label = QLabel(\"Xmax\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.xmax = QLineEdit()\n self.xmax.setFixedWidth(domain_value_width)\n self.xmax.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.xmax)\n\n label = QLabel(\"dx\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.xdel = QLineEdit()\n self.xdel.setFixedWidth(value_width)\n self.xdel.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.xdel)\n\n self.vbox.addLayout(hbox)\n #----------\n hbox = QHBoxLayout()\n label = QLabel(\"Ymin\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.ymin = QLineEdit()\n self.ymin.setFixedWidth(domain_value_width)\n self.ymin.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.ymin)\n\n label = QLabel(\"Ymax\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.ymax = QLineEdit()\n self.ymax.setFixedWidth(domain_value_width)\n self.ymax.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.ymax)\n\n label = QLabel(\"dy\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.ydel = QLineEdit()\n self.ydel.setFixedWidth(value_width)\n self.ydel.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.ydel)\n\n self.vbox.addLayout(hbox)\n #----------\n hbox = QHBoxLayout()\n label = QLabel(\"Zmin\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.zmin = QLineEdit()\n self.zmin.setFixedWidth(domain_value_width)\n self.zmin.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.zmin)\n\n label = QLabel(\"Zmax\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.zmax = 
QLineEdit()\n self.zmax.setFixedWidth(domain_value_width)\n self.zmax.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.zmax)\n\n label = QLabel(\"dz\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n self.zdel = QLineEdit()\n self.zdel.setFixedWidth(value_width)\n self.zdel.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.zdel)\n\n self.vbox.addLayout(hbox)\n #----------\n hbox = QHBoxLayout()\n self.virtual_walls = QCheckBox(\"Virtual walls\")\n # self.motility_enabled.setAlignment(QtCore.Qt.AlignRight)\n # label.setFixedWidth(label_width)\n hbox.addWidget(self.virtual_walls)\n self.vbox.addLayout(hbox)\n\n # self.vbox.addWidget(QHLine())\n\n #============ Misc ================================\n label = QLabel(\"Misc runtime parameters\")\n label.setFixedHeight(label_height)\n label.setStyleSheet(\"background-color: orange\")\n label.setAlignment(QtCore.Qt.AlignCenter)\n self.vbox.addWidget(label)\n\n hbox = QHBoxLayout()\n # hbox.setFixedHeight(label_width)\n\n label = QLabel(\"Max Time\")\n # label_width = 210\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n\n self.max_time = QLineEdit()\n # self.max_time.setFixedWidth(200)\n self.max_time.setFixedWidth(domain_value_width)\n self.max_time.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.max_time)\n\n label = QLabel(\"min\")\n label.setFixedWidth(200)\n label.setAlignment(QtCore.Qt.AlignLeft)\n hbox.addWidget(label)\n\n label = QLabel(\" \") # weird, but nicer layout\n label.setFixedWidth(200)\n hbox.addWidget(label)\n\n self.vbox.addLayout(hbox)\n #----------\n hbox = QHBoxLayout()\n\n label = QLabel(\"# threads\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n\n self.num_threads = QLineEdit()\n # self.num_threads.setFixedWidth(value_width)\n self.num_threads.setFixedWidth(domain_value_width)\n self.num_threads.setValidator(QtGui.QIntValidator())\n hbox.addWidget(self.num_threads)\n\n label = QLabel(\" \") # weird, but nicer layout\n label.setFixedWidth(200) # 70?\n hbox.addWidget(label)\n\n label = QLabel(\" \")\n label.setFixedWidth(200)\n hbox.addWidget(label)\n\n self.vbox.addLayout(hbox)\n #----------\n hbox = QHBoxLayout()\n\n label = QLabel(\"output folder\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n\n self.folder = QLineEdit()\n # self.num_threads.setFixedWidth(value_width)\n self.folder.setFixedWidth(domain_value_width)\n # self.folder.setValidator(QtGui.QTex())\n hbox.addWidget(self.folder)\n\n label = QLabel(\" \") # weird, but nicer layout\n label.setFixedWidth(200) # 70?\n hbox.addWidget(label)\n\n label = QLabel(\" \")\n label.setFixedWidth(200)\n hbox.addWidget(label)\n\n self.vbox.addLayout(hbox)\n\n #------------------\n hbox = QHBoxLayout()\n\n label = QLabel(\"Save data:\")\n label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignLeft)\n hbox.addWidget(label)\n\n #------\n self.save_svg = QCheckBox(\"SVG\")\n # self.motility_2D.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(self.save_svg)\n\n label = QLabel(\"every\")\n # label_width = 210\n # label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n\n self.svg_interval = QLineEdit()\n self.svg_interval.setFixedWidth(value_width)\n self.svg_interval.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.svg_interval)\n\n label = 
QLabel(\"min\")\n # label.setFixedWidth(units_width)\n label.setAlignment(QtCore.Qt.AlignLeft)\n hbox.addWidget(label)\n\n #------\n self.save_full = QCheckBox(\"Full\")\n # self.motility_2D.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(self.save_full)\n\n label = QLabel(\"every\")\n # label_width = 210\n # label.setFixedWidth(label_width)\n label.setAlignment(QtCore.Qt.AlignRight)\n hbox.addWidget(label)\n\n self.full_interval = QLineEdit()\n self.full_interval.setFixedWidth(value_width)\n self.full_interval.setValidator(QtGui.QDoubleValidator())\n hbox.addWidget(self.full_interval)\n\n label = QLabel(\"min\")\n # label.setFixedWidth(units_width)\n label.setAlignment(QtCore.Qt.AlignLeft)\n hbox.addWidget(label)\n\n self.vbox.addLayout(hbox)\n\n #============ Cells IC ================================\n label = QLabel(\"Initial conditions of cells (x,y,z, type)\")\n label.setFixedHeight(label_height)\n label.setStyleSheet(\"background-color: orange\")\n label.setAlignment(QtCore.Qt.AlignCenter)\n\n self.vbox.addWidget(label)\n self.cells_csv = QCheckBox(\"config/cells.csv\")\n self.vbox.addWidget(self.cells_csv)\n\n #--------------------------\n # Dummy widget for filler??\n # label = QLabel(\"\")\n # label.setFixedHeight(1000)\n # # label.setStyleSheet(\"background-color: orange\")\n # label.setAlignment(QtCore.Qt.AlignCenter)\n # self.vbox.addWidget(label)\n\n self.vbox.addStretch()\n\n\n #==================================================================\n self.config_params.setLayout(self.vbox)\n\n self.scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n self.scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n self.scroll.setWidgetResizable(True)\n\n self.scroll.setWidget(self.config_params) # self.config_params = QWidget()\n\n self.layout = QVBoxLayout(self)\n\n self.layout.addWidget(self.scroll)\n\n\n # @QtCore.Slot()\n # def save_xml(self):\n # # self.text.setText(random.choice(self.hello))\n # pass\n\n\n def fill_gui(self):\n\n self.xmin.setText(self.xml_root.find(\".//x_min\").text)\n self.xmax.setText(self.xml_root.find(\".//x_max\").text)\n self.xdel.setText(self.xml_root.find(\".//dx\").text)\n\n self.ymin.setText(self.xml_root.find(\".//y_min\").text)\n self.ymax.setText(self.xml_root.find(\".//y_max\").text)\n self.ydel.setText(self.xml_root.find(\".//dy\").text)\n \n self.zmin.setText(self.xml_root.find(\".//z_min\").text)\n self.zmax.setText(self.xml_root.find(\".//z_max\").text)\n self.zdel.setText(self.xml_root.find(\".//dz\").text)\n \n self.max_time.setText(self.xml_root.find(\".//max_time\").text)\n \n self.num_threads.setText(self.xml_root.find(\".//omp_num_threads\").text)\n\n self.folder.setText(self.xml_root.find(\".//folder\").text)\n \n self.svg_interval.setText(self.xml_root.find(\".//SVG//interval\").text)\n # NOTE: do this *after* filling the mcds_interval, directly above, due to the callback/constraints on them??\n if self.xml_root.find(\".//SVG//enable\").text.lower() == 'true':\n self.save_svg.setChecked(True)\n else:\n self.save_svg.setChecked(False)\n\n self.full_interval.setText(self.xml_root.find(\".//full_data//interval\").text)\n if self.xml_root.find(\".//full_data//enable\").text.lower() == 'true':\n self.save_full.setChecked(True)\n else:\n self.save_full.setChecked(False)\n\n\n\n # Read values from the GUI widgets and generate/write a new XML\n def fill_xml(self):\n indent1 = '\\n'\n indent6 = '\\n '\n indent8 = '\\n '\n indent10 = '\\n '\n\n # print(\"config_tab: fill_xml: xmin=\",self.xmin.text() )\n 
self.xml_root.find(\".//x_min\").text = self.xmin.text()\n self.xml_root.find(\".//x_max\").text = self.xmax.text()\n self.xml_root.find(\".//dx\").text = self.xdel.text()\n\n self.xml_root.find(\".//y_min\").text = self.ymin.text()\n self.xml_root.find(\".//y_max\").text = self.ymax.text()\n self.xml_root.find(\".//dy\").text = self.ydel.text()\n\n self.xml_root.find(\".//z_min\").text = self.zmin.text()\n self.xml_root.find(\".//z_max\").text = self.zmax.text()\n self.xml_root.find(\".//dz\").text = self.zdel.text()\n\n # if not self.xml_root.find(\".//virtual_wall_at_domain_edge\"):\n # opts = self.xml_root.find(\".//options\")\n # if not opts:\n # print(\"------ Missing in config .xml. HALT.\")\n # sys.exit(1)\n\n\n # rwh: I ended up *requiring* the original .xml (which is copied) have the element.\n bval = \"false\"\n if self.virtual_walls.isChecked():\n bval = \"true\"\n self.xml_root.find(\".//virtual_wall_at_domain_edge\").text = bval\n\n # rwh: Not sure why I couldn't get this to work, i.e., to *insert* the element (just one time) if it didn't exist.\n # vwall = self.xml_root.find(\".//virtual_wall_at_domain_edge\")\n # # if self.xml_root.find(\".//virtual_wall_at_domain_edge\"):\n # if False:\n # print(\"\\n------ FOUND virtual_wall_at_domain_edge.\")\n # # if not opts.find(\".//virtual_wall_at_domain_edge\"):\n # # if not opts.find(\"virtual_wall_at_domain_edge\"):\n # if self.virtual_walls.isChecked():\n # # self.xml_root.find(\".//virtual_wall_at_domain_edge\").text = 'true'\n # vwall.text = 'true'\n # else:\n # vwall.text = 'false'\n # # self.xml_root.find(\".//virtual_wall_at_domain_edge\").text = 'false'\n # else:\n # print(\"\\n------ virtual_wall_at_domain_edge NOT found. Create it.\")\n # # todo: create it? Child of root.\n # print(\"------config_tab.py: no virtual_wall_at_domain_edge tag\")\n # # \n # # false\n # # true\t\t\n # # \t\n # # elm = ET.Element(\"options\") \n # # # elm.tail = '\\n' + indent6\n # # elm.tail = indent6\n # # elm.text = indent6\n # opts = self.xml_root.find(\".//options\")\n # bval = \"false\"\n # if self.virtual_walls.isChecked():\n # bval = \"true\"\n # subelm = ET.SubElement(opts, 'virtual_wall_at_domain_edge')\n # subelm.text = bval\n # subelm.tail = indent8\n # opts.insert(0,subelm)\n\n self.xml_root.find(\".//max_time\").text = self.max_time.text()\n self.xml_root.find(\".//omp_num_threads\").text = self.num_threads.text()\n self.xml_root.find(\".//folder\").text = self.folder.text()\n\n if self.save_svg.isChecked():\n self.xml_root.find(\".//SVG//enable\").text = 'true'\n else:\n self.xml_root.find(\".//SVG//enable\").text = 'false'\n self.xml_root.find(\".//SVG//interval\").text = self.svg_interval.text()\n\n if self.save_full.isChecked():\n self.xml_root.find(\".//full_data//enable\").text = 'true'\n else:\n self.xml_root.find(\".//full_data//enable\").text = 'false'\n self.xml_root.find(\".//full_data//interval\").text = self.full_interval.text()\n\n if self.cells_csv.isChecked():\n self.xml_root.find(\".//initial_conditions//cell_positions\").attrib['enabled'] = 'true'\n else:\n self.xml_root.find(\".//initial_conditions//cell_positions\").attrib['enabled'] = 'false'\n\n # TODO: verify valid type (numeric) and range?\n # xml_root.find(\".//x_min\").text = str(self.xmin.value)\n # xml_root.find(\".//x_max\").text = str(self.xmax.value)\n # xml_root.find(\".//dx\").text = str(self.xdelta.value)\n # xml_root.find(\".//y_min\").text = str(self.ymin.value)\n # xml_root.find(\".//y_max\").text = str(self.ymax.value)\n # 
xml_root.find(\".//dy\").text = str(self.ydelta.value)\n # xml_root.find(\".//z_min\").text = str(self.zmin.value)\n # xml_root.find(\".//z_max\").text = str(self.zmax.value)\n # xml_root.find(\".//dz\").text = str(self.zdelta.value)\n\n # xml_root.find(\".//max_time\").text = str(self.tmax.value)\n\n # xml_root.find(\".//omp_num_threads\").text = str(self.omp_threads.value)\n\n # xml_root.find(\".//SVG\").find(\".//enable\").text = str(self.toggle_svg.value)\n # xml_root.find(\".//SVG\").find(\".//interval\").text = str(self.svg_interval.value)\n # xml_root.find(\".//full_data\").find(\".//enable\").text = str(self.toggle_mcds.value)\n # xml_root.find(\".//full_data\").find(\".//interval\").text = str(self.mcds_interval.value)","sub_path":"PhysiCell-model-builder/bin/config_tab.py","file_name":"config_tab.py","file_ext":"py","file_size_in_byte":17276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"233468520","text":"import sqlite3\r\nimport create_db\r\n\r\n#path_to_db = r\"D:\\project price\\db\\\\\" + str(db_file) + \".db\"\r\n\r\n\r\ndef create_table_for_category(category, path_to_db, is_stock):\r\n\ttry:\r\n\t\tconn = sqlite3.connect(path_to_db)\r\n\texcept (sqlite3.OperationalError):\r\n\t\tcreate_db.create_new_db(path_to_db)\r\n\t\tconn = sqlite3.connect(path_to_db)\r\n\tc = conn.cursor()\r\n\tif is_stock == True:\r\n\t\tcreate_table = \"CREATE TABLE IF NOT EXISTS [\" + category + \"] (title, old_price, new_price, product_weight, product_url, image_urls, product_availability BOOLEAN)\"\r\n\telse:\r\n\t\tcreate_table = \"CREATE TABLE IF NOT EXISTS [\" + category + \"] (title, price, product_weight, product_url, image_urls, product_availability BOOLEAN)\"\r\n\tc.execute(create_table)\r\n\tconn.commit()\r\n\r\n\r\ndef set_new_row_data(category, data_el, path_to_db, is_stock):\r\n\tconn = sqlite3.connect(path_to_db)\r\n\tc = conn.cursor()\r\n\tif is_stock == True:\r\n\t\tinsert_data = \"INSERT INTO [\" + category + \"] (title, old_price, new_price, product_weight, product_url, image_urls, product_availability) VALUES (?, ?, ?, ?, ?, ?, ?)\"\r\n\telse:\r\n\t\tinsert_data = \"INSERT INTO [\" + category + \"] (title, price, product_weight, product_url, image_urls, product_availability) VALUES (?, ?, ?, ?, ?, ?)\"\r\n\tdata_tuple = (data_el[0], data_el[1], data_el[2], data_el[3], data_el[4], data_el[5])\r\n\t\r\n\tc.execute(insert_data, data_tuple)\r\n\tconn.commit()\r\n\r\nif __name__ == '__main__':\r\n\tcreate_table_for_category(\"М'ясо, риба, птиця\")\r\n#title, beautiful_prices, product_weight, product_url, image_urls, product_availability ({category})\r\n","sub_path":"change_info_in_silpo_db.py","file_name":"change_info_in_silpo_db.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"2936477","text":"from share_box import common\nimport json\n\nURI = '/api/upload'\n\n\ndef get_upload_file_details(file_id):\n \"\"\"get details of Uploaded file to ShareBox.\"\"\"\n params = {'fileId': file_id}\n resp = common.hit_url(URI, params)\n if resp:\n resp_data, resp_code = resp\n if resp_code == 200:\n return resp_data\n elif resp_code == 400:\n return \"Bad Request\"\n elif resp_code == 401:\n return \"Authentication failure\"\n elif resp_code == 403:\n return \"Access forbidden\"\n elif resp_code == 500:\n return \"Internal server error.\"\n\n\ndef upload_file(file_name, file_size, file_hash, file_):\n \"\"\"Delete the file from ShareBox.\n\n 
:parameter\n file_name (str): name of the file\n file_size (str): file size\n file_hash (str): file hash\n file_ (str): file handler\n\n :return\n str: success or failure message.\n \"\"\"\n params = {'name': file_name,\n 'size': file_size,\n 'hash': file_hash,\n 'file': str(file_)}\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n }\n resp = common.hit_url(URI, json.dumps(params).encode(), headers, method_type='POST')\n if resp:\n resp_data, resp_code = resp\n if resp_code == 200:\n return resp_data\n elif resp_code == 400:\n return \"Bad Request\"\n elif resp_code == 401:\n return \"Authentication failure\"\n elif resp_code == 403:\n return \"Access forbidden\"\n elif resp_code == 500:\n return \"Internal server error.\"\n\n\ndef update_file_status(file_id):\n \"\"\"Update the upload file status.\n\n :parameter\n file_id (str): file to be shared\n bytes_completed (str): bytes completed\n\n :return\n str: success or failure message based on status\n \"\"\"\n params = {'fileId': file_id,\n 'bytesCompleted': '100'}\n\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n }\n\n resp = common.hit_url(URI, json.dumps(params).encode(), headers, method_type='PUT')\n if resp:\n resp_data, resp_code = resp\n if resp_code == 200:\n return resp_data\n elif resp_code == 400:\n return \"Bad Request\"\n elif resp_code == 401:\n return \"Authentication failure\"\n elif resp_code == 403:\n return \"Access forbidden\"\n elif resp_code == 404:\n return \"File not found\"\n elif resp_code == 500:\n return \"Internal server error.\"\n","sub_path":"share_box/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"385726177","text":"import numpy as np\nimport itertools\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.neighbors import BallTree\nimport pickle\nimport glob\nimport cv2\nfrom VLADlib.Descriptors import *\nfrom tqdm import tqdm\nimport multiprocessing\n\ndef getSingleImageDescriptor(args):\n imagePath, functionHandleDescriptor = args\n im = cv2.imread(imagePath)\n kp, des = functionHandleDescriptor(im)\n return des\n\ndef getDescriptors(path, functionHandleDescriptor, threads):\n print('Searching for images...')\n files = glob.glob(path + \"/*.jpg\")\n print('Found {} images...'.format(len(files)))\n\n print('Running with {} threads'.format(threads))\n\n if threads == 1:\n descriptors = []\n for imagePath in tqdm(files, desc = \"Calculating descriptors\"):\n im = cv2.imread(imagePath)\n kp, des = functionHandleDescriptor(im)\n if des is not None:\n descriptors.append(des)\n\n #flatten list\n descriptors = list(itertools.chain.from_iterable(descriptors))\n else:\n pool = multiprocessing.Pool(threads)\n\n data = [(f, functionHandleDescriptor) for f in files] # Generate payload to send to threads\n descriptors = []\n for descs in tqdm(pool.imap_unordered(getSingleImageDescriptor, data), desc=\"[{} CPUs] Calculating descriptors\".format(threads), total=len(data)):\n if descs is not None:\n descriptors.extend(descs)\n pool.close()\n\n #list to array\n descriptors = np.asarray(descriptors)\n\n return descriptors\n\n\n# input\n# training = a set of descriptors\ndef kMeansDictionary(training, k):\n\n #K-means algorithm\n # est = KMeans(n_clusters=k, init='k-means++', tol=0.0001, verbose=1).fit(training)\n est = MiniBatchKMeans(n_clusters=k, init='k-means++', max_iter=100, 
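The three ShareBox helpers in upload.py repeat the same status-code ladder. A hypothetical shared lookup covering the union of their codes keeps the mapping in one place:

_STATUS_MESSAGES = {
    400: "Bad Request",
    401: "Authentication failure",
    403: "Access forbidden",
    404: "File not found",
    500: "Internal server error.",
}

def unpack_response(resp):
    """Return the payload on 200, an error string on known codes, else None."""
    if not resp:
        return None
    resp_data, resp_code = resp
    if resp_code == 200:
        return resp_data
    return _STATUS_MESSAGES.get(resp_code)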
batch_size=100, verbose=1,tol=0.0).fit(training)\n return est\n\ndef getVLADDescriptors(path, functionHandleDescriptor, visualDictionary):\n descriptors = list()\n idImage = list()\n for imagePath in glob.glob(path + \"/*.jpg\"):\n print(imagePath)\n im = cv2.imread(imagePath)\n kp, des = functionHandleDescriptor(im)\n if des is not None:\n v = VLAD(des, visualDictionary)\n descriptors.append(v)\n idImage.append(imagePath)\n\n #list to array\n descriptors = np.asarray(descriptors)\n return descriptors, idImage\n\ndef getVLADDescriptorsPerPDF(path, functionHandleDescriptor,visualDictionary):\n descriptors = list()\n idPDF = list()\n desPDF = list()\n\n #####\n #sorting the data\n data = list()\n for e in glob.glob(path + \"/*.jpg\"):\n #print(\"e: {}\".format(e))\n s = e.split('/')\n #print(\"s: {}\".format(s))\n s = s[1].split('-')\n #print(\"s: {}\".format(s))\n s = s[0].split('.')\n #print(\"s: {}\".format(s))\n s = int(s[0]+s[1])\n #print(\"s: {}\".format(s))\n\n data.append([s, e])\n\n data = sorted(data, key=lambda atr: atr[0])\n #####\n\n #sFirst=glob.glob(path+\"/*.jpg\")[0].split('-')[0]\n sFirst = data[0][0]\n docCont = 0\n docProcessed = 0\n #for imagePath in glob.glob(path+\"/*.jpg\"):\n for s, imagePath in data:\n #print(imagePath)\n #s=imagePath.split('-')[0]\n #print(\"s : {}\".format(s))\n #print(\"sFirst : {}\".format(sFirst))\n\n #accumulate all pdf's image descriptors in a list\n if (s == sFirst):\n\n im = cv2.imread(imagePath)\n kp, des = functionHandleDescriptor(im)\n if des!=None:\n desPDF.append(des)\n\n else:\n docCont = docCont + 1\n #compute VLAD for all the descriptors whithin a PDF\n #------------------\n if len(desPDF)!=0:\n docProcessed=docProcessed+1\n #print(\"len desPDF: {}\".format(len(desPDF)))\n #flatten list\n desPDF = list(itertools.chain.from_iterable(desPDF))\n #list to array\n desPDF = np.asarray(desPDF)\n #VLAD per PDF\n v = VLAD(desPDF,visualDictionary)\n descriptors.append(v)\n idPDF.append(sFirst)\n #------------------\n #update vars\n desPDF = list()\n sFirst = s\n im = cv2.imread(imagePath)\n kp, des = functionHandleDescriptor(im)\n if des != None:\n desPDF.append(des)\n\n #Last element\n docCont = docCont+1\n if len(desPDF) != 0:\n docProcessed = docProcessed + 1\n desPDF = list(itertools.chain.from_iterable(desPDF))\n desPDF = np.asarray(desPDF)\n v = VLAD(desPDF,visualDictionary)\n descriptors.append(v)\n idPDF.append(sFirst)\n\n #list to array\n descriptors = np.asarray(descriptors)\n print(\"descriptors: {}\".format(descriptors))\n print(\"idPDF: {}\".format(idPDF))\n print(\"len descriptors : {}\".format(descriptors.shape))\n print(\"len idpDF: {}\".format(len(idPDF)))\n print(\"total number of PDF's: {}\".format(docCont))\n print(\"processed number of PDF's: {}\".format(docProcessed))\n\n return descriptors, idPDF\n\ndef VLAD(X, visualDictionary):\n predictedLabels = visualDictionary.predict(X)\n centers = visualDictionary.cluster_centers_\n labels = visualDictionary.labels_\n\n k = visualDictionary.n_clusters\n\n m, d = X.shape\n V = np.zeros([k,d])\n\n # for all the clusters (visual words)\n for i in range(k):\n # if there is at least one descriptor in that cluster\n if np.sum(predictedLabels==i) > 0:\n # add the diferences\n V[i] = np.sum(X[predictedLabels==i,:] - centers[i], axis=0)\n\n\n V = V.flatten()\n # power normalization, also called square-rooting normalization\n V = np.sign(V)*np.sqrt(np.abs(V))\n\n # L2 normalization\n V = V/np.sqrt(np.dot(V,V))\n print(len(V))\n return V\n\n\n\n#Implementation of a improved version of 
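The two normalizations at the end of VLAD() are easy to check on a toy vector: power normalization sign(V)*sqrt(|V|) damps large, bursty components, then L2 normalization puts the result on the unit sphere. For illustration only:

import numpy as np

V = np.array([9.0, -4.0, 1.0, 0.0])
V = np.sign(V) * np.sqrt(np.abs(V))  # power norm -> [ 3., -2., 1., 0.]
V = V / np.sqrt(np.dot(V, V))        # L2 norm: divide by sqrt(14)
print(V, np.dot(V, V))               # sum of squares prints ~1.0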
VLAD\n#reference: Revisiting the VLAD image representation\ndef improvedVLAD(X, visualDictionary):\n predictedLabels = visualDictionary.predict(X)\n centers = visualDictionary.cluster_centers_\n labels = visualDictionary.labels_\n k = visualDictionary.n_clusters\n\n m, d = X.shape\n V = np.zeros([k,d])\n #computing the differences\n\n # for all the clusters (visual words)\n for i in range(k):\n # if there is at least one descriptor in that cluster\n if np.sum(predictedLabels ==i ) > 0:\n # add the diferences\n V[i] = np.sum(X[predictedLabels==i,:]-centers[i],axis=0)\n\n\n V = V.flatten()\n # power normalization, also called square-rooting normalization\n V = np.sign(V)*np.sqrt(np.abs(V))\n\n # L2 normalization\n\n V = V/np.sqrt(np.dot(V,V))\n return V\n\ndef indexBallTree(X,leafSize):\n tree = BallTree(X, leaf_size=leafSize)\n return tree\n\n#typeDescriptors =SURF, SIFT, OEB\n#k = number of images to be retrieved\ndef query(image, k,descriptorName, visualDictionary,tree):\n #read image\n im = cv2.imread(image)\n #compute descriptors\n dict = {\"SURF\":describeSURF,\"SIFT\":describeSIFT,\"ORB\":describeORB}\n funDescriptor = dict[descriptorName]\n kp, descriptor = funDescriptor(im)\n\n #compute VLAD\n v = VLAD(descriptor,visualDictionary)\n\n #find the k most relevant images\n dist, ind = tree.query(v.reshape(1, -1), k)\n\n return dist, ind\n","sub_path":"VLADdata/VLADlib/VLAD.py","file_name":"VLAD.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"483164597","text":"from datetime import datetime\nfrom sys import argv\nimport operator\nfrom Utils import Utils\nfrom calculate_accuracy import calculate_accuracy\nimport numpy as np\n\ndef get_best_tag(a, b, c):\n probabilities = {label: utils.getE(c[1], label) * utils.getQ(a[0], b[0], label)\n for label in utils.labels}\n return max(probabilities.items(), key=operator.itemgetter(1))[0]\n\n\ndef greedyStart(sentences):\n result = []\n for i in range(len(sentences)):\n result.append([])\n for j in range(2, len(sentences[i])):\n tag = get_best_tag(sentences[i][j-2], sentences[i][j-1], sentences[i][j])\n sentences[i][j][0] = tag\n result[i].append([tag, sentences[i][j][1]])\n\n return result\n\n\nif __name__ == \"__main__\":\n print(datetime.now())\n input_f = argv[1]\n q_output = argv[2]\n e_output = argv[3]\n output_f = argv[4]\n gamma = [0.15533849, 0.02582799, 0.81883352]\n utils = Utils(gamma)\n utils.load_q_and_load_e(q_output, e_output)\n sentences = utils.load_input_file(input_f)\n result = greedyStart(sentences)\n utils.print_to_file(result, output_f)\n print(datetime.now())\n","sub_path":"GreedyTag.py","file_name":"GreedyTag.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"439786026","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Pizza, Topping, Comment\nfrom .forms import PizzaForm, ToppingForm, CommentForm\nfrom django.http import Http404\n# Create your views here.\n\ndef index(request):\n ''' The home page for Pizzeria '''\n return render(request, 'pizzas/index.html')\n\n@login_required\ndef pizzas(request):\n pizzas = Pizza.objects.filter(owner=request.user).order_by('date_added')\n context = {'pizzas':pizzas}\n return render(request, 'pizzas/pizzas.html', context)\n\n@login_required\ndef pizza(request, pizza_id):\n pizza = 
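End to end, the pieces of VLAD.py compose as below. This is a usage sketch: the dataset path, k, leaf size and the VLADlib.VLAD import path are placeholders, and describeSIFT comes from VLADlib.Descriptors as in the file's own star import.

from VLADlib.Descriptors import describeSIFT
from VLADlib.VLAD import (getDescriptors, kMeansDictionary,
                          getVLADDescriptors, indexBallTree, query)

descriptors = getDescriptors("dataset", describeSIFT, threads=4)
visualDictionary = kMeansDictionary(descriptors, 64)
vlads, image_ids = getVLADDescriptors("dataset", describeSIFT, visualDictionary)
tree = indexBallTree(vlads, leafSize=40)
dist, ind = query("query.jpg", 5, "SIFT", visualDictionary, tree)
print([image_ids[i] for i in ind[0]])  # the 5 closest images by VLAD distance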
Pizza.objects.get(id=pizza_id)\n toppings = pizza.topping_set.order_by('-date_added')\n if pizza.owner != request.user:\n raise Http404\n context = {'pizza':pizza, 'toppings':toppings}\n return render(request, 'pizzas/pizza.html', context)\n\n@login_required\ndef new_pizza(request):\n if request.method != 'POST':\n form = PizzaForm()\n else:\n form = PizzaForm(data=request.POST)\n if form.is_valid():\n new_pizza = form.save(commit=False)\n new_pizza.owner = request.user\n new_pizza.save()\n \n return redirect('pizzas:pizzas')\n context = {'form':form}\n return render(request, 'pizzas/new_pizza.html', context)\n\n@login_required\ndef new_topping(request,pizza_id):\n pizza = Pizza.objects.get(id=pizza_id)\n if pizza.owner != request.user:\n raise Http404\n if request.method != 'POST':\n form = ToppingForm()\n else:\n form = ToppingForm(data=request.POST)\n if form.is_valid():\n new_topping = form.save(commit=False)\n new_topping.pizza = pizza\n new_topping.save()\n return redirect('pizzas:pizza', pizza_id=pizza_id)\n context = {'form':form, 'pizza':pizza} #passing pizza because we want to see the pizza, not the number that represents its id\n return render(request, 'pizzas/new_topping.html', context)\n\n@login_required\ndef edit_topping(request,topping_id):\n topping = Topping.objects.get(id=topping_id)\n pizza = topping.pizza\n if pizza.owner != request.user:\n raise Http404\n if request.method != 'POST':\n form = ToppingForm(instance=topping)\n else:\n form = ToppingForm(instance=topping, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('pizzas:pizza',pizza_id=pizza.id)\n context = {'topping':topping, 'pizza':pizza, 'form':form}\n return render(request, 'pizzas/edit_topping.html', context)\n\n@login_required\ndef comments(request, pizza_id):\n pizza = Pizza.objects.get(id=pizza_id)\n if request.method != 'POST':\n form = CommentForm()\n else:\n form = CommentForm(data=request.POST)\n if form.is_valid():\n comments = form.save(commit=False)\n comments.pizza = pizza\n comments.owner = request.user\n form.save()\n return redirect('pizzas:pizza',pizza_id=pizza_id)\n context = {'form':form, 'pizza':pizza}\n return render(request, 'pizzas/comments.html', context) \n\n '''\n if request.method == 'POST' and request.POST.get(\"btn1\"):\n comment = request.POST.get(\"comment\")\n Comment.objects.create(pizza_id=pizza_id,name=comment,date_added=date.today())\n comments = Comment.object.filter(pizza=pizza_id)\n pizza = Pizza.objects.get(id=pizza_id)\n\n context = {'pizza':pizza, 'comments':comments}\n return render(request, 'pizzas/comments.html', context)\n '''\n\n\n\n","sub_path":"pizzas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"449621597","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, request, redirect, url_for, session\nimport mysql.connector\nimport os\napp = Flask(__name__)\napp.secret_key = os.urandom(16)\n\nmydb = mysql.connector.connect(\n host=\"localhost\", ## Escribir aqui tu host (localhost por defecto)\n user=\"root\", # Escribir aqui tu usuario\n passwd=\"3_99SA.17*Pc#2\", # Escribir aqui tu contraseña\n database = \"sanpatricio\", # Escribir aqui el nombre de la base de datos\n auth_plugin='mysql_native_password' # Dejar esta propiedad asi\n)\n\ncur = mydb.cursor()\ncurpGlobal = \"\"\n\n@app.route('/')\ndef cargar_principal():\n if 'sesion' in session:\n usuario = session['usuario']\n 
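pizza(), new_topping() and edit_topping() in the Django views above all repeat the same fetch-then-check-owner dance. A hypothetical helper centralizing the Http404 logic:

from django.http import Http404
from .models import Pizza

def get_owned_pizza_or_404(pizza_id, user):
    """Fetch a pizza and enforce the owner check used throughout these views."""
    pizza = Pizza.objects.get(id=pizza_id)
    if pizza.owner != user:
        raise Http404
    return pizza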
print(\"LOGIN: \", usuario)\n return render_template('menu_responsive.html')\n return redirect(url_for('cargar_login'))\n\n@app.route('/login', methods=['GET', 'POST'])\ndef cargar_login():\n if 'sesion' in session:\n return redirect(url_for('cargar_principal'))\n\n if request.method == \"POST\":\n detalles = request.form\n _usuario = detalles['username']\n\n query = \"select exists(select user_usu from usuario where user_usu='\" + _usuario + \"')\"\n cur.execute(query)\n existeUsuario = cur.fetchall()[0]\n\n if not existeUsuario[0]:\n return redirect(url_for('cargar_login'))\n\n _contrasenia = detalles['password']\n\n query = \"select cve_usu from usuario where user_usu='\" + _usuario + \"'\"\n cur.execute(query)\n claveUsuario = cur.fetchall()[0]\n\n query = \"select strcmp((select pass_usu from usuario where cve_usu=\" + str(claveUsuario[0]) + \"), sha2('\" + str(_contrasenia) + \"', 224))\"\n cur.execute(query)\n contraseniaCorrecta = cur.fetchall()[0]\n\n if contraseniaCorrecta[0] != 0:\n return redirect(url_for('cargar_login'))\n\n rol = \"persona\"\n rolEmpleado = \"\"\n\n query = \"select exists(select u.curp_per from usuario u join empleado e on u.curp_per=e.curp_per where u.cve_usu=\" + str(claveUsuario[0]) + \")\"\n cur.execute(query)\n esEmpleado = cur.fetchall()[0]\n\n if esEmpleado[0]:\n rol = \"empleado\"\n\n query = \"select concat(puesto, '') from usuario u join empleado e on u.curp_per=e.curp_per where user_usu='\" + str(_usuario) + \"'\"\n cur.execute(query)\n resultado = cur.fetchall()[0]\n rolEmpleado = resultado[0]\n print(rolEmpleado)\n\n query = \"select exists(select u.curp_per from usuario u join alumno a on u.curp_per=a.curp_per where u.cve_usu=\" + str(claveUsuario[0]) + \")\"\n cur.execute(query)\n esAlumno = cur.fetchall()[0]\n\n if esAlumno[0]:\n rol = \"alumno\"\n\n query = \"select curp_per from usuario where user_usu='\" + str(_usuario) + \"'\"\n cur.execute(query)\n resultado = cur.fetchall()[0]\n\n print(resultado[0])\n\n curpGlobal = str(resultado[0])\n\n session['usuario'] = detalles['username']\n session['rol'] = rol\n session['sesion'] = True\n session['rolEmpleado'] = rolEmpleado\n return redirect(url_for('cargar_principal'))\n return render_template('Login.html')\n\n@app.route('/logout')\ndef logout():\n session.pop('usuario', None) \n session.pop('sesion', None) \n return redirect(url_for('cargar_login'))\n\n@app.route('/menu/')\ndef cargar_menu():\n return render_template('menu.html')\n\n# /menu-responsive/\n@app.route('/menu-responsive')\ndef cargar_menu_responsive():\n return render_template('menu_responsive.html')\n\n# --------------------- datosPersonales --------------------- ## --------------------- v_datosPersonales --------------------- #\n\n@app.route('/datos-personales')\ndef cargar_vista_datosPersonales():\n query = \"select * from persona where curp_per='\" + str(curpGlobal) + \"'\"\n cur.execute(query)\n datosPersonales = cur.fetchall()\n\n print(curpGlobal)\n\n return render_template('vista_datosPersonales.html', datosPersonales = datosPersonales)\n\n# --------------------- AREAS --------------------- ## --------------------- s_AREAS --------------------- #\n\n@app.route('/areas/')\ndef cargar_areas():\n query = \"select a.cve_are, nombre_are from area a join grupo g on a.cve_are=g.cve_are where curdate() between fechaini_gru and fechafin_gru group by cve_are\"\n cur.execute(query)\n areasOcupadas = cur.fetchall()\n\n query = \"select a.cve_are, nombre_are from area a join grupo g on a.cve_are=g.cve_are where curdate() not between 
fechaini_gru and fechafin_gru group by cve_are\"\n cur.execute(query)\n areasLibres = cur.fetchall()\n\n return render_template('areas.html', areasOcupadas = areasOcupadas, areasLibres = areasLibres)\n\n@app.route('/area', methods=['POST'])\ndef area():\n data = request.form\n\n query = \"select cve_are, nombre_are, concat(tipo_are, '') as tipo, concat(ancho_are, ' x ', largo_are, ' ', umedida_are) as medidas, detalles_are from area where cve_are=\" + str(data['clave'])\n cur.execute(query)\n resultados = cur.fetchall()\n\n area = resultados[0]\n\n query = \"select g.*, a.nom_act, concat(p.nom_per, ' ', p.ap_per, ' ', p.am_per) from grupo g join actividad a on g.cve_act=a.cve_act join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where g.cve_are=\" + data['clave']\n cur.execute(query)\n grupos = cur.fetchall()\n\n grupos_dict = []\n\n for grupo in grupos:\n grupo_dict = {\n \"turno\": str(grupo[1]) + \" a \" + str(grupo[2]),\n \"lapso\": str(grupo[3]) + \" - \" + str(grupo[4]),\n \"minmaxalum\": str(grupo[6]) + \"/\" + str(grupo[5]),\n \"act\": grupo[10],\n \"emp\": grupo[11]\n }\n grupos_dict.append(grupo_dict)\n\n tupla = {\n \"area\": [\n { \"clave\": area[0], \"nombre\": area[1], \"tipo\": area[2], \"medidas\": area[3], \"detalles\": area[4] }\n ],\n \"grupos\": grupos_dict\n }\n\n return tupla\n\n# /registro-areas\n@app.route('/areas/registrar', methods=['GET', 'POST'])\ndef cargar_areas_registro():\n if request.method == \"POST\":\n detalles = request.form\n _nombre = detalles['nombre']\n _tipo = detalles['tipo']\n _ancho = detalles['ancho']\n _largo = detalles['largo']\n _umedida = detalles['umedida']\n _detalles = detalles['detalles']\n\n query = \"insert into area values(%s, %s, %s, %s, %s, %s, %s)\"\n values = (None, _nombre, _tipo, _ancho, _largo, _umedida, _detalles)\n\n cur.execute(query, values)\n mydb.commit()\n\n print(\"INSERCION EXITOSA\")\n\n #######################\n\n # cur.execute(\"select * from test\")\n # data = cur.fetchall()\n\n # print(str(data))\n\n\n # _ancho = detalles['ancho']\n\n # query = \"insert into test values(%s, %s)\"\n # values = (None, _ancho)\n\n # cur.execute(query, values)\n # mydb.commit()\n\n # print(\"insertado exitosamente\")\n\n pass\n\n return render_template('areas_registro.html')\n\n# --------------------- ACTIVIDADES --------------------- ## --------------------- s_ACTIVIDADES --------------------- #\n\n# Ventana principal de actividades\n@app.route('/actividades/')\ndef cargar_actividades():\n query = \"select * from actividad where tipo_act='Deportiva' order by nom_act\"\n cur.execute(query)\n actividadesDeportivas = cur.fetchall()\n\n query = \"select * from actividad where tipo_act='Artistica' order by nom_act\"\n cur.execute(query)\n actividadesArtisticas = cur.fetchall()\n\n return render_template('actividades.html', actividadesDeportivas = actividadesDeportivas, actividadesArtisticas = actividadesArtisticas)\n\n@app.route('/actividad', methods=['POST'])\ndef actividad():\n data = request.form\n query = \"select * from actividad where cve_act=\" + data['clave']\n cur.execute(query)\n _actividad = cur.fetchall()\n\n actividad = _actividad[0]\n\n _tipo = None\n\n for i in actividad[2]:\n _tipo = i\n\n # query = \"select g.*, a.nom_act, concat(p.nom_per, ' ', p.ap_per, ' ', p.am_per) from grupo g join actividad a on g.cve_act=a.cve_act join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where g.cve_act=\" + data['clave']\n query = \"select g.*, a.nombre_are, concat(p.nom_per, ' 
', p.ap_per, ' ', p.am_per) from grupo g join area a on g.cve_are=a.cve_are join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where g.cve_act=\" + data['clave']\n cur.execute(query)\n grupos = cur.fetchall()\n\n grupos_dict = []\n\n for grupo in grupos:\n grupo_dict = {\n \"turno\": str(grupo[1]) + \" a \" + str(grupo[2]),\n # \"horaent\": str(grupo[1]),\n # \"horasali\": str(grupo[2]),\n \"lapso\": str(grupo[3]) + \" - \" + str(grupo[4]),\n # \"fechaini\": str(grupo[3]),\n # \"fechafin\": str(grupo[4]),\n \"minmaxalum\": str(grupo[6]) + \"/\" + str(grupo[5]),\n # \"maxalumnos\": grupo[5],\n # \"minalumnos\": grupo[6],\n \"are\": grupo[10],\n \"emp\": grupo[11]\n }\n grupos_dict.append(grupo_dict)\n\n tupla = {\n \"actividad\": [\n { \"clave\": actividad[0], \"nom\": actividad[1], \"tipo\": _tipo, \"descrip\": actividad[3] }\n ],\n \"grupos\": grupos_dict\n }\n\n return tupla\n\n# /registro-actividades\n@app.route('/actividades/registrar', methods=['GET', 'POST'])\ndef cargar_actividades_registro():\n if request.method == \"POST\":\n detalles = request.form\n _nombre = detalles['nombre']\n _tipo = detalles['tipo']\n _descripcion = detalles['descripcion']\n\n query = \"insert into actividad values(%s, %s, %s, %s)\"\n values = (None, _nombre, _tipo, _descripcion)\n\n cur.execute(query, values)\n mydb.commit()\n\n print(\"INSERCION EXITOSA\")\n return redirect(url_for('cargar_actividades'))\n\n return render_template('actividades_registro.html')\n\n# --------------------- GRUPOS --------------------- ## --------------------- s_GRUPOS --------------------- #\n\n# Ventana principal de grupos\n@app.route('/grupos/')\ndef cargar_grupos():\n query = \"select cve_gru, nom_act from grupo g join actividad a on g.cve_act=a.cve_act where curdate() between fechaini_gru and fechafin_gru\"\n cur.execute(query)\n gruposActivos = cur.fetchall()\n\n query = \"select cve_gru, nom_act from grupo g join actividad a on g.cve_act=a.cve_act where curdate() not between fechaini_gru and fechafin_gru\"\n cur.execute(query)\n gruposInactivos = cur.fetchall()\n\n return render_template('grupos.html', gruposActivos = gruposActivos, gruposInactivos = gruposInactivos)\n\n@app.route('/grupo', methods=['POST'])\ndef grupo():\n data = request.form\n\n query = \"select cve_gru, concat(horaent_gru, ' - ', horasali_gru) as turno, concat(fechaini_gru, ' / ', fechafin_gru) as periodo, concat(minalumnos_gru, '/', maxalumnos_gru) as minmax, concat(nom_act, ' - ', tipo_act) as actividad, concat(nombre_are, ' - ', tipo_are) as area, concat(nom_per, ' ', ap_per, ' ', am_per) as docente from grupo g join actividad a on g.cve_act=a.cve_act join area ar on g.cve_are=ar.cve_are join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where cve_gru=\" + data['clave']\n cur.execute(query)\n resultados = cur.fetchall()\n\n grupo = resultados[0]\n\n cur.callproc('sp_getListaGrupo', [ data['clave'], ])\n\n alumnos = []\n\n for resultado in cur.stored_results():\n listaAlumnos = resultado.fetchall()\n # if (listaAlumnos):\n # alumnos = listaAlumnos\n\n for alumno in listaAlumnos:\n alumno_dict = {\n \"curp\": alumno[0],\n \"nombre\": alumno[1]\n }\n alumnos.append(alumno_dict)\n\n # print(listaAlumnos)\n # for alumno in listaAlumnos:\n # print(alumno)\n\n # print(alumnos)\n\n # print(alumnos)\n\n tupla = {\n \"grupo\": [\n { \"clave\": grupo[0], \"turno\": grupo[1], \"periodo\": grupo[2], \"minmax\": grupo[3], \"act\": grupo[4], \"are\": grupo[5], \"docente\": grupo[6] }\n ],\n \"alumnos\": 
alumnos\n }\n\n return tupla\n\n#/registro-grupos\n@app.route('/grupos/registrar', methods=['GET', 'POST'])\ndef cargar_grupos_registro():\n if request.method == \"GET\":\n query = \"select cve_act, nom_act from actividad\" \n cur.execute(query)\n actividades = cur.fetchall()\n\n query = \"select cve_are, nombre_are from area\"\n cur.execute(query)\n areas = cur.fetchall()\n\n query = \"select cve_emp, rfc_emp, concat(fechain_emp, ' / ', fechafin_emp), concat(nom_per, ' ', ap_per, ' ', am_per) as nombre from empleado e join persona p on e.curp_per=p.curp_per where puesto='Docente' and curdate() between fechain_emp and fechafin_emp\" \n cur.execute(query)\n empleados = cur.fetchall()\n\n return render_template('grupos_registro.html', actividades = actividades, areas = areas, empleados = empleados)\n\n if request.method == \"POST\":\n detalles = request.form\n _horaent = detalles['horaent']\n _horasali = detalles['horasali']\n _fechaini = detalles['fechaini']\n _fechafin = detalles['fechafin']\n _minalumnos = detalles['minalumnos']\n _maxalumnos = detalles['maxalumnos']\n _actividad = detalles['actividad']\n _area = detalles['area']\n _empleado = detalles['empleado']\n\n query = \"insert into grupo values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n values = (None, _horaent, _horasali, _fechaini, _fechafin, _maxalumnos, _minalumnos, _actividad, _area, _empleado)\n\n cur.execute(query, values)\n mydb.commit()\n\n print(\"INSERCION EXITOSA\")\n pass\n\n return render_template('grupos_registro.html')\n\n# --------------------- EMPLEADOS --------------------- ## --------------------- s_EMPLEADOS --------------------- #\n\n# Ventana principal de empleados\n@app.route('/empleados/')\ndef cargar_empleados():\n # if 'sesion' not in session:\n # return redirect(url_for('cargar_login'))\n\n queryAux = \"select cve_emp, puesto, concat(ap_per, ' ', am_per, ' ', nom_per) as nombre from empleado e join persona p on e.curp_per=p.curp_per where puesto=\"\n \n query = queryAux + \"'Administrador' order by nombre\"\n cur.execute(query)\n administradores = cur.fetchall()\n\n query = queryAux + \"'Docente' order by nombre\"\n cur.execute(query)\n docentes = cur.fetchall()\n\n query = queryAux + \"'Limpieza' order by nombre\"\n cur.execute(query)\n limpieza = cur.fetchall()\n\n query = queryAux + \"'Velador' order by nombre\"\n cur.execute(query)\n veladores = cur.fetchall()\n\n query = queryAux + \"'Director' order by nombre\"\n cur.execute(query)\n directores = cur.fetchall()\n return render_template('empleados.html', administradores = administradores, docentes = docentes, limpieza = limpieza, veladores = veladores, directores = directores)\n\n@app.route('/empleado', methods=['POST'])\ndef empleado():\n data = request.form\n # print(data)\n # print(data['clave'])\n\n query = \"select * from empleado e join persona p on e.curp_per=p.curp_per where cve_emp=\" + data['clave']\n cur.execute(query)\n resultado = cur.fetchall()\n\n # print(type(resultado))\n # print(type(resultado[0]))\n # print(resultado)\n\n datosEmpleado = resultado[0]\n\n _puesto = None\n _genero = None\n _orient = None\n\n for x in datosEmpleado[4]: # Esta es la unica forma de acceder a un elemento de un set(conjunto)\n _puesto = x\n\n for x in datosEmpleado[12]:\n _genero = x\n\n for x in datosEmpleado[15]:\n _orient = x\n\n query = \"select * from grupo where cve_emp=\" + str(datosEmpleado[0])\n cur.execute(query)\n grupos = cur.fetchall()\n # print(grupos)\n # print(type(grupos))\n # print(type(grupos[0]))\n\n gruposDict = [] # Diccionario 
de grupos\n\n for grupo in grupos: # Esta es la forma mas sencilla de generar un JSON para retornar una respuesta\n grupoDict = {\n \"clave\": grupo[0],\n 'horaent': str(grupo[1]),\n \"horasali\": str(grupo[2]),\n \"fechaini\": str(grupo[3]),\n \"fechafin\": str(grupo[4]),\n \"maxalumnos\": grupo[5],\n \"minalumnos\": grupo[6],\n \"act\": grupo[7],\n \"are\": grupo[8]\n }\n gruposDict.append(grupoDict)\n\n query = \"select dia_diahor, horaent_diahor, horasal_diahor from diahora dh join horario h on dh.cve_hor=h.cve_hor where cve_emp=\" + str(datosEmpleado[0])\n cur.execute(query)\n horarios = cur.fetchall()\n\n horariosDict = []\n for horario in horarios:\n horarioDict = {\n \"dia\": entero_a_dia(horario[0]),\n \"horaent\": str(horario[1]),\n \"horasal\": str(horario[2])\n }\n horariosDict.append(horarioDict)\n\n tupla = { \n \"laborales\": [\n { \"clave\": datosEmpleado[0], \"rfc\": datosEmpleado[1], \"fechain\": str(datosEmpleado[2]), \"fechafin\": str(datosEmpleado[3]), \"puesto\": _puesto }\n ],\n \"personales\": [\n { \"curp\": datosEmpleado[5], \"nombre\": datosEmpleado[7] + \" \" + datosEmpleado[8] + \" \" + datosEmpleado[9], \"tel\": datosEmpleado[10],\n \"fechanac\": str(datosEmpleado[11]), \"genero\": _genero, \n \"domicilio\": datosEmpleado[13] + \" \" + _orient + \" \" + str(datosEmpleado[14]) }\n ],\n \"grupos\": gruposDict,\n \"horarios\": horariosDict\n }\n\n # print(tupla)\n # print(\"tupla = \", type(tupla))\n\n # print(tupla.laborales)\n\n return tupla\n\n# @app.route('/signUp')\n# def signUp():\n# return render_template('signUp.html')\n\n# @app.route('/signUpUser', methods=['POST'])\n# def signUpUser():\n# print(request.form)\n# user = request.form['username']\n# password = request.form['password']\n# return json.dumps({'status':'OK','user':user,'pass':password})\n\n# Registro de empleados\n@app.route('/empleados/registrar', methods=['GET', 'POST'])\ndef cargar_empleados_registro():\n if request.method == \"POST\":\n detalles = request.form\n _curp = detalles['curp']\n _nombre = detalles['nombre']\n _ap = detalles['ap']\n _am = detalles['am']\n _tel = detalles['tel']\n _fechanac = detalles['fechanac']\n _genero = detalles['genero']\n _calle = detalles['calle']\n _num = detalles['num']\n _orient = detalles['orient']\n _entrecalles = detalles['entrecalles']\n _col = detalles['col']\n\n query = \"insert into persona values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n values = (_curp, _nombre, _ap, _am, _tel, _fechanac, _genero, _calle, _num, _orient, _entrecalles, _col)\n\n cur.execute(query, values)\n\n _rfc = detalles['rfc']\n _fechain = detalles['fechain']\n _fechafin = detalles['fechafin']\n _puesto = detalles['puesto']\n\n query = \"insert into empleado values (%s, %s, %s, %s, %s, %s)\" \n values = (None, _rfc, _fechain, _fechafin, _puesto, _curp)\n\n cur.execute(query, values)\n # mydb.commit()\n\n # detalles = request.form\n # print(detalles)\n\n _monto = detalles['monto']\n _modo = detalles['modo']\n\n query = \"insert into pagoempleado values(%s, now(), %s, %s, (select max(cve_emp) from empleado))\"\n values = (None, _monto, _modo)\n cur.execute(query, values)\n\n query = \"insert into horario values(null, curdate(), (select max(cve_emp) from empleado))\"\n cur.execute(query)\n\n dias = detalles.getlist('dia')\n hsinicio = detalles.getlist('hinicio')\n hsfinal = detalles.getlist('hfinal')\n\n # print(dias)\n # print(hsinicio)\n # print(hsfinal)\n\n if detalles['horario'] == \"Fijo\":\n for i in range(len(dias)):\n print(\"%s (%s) - %s - %s\" % ( dias[i], 
dia_a_entero(dias[i]), hsinicio[0], hsfinal[0] ))\n query = \"insert into diahora values(%s, %s, %s, %s, (select max(cve_hor) from horario))\"\n values = ( None, dia_a_entero(dias[i]), hsinicio[0], hsfinal[0] )\n cur.execute(query, values)\n else:\n for i in range(len(dias)):\n print(dias[i], \"(\", dia_a_entero(dias[i]), \") - \", hsinicio[i], \" - \", hsfinal[i])\n # query = \"insert into \"\n\n\n mydb.commit()\n\n print(\"INSERION EXITOSA\")\n return redirect(url_for('cargar_empleados'))\n # pass\n\n query = \"select * from colonia\"\n cur.execute(query)\n colonias = cur.fetchall()\n\n return render_template('empleados_registro.html', colonias = colonias)\n\ndef dia_a_entero(dia):\n if dia == \"Lunes\":\n return 1\n elif dia == \"Martes\":\n return 2\n elif dia == \"Miercoles\":\n return 3\n elif dia == \"Jueves\":\n return 4\n elif dia == \"Viernes\":\n return 5\n elif dia == \"Sabado\":\n return 6\n elif dia == \"Domingo\":\n return 7\n\ndef entero_a_dia(n):\n if n == 1:\n return \"Lunes\"\n if n == 2:\n return \"Martes\"\n if n == 3:\n return \"Miércoles\"\n if n == 4:\n return \"Jueves\"\n if n == 5:\n return \"Viernes\"\n if n == 6:\n return \"Sábado\"\n if n == 7:\n return \"Domingo\"\n\n# --------------------- ALUMNOS --------------------- ## --------------------- s_ALUMNOS --------------------- #\n\n# Ventana principal de alumnos\n@app.route('/alumnos/')\ndef cargar_alumnos():\n query = \"select a.cve_alu, concat(nom_per, ' ', ap_per, ' ', am_per) from grupo g join registroinscripcion ri on g.cve_gru=ri.cve_gru join folio f on ri.folio_insc=f.folio_fol join alumno a on f.cve_alu=a.cve_alu join persona p on a.curp_per=p.curp_per where curdate() between fechaini_gru and fechafin_gru\"\n cur.execute(query)\n inscritos = cur.fetchall()\n\n # query = \"select a.cve_alu, concat(nom_per, ' ', ap_per, ' ', am_per) from folio f join alumno a on f.cve_alu=a.cve_alu join persona p on a.curp_per=p.curp_per where not exists(select folio_fol, folio_insc from folio f, registroinscripcion ri where folio_fol=folio_insc)\"\n # query = \"select a.cve_alu, concat(nom_per, ' ', ap_per, ' ', am_per) from folio f join registroinscripcion ri on f.folio_fol!=ri.folio_insc join alumno a on f.cve_alu=a.cve_alu join persona p on a.curp_per=p.curp_per\"\n query = \"select f.cve_alu, concat(nom_per, ' ', ap_per, ' ', am_per), folio_fol from folio f join alumno a on f.cve_alu=a.cve_alu join persona p on a.curp_per=p.curp_per where not exists(select folio_insc from registroInscripcion ri where ri.folio_insc=f.folio_fol)\"\n cur.execute(query)\n noInscritos = cur.fetchall()\n\n # query = \"select cve_gru, concat(horaent_gru, ' - ', horasali_gru), concat(fechaini_gru, ' a ', fechafin_gru), nom_act, nombre_are, concat(nom_per, \" \", ap_per, \" \", am_per) from grupo g join actividad a on g.cve_act=a.cve_act join area ar on g.cve_are=ar.cve_are join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where curdate() between fechaini_gru and fechafin_gru\"\n query = \"select g.cve_gru, concat(horaent_gru, ' - ', horasali_gru) as turno, concat(fechaini_gru, ' a ', fechafin_gru) as periodo, nom_act, nombre_are, concat(nom_per, ' ', ap_per, ' ', am_per) as docente, count(folio_insc) as cuenta, maxalumnos_gru as maxalumnos from grupo g join registroInscripcion ri on g.cve_gru=ri.cve_gru join actividad a on g.cve_act=a.cve_act join area ar on g.cve_are=ar.cve_are join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where curdate() between fechaini_gru and fechafin_gru 
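dia_a_entero and entero_a_dia above are if-chains over a fixed table; two dicts express the same mapping and, like the originals, fall through to None for unknown input:

DIA_A_ENTERO = {"Lunes": 1, "Martes": 2, "Miercoles": 3, "Jueves": 4,
                "Viernes": 5, "Sabado": 6, "Domingo": 7}
ENTERO_A_DIA = {1: "Lunes", 2: "Martes", 3: "Miércoles", 4: "Jueves",
                5: "Viernes", 6: "Sábado", 7: "Domingo"}

def dia_a_entero(dia):
    return DIA_A_ENTERO.get(dia)

def entero_a_dia(n):
    return ENTERO_A_DIA.get(n)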
group by g.cve_gru having cuenta<=maxalumnos\"\n cur.execute(query)\n gruposNoVacios = cur.fetchall()\n\n # select cve_gru from grupo where cve_gru not in (select cve_gru from registroinscripcion) AGREGA ESTOOOOOOOOOOOOOOOOOOOOOOOO\n # select cve_gru, concat(horaent_gru, ' - ', horasali_gru) as turno, concat(fechaini_gru, ' a ', fechafin_gru) as periodo, nom_act, nombre_are, concat(nom_per, ' ', ap_per, ' ', am_per) as docente, maxalumnos_gru from grupo g join actividad a on g.cve_act=a.cve_act join area ar on g.cve_are=ar.cve_are join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where g.cve_gru not in (select cve_gru from registroinscripcion)\n # query = \"select g.cve_gru, concat(horaent_gru, ' - ', horasali_gru) as turno, concat(fechaini_gru, ' a ', fechafin_gru) as periodo, nom_act, nombre_are, concat(nom_per, ' ', ap_per, ' ', am_per) as docente, maxalumnos_gru from grupo g join registroInscripcion ri on g.cve_gru!=ri.cve_gru join actividad a on g.cve_act=a.cve_act join area ar on g.cve_are=ar.cve_are join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where curdate() between fechaini_gru and fechafin_gru\"\n query = \"select cve_gru, concat(horaent_gru, ' - ', horasali_gru) as turno, concat(fechaini_gru, ' a ', fechafin_gru) as periodo, nom_act, nombre_are, concat(nom_per, ' ', ap_per, ' ', am_per) as docente, maxalumnos_gru from grupo g join actividad a on g.cve_act=a.cve_act join area ar on g.cve_are=ar.cve_are join empleado e on g.cve_emp=e.cve_emp join persona p on e.curp_per=p.curp_per where g.cve_gru not in (select cve_gru from registroinscripcion)\"\n cur.execute(query)\n gruposVacios = cur.fetchall()\n\n return render_template('alumnos.html', inscritos = inscritos, noInscritos = noInscritos, gruposNoVacios = gruposNoVacios, gruposVacios = gruposVacios)\n\n@app.route('/alumno', methods=['POST'])\ndef alumno():\n data = request.form\n\n query = \"select a.*, concat(p.nom_per, ' ', ap_per, ' ', am_per) as nombre, p.tel_per, p.fechanac_per, p.genero_per, concat(p.calle_per, ' ', p.orient_per, ' ', p.numero_per) as direccion from alumno a join persona p on a.curp_per=p.curp_per where a.cve_alu=\" + data['clave']\n cur.execute(query)\n _alumno = cur.fetchall()\n\n alumno = _alumno[0]\n\n _genero = None\n\n for i in alumno[7]:\n _genero = i\n\n query = \"select f.*, ac.nom_act from folio f join alumno a on f.cve_alu=a.cve_alu join actividad ac on f.cve_act=ac.cve_act where f.cve_alu=\" + data['clave'] + \" order by fecha_fol limit 1\"\n cur.execute(query)\n folio = cur.fetchall()\n\n # query = \"select cve_gru, concat(horaent_gru, ' - ', horasali_gru), concat(fechaini_gru, ' a ', fechafin_gru) from grupo where curdate() between fechaini_gru and fechafin_gru\"\n # cur.execute(query)\n # grupos = cur.fetchall()\n\n print(\"folio = \", folio)\n\n tupla = {\n \"alumno\": [\n { \"clave\": alumno[0], \"estatura\": alumno[1], \"peso\": alumno[2] }\n ],\n \"personales\": [\n { \"curp\": alumno[3], \"nombre\": alumno[4], \"tel\": alumno[5], \"fechanac\": str(alumno[6]), \"genero\": _genero, \"domicilio\": alumno[8] }\n ],\n \"folio\": folio\n }\n\n # print(tupla)\n return tupla\n\n@app.route('/alumnoInscribir', methods=['POST'])\ndef alumnoInscribir():\n detalles = request.form\n _folio = detalles['folio']\n _grupo = detalles['grupo']\n _fecha = detalles['fecha']\n _importe = detalles['importe']\n\n # print(\"detalles = \", detalles)\n\n query = \"insert into registroInscripcion values(%s, %s, %s, %s, %s)\"\n values = (_folio, _fecha, 
_importe, _importe, _grupo)\n\n # query = \"insert into registroinscripcion values(%s, %s, %s, %s, %s)\"\n # values = (data['folio'], data['fecha'], data['importe'], data['importe'], data['grupo'])\n\n cur.execute(query, values)\n mydb.commit()\n\n return \"OK\"\n\nimport time\n\n# /registro-alumnos\n@app.route('/alumnos/registrar', methods=['GET', 'POST'])\ndef cargar_alumnos_registro():\n if request.method == \"POST\":\n detalles = request.form\n _curp = detalles['curp']\n _nombre = detalles['nombre']\n _ap = detalles['ap']\n _am = detalles['am']\n _tel = detalles['tel']\n _fechanac = detalles['fechanac']\n _genero = detalles['genero']\n _calle = detalles['calle']\n _num = detalles['num']\n _orient = detalles['orient']\n _entrecalles = detalles['entrecalles']\n _col = detalles['col']\n\n query = \"insert into persona values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n values = (_curp, _nombre, _ap, _am, _tel, _fechanac, _genero, _calle, _num, _orient, _entrecalles, _col)\n\n cur.execute(query, values)\n\n _estatura = detalles['estatura']\n _peso = detalles['peso']\n\n query = \"insert into alumno values (%s, %s, %s, %s)\"\n values = (None, _estatura, _peso, _curp)\n\n cur.execute(query, values)\n # mydb.commit()\n\n _costo = detalles['costo']\n _act = detalles['act']\n\n query = \"select max(cve_alu) from alumno\"\n cur.execute(query)\n _alumno = cur.fetchall()\n\n alumno = _alumno[0]\n\n query = \"insert into folio values(%s, %s, %s, %s, %s)\"\n values = (None, time.strftime('%Y-%m-%d %H:%M:%S'), _costo, alumno[0], _act)\n\n cur.execute(query, values)\n mydb.commit()\n\n return redirect(url_for('cargar_alumnos'))\n\n query = \"select * from colonia\"\n cur.execute(query)\n colonias = cur.fetchall()\n\n query = \"select cve_act, concat(nom_act, ' - ', tipo_act) from actividad\"\n cur.execute(query)\n actividades = cur.fetchall()\n\n return render_template('alumnos_registro.html', colonias = colonias, actividades = actividades)\n\n# --------------------- PROVEEDORES --------------------- ## --------------------- S_PROVEEDORES --------------------- #\n\n# Ventana principal de proveedores\n@app.route('/proveedores/')\ndef cargar_proveedores():\n query = \"select * from proveedor\"\n cur.execute(query)\n proveedores = cur.fetchall()\n\n return render_template('proveedores.html', proveedores = proveedores)\n\n@app.route('/proveedor', methods=['POST'])\ndef proveedor():\n data = request.form\n\n query = \"select cve_prov, empresa_prov, concat(calle_prov, ' ', orient_prov, ' ', num_prov) as domicilio, tel_prov, cve_col from proveedor where cve_prov=\" + data['clave']\n cur.execute(query)\n _proveedor = cur.fetchall()\n\n proveedor = _proveedor[0]\n\n tupla = {\n \"general\": [\n { \"clave\": proveedor[0], \"empresa\": proveedor[1], \"domicilio\": proveedor[2], \"tel\": proveedor[3], \"col\": proveedor[4] }\n ]\n }\n\n return tupla\n\n@app.route('/proveedores/registrar', methods=['GET', 'POST'])\ndef cargar_proveedores_registro():\n if request.method == \"POST\":\n detalles = request.form\n _empresa = detalles['empresa']\n _tel = detalles['tel']\n _calle = detalles['calle']\n _num = detalles['num']\n _orient = detalles['orient']\n _entrecalles = detalles['entrecalles']\n _col = detalles['col']\n\n query = \"insert into proveedor values(%s, %s, %s, %s, %s, %s, %s, %s)\"\n values = (None, _empresa, _calle, _num, _orient, _entrecalles, _tel, _col)\n\n cur.execute(query, values)\n mydb.commit()\n\n print(\"INSERCION EXITOSA\")\n return redirect(url_for('cargar_proveedores'))\n\n query = 
\"select * from colonia\"\n cur.execute(query)\n colonias = cur.fetchall()\n\n return render_template('proveedores_registro.html', colonias = colonias)\n\n# --------------------- MATERIALES --------------------- ## --------------------- S_MATERIALES --------------------- #\n\n# Ventana principal de materiales\n@app.route('/materiales/')\ndef cargar_materiales():\n query = \"select * from material\"\n cur.execute(query)\n materiales = cur.fetchall()\n return render_template('materiales.html', materiales = materiales)\n\n@app.route('/material', methods=['POST'])\ndef material():\n data = request.form\n\n query = \"select m.*, a.nom_act from material m join actividad a on m.cve_act=a.cve_act where m.cve_mat=\" + data['clave']\n cur.execute(query)\n _material = cur.fetchall()\n\n material = _material[0]\n\n tupla = {\n \"material\": [\n { \"clave\": material[0], \"nombre\": material[1], \"marca\": material[2], \"precio\": material[3], \"descripcion\": material[4],\n \"act\": material[7] }\n ]\n }\n\n return tupla\n\n# /registro-materiales\n@app.route('/materiales/registrar', methods=['GET', 'POST'])\ndef cargar_materiales_registro():\n if request.method == \"POST\":\n detalles = request.form\n _nombre = detalles['nombre']\n _marca = detalles['marca']\n _precio = detalles['precio']\n _actividad = detalles['actividad']\n _descripcion = detalles['descripcion']\n\n query = \"insert into material values (%s, %s, %s, %s, %s, %s, %s)\"\n values = (None, _nombre, _marca, _precio, _descripcion, \"Prestable\", _actividad)\n\n cur.execute(query, values)\n mydb.commit()\n\n print(\"INSERCION EXITOSA\")\n pass\n\n query = \"select cve_act, nom_act from actividad\"\n cur.execute(query)\n \n actividades = cur.fetchall()\n\n return render_template('materiales_registro.html', actividades = actividades)\n\n# /resurtido-materiales\n@app.route('/materiales/suministrar', methods=['GET', 'POST'])\ndef cargar_materiales_resurtir():\n if request.method == \"POST\":\n detalles = request.form\n _material = detalles['material']\n _proveedor = detalles['proveedor']\n _cantidad = detalles['cantidad']\n _ppu = detalles['ppu']\n _fechaent = detalles['fechaent']\n\n query = \"insert into entrada values (%s, %s, %s, %s, %s, %s)\"\n values = (None, _cantidad, _ppu, _fechaent, _material, _proveedor)\n\n cur.execute(query, values)\n mydb.commit()\n\n print(\"INSERCION EXITOSA\")\n pass\n\n query = \"select * from material m join actividad a on m.cve_act=a.cve_act\"\n cur.execute(query)\n materiales = cur.fetchall()\n\n query = \"select * from proveedor p join colonia c on p.cve_col=c.cve_col\"\n cur.execute(query)\n proveedores = cur.fetchall()\n\n query = \"select nombre_mat, cantidad_ent, preciounidad_ent, empresa_prov, fechaent_ent from material m join entrada e on m.cve_mat=e.cve_mat join proveedor p on e.cve_prov=p.cve_prov order by fechaent_ent limit 5\"\n cur.execute(query)\n listaMateriales = cur.fetchall()\n\n return render_template('materiales_resurtir.html', materiales = materiales, proveedores = proveedores, listaMateriales = listaMateriales)\n\n# --------------------- SOLICITUDES --------------------- ## --------------------- s_SOLICITUDES --------------------- #\n\n@app.route('/solicitudes/')\ndef cargar_solicitudes():\n query = \"select cve_sol, concat(nom_per, ' ', ap_per, ' ', am_per) from solicitud s join persona p on s.curp_per=p.curp_per where cve_sol not in (select cve_pres from prestamo)\"\n cur.execute(query)\n solicitudes = cur.fetchall()\n\n return render_template('solicitudes.html', solicitudes = 
solicitudes)\n\n@app.route('/solicitud', methods=['POST'])\ndef solicitud():\n data = request.form\n\n query = \"select cve_sol, concat(nom_per, ' ', ap_per, ' ', am_per), fecha_sol from solicitud s join persona p on s.curp_per=p.curp_per where cve_sol=\" + str(data['clave'])\n cur.execute(query)\n resultados = cur.fetchall()\n\n _solicitud = resultados[0]\n\n query = \"select nombre_mat, cantidad_rensolic from renglonsolicitud rs join material m on rs.cve_mat=m.cve_mat where cve_sol=\" + str(data['clave'])\n cur.execute(query)\n solicitudes = cur.fetchall()\n\n solicitudesDict = []\n\n for solicitud in solicitudes:\n solicitudDict = {\n \"material\": solicitud[0],\n \"cantidad\": solicitud[1]\n }\n solicitudesDict.append(solicitudDict)\n\n tupla = {\n \"solicitud\": [\n { \"clave\": _solicitud[0], \"solicitante\": _solicitud[1], \"fecha\": str(_solicitud[2]) }\n ],\n \"solicitudes\": solicitudesDict\n }\n\n return tupla\n\n@app.route('/solicitudes/solicitar', methods=['POST','GET'])\ndef cargar_materiales_solicitar():\n if request.method == \"POST\":\n detalles = request.form\n _curp = detalles['persona']\n print(detalles)\n\n cantidades = detalles.getlist('cantidadVal')\n materiales = detalles.getlist('materialVal')\n\n print(cantidades)\n print(materiales)\n\n query = \"insert into solicitud values(%s, curdate(), %s)\"\n values = (None, _curp)\n cur.execute(query, values)\n\n for i in range(len(materiales)):\n query = \"insert into renglonsolicitud values(null, %s, %s, (select max(cve_sol) from solicitud))\"\n values = (cantidades[i], materiales[i])\n cur.execute(query, values)\n\n mydb.commit()\n print(\"INSERCION EXITOSA\")\n return redirect(url_for('cargar_solicitudes'))\n\n # query = \"insert into prestamo values (%s, %s, %s, %s)\"\n\n # consulta = \"select p.curp_per from persona p join empleado e on p.curp_per = e.curp_per where cve_emp = %s and puesto = 'Docente' group by p.curp_per\" %_docente\n # cur.execute(consulta)\n # resultado = cur.fetchall()\n\n # for registro in resultado:\n # _curp = registro[0]\n\n # values = (None, _fechapres, _curp, _docente)\n # cur.execute(query, values)\n # mydb.commit()\n \n # query = \"insert into renglonprestamo values (%s, %s, %s, %s)\"\n # consulta = \"select max(cve_pres) from prestamo\"\n # cur.execute(consulta)\n # result = cur.fetchall()\n\n # for registro in result:\n # _prestamo = registro[0]\n\n # values = (None, _cantidad, _material, _prestamo)\n # cur.execute(query, values)\n # mydb.commit()\n\n # query = \"select e.cve_emp, concat(nom_per, ' ', ap_per, ' ', am_per) as nombre from persona p join empleado e where p.curp_per = e.curp_per and puesto='Docente'\"\n query = \" select p.curp_per, concat(nom_per, ' ', ap_per, ' ', am_per), concat(calle_per, ' ', orient_per, ' ', numero_per), tel_per, concat(puesto, '') from persona p join empleado e on p.curp_per=e.curp_per\"\n cur.execute(query)\n empleados = cur.fetchall()\n\n query = \"select p.curp_per, concat(nom_per, ' ', ap_per, ' ', am_per), concat(calle_per, ' ', orient_per, ' ', numero_per), tel_per from persona p join alumno a on p.curp_per=a.curp_per\"\n cur.execute(query)\n alumnos = cur.fetchall()\n \n # query = \"select e.cve_mat, nombre_mat from material m join entrada e where m.cve_mat = e.cve_mat and cantidad_ent >=1 group by nombre_mat\"\n query = \"select e.cve_mat, nombre_mat, marca_mat, nom_act from material m join entrada e join actividad a on m.cve_act=a.cve_act where m.cve_mat = e.cve_mat and cantidad_ent >=1 group by nombre_mat\"\n cur.execute(query)\n materiales = 
cur.fetchall()\n\n materialesAux = []\n for i in materiales:\n datos = []\n materialesAux.append(datos)\n for j in i:\n datos.append(j)\n\n print('materialNombre = ', i[1])\n cur.callproc('sp_materialdisponible', [i[1], ])\n # listaDisp = cur.stored_results()\n # disp = listaDisp[0].fetchall()\n # print(\"disp = \", disp)\n for resultado in cur.stored_results():\n listaDisp = resultado.fetchall()\n # print(\"disp = \", disp[0])\n for k in listaDisp[0]:\n datos.append(k)\n # datos.append(resultado.fetchall())\n\n print(\"materiales aux = \", materialesAux)\n\n return render_template('materiales_solicitar.html', empleados = empleados, alumnos = alumnos, materialesAux = materialesAux)\n\n# --------------------- PRESTAMOS --------------------- ## --------------------- s_PRESTAMOS --------------------- #\n\n@app.route('/prestamos/')\ndef cargar_prestamos():\n query = \"select cve_pres, concat(nom_per, ' ', ap_per, ' ', am_per) from prestamo pr join persona p on pr.curp_per=p.curp_per where cve_pres not in (select cve_devo from devolucion)\"\n cur.execute(query)\n prestamos = cur.fetchall()\n\n return render_template('prestamos.html', prestamos = prestamos)\n\n@app.route('/pSolicitud', methods=['POST'])\ndef pSolicitud():\n data = request.form\n\n query = \"select cve_rensolic, cantidad_rensolic, nombre_mat from renglonsolicitud rs join material m on rs.cve_mat=m.cve_mat where cve_sol=\" + str(data['clave'])\n cur.execute(query)\n solicitudes = cur.fetchall()\n\n solicitudesDict = []\n\n for solicitud in solicitudes:\n solicitudDict = {\n \"clave\": solicitud[0],\n \"cantidad\": solicitud[1],\n \"material\": solicitud[2]\n }\n solicitudesDict.append(solicitudDict)\n\n tupla = {\n \"solicitudes\": solicitudesDict\n }\n\n return tupla\n\n@app.route('/prestamos/prestar', methods=['GET', 'POST'])\ndef cargar_materiales_prestar():\n if request.method == \"POST\":\n detalles = request.form\n _solicitud = detalles['solicitud']\n\n query = \"select curp_per from solicitud where cve_sol=\" + str(_solicitud)\n cur.execute(query)\n resultados = cur.fetchall()\n\n solicitud = resultados[0]\n curp = solicitud[0]\n\n query = \"select * from renglonsolicitud where cve_sol=\" + str(_solicitud)\n cur.execute(query)\n materialSolicitado = cur.fetchall()\n\n # print(materialSolicitado)\n\n query = \"insert into prestamo values(%s, curdate(), %s)\"\n values = (_solicitud, curp)\n\n cur.execute(query, values)\n\n for material in materialSolicitado:\n query = \"insert into renglonprestamo values(%s, %s, %s, %s)\"\n values = (None, material[1], material[2], _solicitud)\n cur.execute(query, values)\n\n mydb.commit()\n print(\"INSERCION EXITOSA\")\n return redirect(url_for('cargar_prestamos'))\n\n query = \"select cve_sol, concat(fecha_sol, ''), concat(nom_per, ' ', ap_per, ' ', am_per), s.curp_per from solicitud s join persona p on s.curp_per=p.curp_per where cve_sol not in (select cve_pres from prestamo)\"\n cur.execute(query)\n solicitudes = cur.fetchall()\n\n return render_template('materiales_prestar.html', solicitudes = solicitudes)\n\n# --------------------- NOMINAS --------------------- ## --------------------- s_NOMINAS --------------------- #\n\n@app.route('/nominas/')\ndef cargar_nominas():\n query = \"select cve_retri, fecha_retri from retribucionempleado group by fecha_retri\"\n cur.execute(query)\n nominas = cur.fetchall()\n\n return render_template('nominas.html', nominas = nominas)\n\n@app.route('/nomina', methods=['POST'])\ndef nomina():\n cur.callproc('sp_pagoTrabajadores')\n\n for resultado in 
cur.stored_results():\n lista = resultado.fetchall()\n \n for i in lista:\n query = \"insert into retribucionempleado values(%s, now(), %s, %s)\"\n values = (None, i[0], i[2])\n cur.execute(query, values)\n\n mydb.commit()\n return \"OK\"\n\n@app.route('/test', methods=['GET', 'POST'])\ndef test():\n if request.method == \"POST\":\n # detalles = request.form\n # print(detalles)\n\n # dias = detalles.getlist('dia')\n # hsinicio = detalles.getlist('hinicio')\n # hsfinal = detalles.getlist('hfinal')\n\n # print(dias)\n # print(hsinicio)\n # print(hsfinal)\n\n # for i in range(len(dias)):\n # print(dias[i], \" - \", hsinicio[i], \" - \", hsfinal[i])\n\n query = \"insert into test values(null, (select max(cve_emp) from empleado), now())\"\n # values = (None, \"wea\")\n\n cur.execute(query)\n mydb.commit()\n\n return render_template('test.html')\n\n# --------------------- REPORTES --------------------- ## --------------------- s_REPORTES --------------------- #\n\n@app.route('/reportes/')\ndef cargar_reportes():\n query = \"select cve_repmat, nombre_mat from reportematerial rm join material m on rm.cve_mat=m.cve_mat\"\n cur.execute(query)\n materiales = cur.fetchall()\n\n return render_template('reportes.html', materiales = materiales)\n\n@app.route('/reporte', methods=['POST'])\ndef reporte():\n data = request.form\n\n query = \"select rm.*, nombre_mat from reportematerial rm join material m on rm.cve_mat=m.cve_mat where cve_repmat=\" + str(data['clave'])\n cur.execute(query)\n resultados = cur.fetchall()\n\n reporte = resultados[0]\n\n tupla = {\n \"reporte\": [\n { \"clave\": reporte[0], \"fecha\": str(reporte[1]), \"cantidad\": reporte[2], \"causa\": reporte[3], \"material\": reporte[5] }\n ]\n }\n\n return tupla\n\n@app.route('/reportes/registrar', methods=['GET', 'POST'])\ndef cargar_reportes_registrar():\n if request.method == \"POST\":\n detalles = request.form\n _fecha = detalles['fecha']\n _cantidad = detalles['cantidad']\n _causa = detalles['causa']\n _material = detalles['material']\n\n query = \"insert into reportematerial values(%s, %s, %s, %s, %s)\"\n values = (None, _fecha, _cantidad, _causa, _material)\n cur.execute(query, values)\n mydb.commit()\n print(\"INSERCION EXITOSA\")\n return redirect(url_for('cargar_reportes'))\n\n # query = \"select * from material m join actividad a on m.cve_act=a.cve_act\"\n query = \"select m.*, a.nom_act from material m join actividad a on m.cve_act=a.cve_act\"\n cur.execute(query)\n materiales = cur.fetchall()\n\n return render_template('reportes_registrar.html', materiales = materiales)","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":45195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"3157297","text":"#!/usr/bin/env python\n\nimport sys\nimport math\nimport statistics\nimport argparse\nfrom pcap_parser import *\n\n#### DONE\n# DONE 1. Relative (per-user) query length\n# DONE 2. Relative source query frequency\n# DONE 3. Relative target query frequency\n# DONE 4. Query resolution length (time)\n# DONE 8. Query target address\n# ADDED: Query target name (different from above since a name could map to different addresses)\n# DONE 7. Query diversity entropy\n# DONE 9. Query diversity stddev\n# DONE 10. Query diversity number of URI component differences\n\n#### Requires more than one PCAP file (into and out of a resolver)\n# 10. Resolution chain length (number of recursive queries)\n# 11. 
Resolution chain (domains in the chain itself)\n\ndef computeComponentDifferences(s1, s2):\n if len(s1) > len(s2):\n s1,s2 = s2,s1\n distances = range(len(s1) + 1)\n for index2, elem2 in enumerate(s2):\n elem2 = elem2.lower()\n newDistances = [index2 + 1]\n for index1, elem1 in enumerate(s1):\n elem1 = elem1.lower()\n if elem1 == elem2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1], distances[index1 + 1], newDistances[-1])))\n distances = newDistances\n return distances[-1]\n\ndef computeQueryDifferences(queries):\n differences = 0\n for firstIndex, v1 in enumerate(queries):\n for secondIndex, v2 in enumerate(queries):\n if firstIndex != secondIndex:\n # the entries are parsed query objects, so split their dotted name fields\n query1 = v1.name.split(\".\")\n query2 = v2.name.split(\".\")\n\n diff = computeComponentDifferences(query1, query2)\n differences += diff\n return differences\n\ndef computeQueryEntropy(queries):\n prob = {}\n total = 0\n for query in queries:\n if query.name not in prob:\n prob[query.name] = 0\n total += 1\n prob[query.name] += 1\n\n # compute the entropy\n # H= -\sum p(x) log p(x)\n acc = 0\n for name in prob:\n p = float(prob[name]) / float(total)\n logp = math.log(p)\n acc += (p * logp)\n entropy = acc * -1\n\n return entropy\n\ndef computeQueryFrequency(queries, window):\n return float(len(queries)) / float(window)\n\nclass WindowFeatureExtractor(object):\n def __init__(self, window, processingFunction):\n self.window = window\n self.processingFunction = processingFunction\n\n def process(self, packets):\n # apply the processing function to the packets passed in (was an undefined 'queries')\n return self.processingFunction(packets)\n\nclass FeatureFormatter(object):\n ''' Class that formats lists of features for the output\n '''\n def __init__(self, features):\n self.features = features # list of tuples\n\n def toCSV(self, stream = None):\n lines = []\n for f in self.features:\n line = \",\".join(map(lambda x : str(x), f))\n if len(line) > 1:\n lines.append(line)\n if stream != None:\n stream.write(line)\n return \"\\n\".join(lines)\n\nclass FeatureExtractor(object):\n ''' Base class for all feature extractors.\n '''\n def __init__(self, packets, params = {}, outputPackets = None):\n self.packets = packets\n self.params = params\n self.outputPackets = outputPackets\n\n def getPacketsFromSourceInWindow(self, offset, src, window):\n packetsSent = []\n firstPacket = self.packets[offset]\n while offset < len(self.packets):\n packet = self.packets[offset]\n if packet.query != None and packet.query.srcAddress == src:\n packetsSent.append(packet.query)\n if packet.ts - firstPacket.ts > window:\n break\n offset += 1\n return packetsSent, offset\n\n def extract(self, index, params = {}):\n pass\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass TestFeatureExtractor(FeatureExtractor):\n ''' Template for new feature extractors\n '''\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index, params = {}):\n features = []\n sources = {}\n\n # for packet in self.packets:\n # pass\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass WindowedFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, windowExtractor):\n FeatureExtractor.__init__(self, packets)\n self.extractor = windowExtractor\n\n def extract(self, index):\n features = []\n sources = {}\n\n window = self.extractor.window\n\n i = index\n while i < len(self.packets) - 1:\n packet = self.packets[i]\n offset = i + 1\n\n if 
packet.query != None:\n src = packet.query.srcAddress\n packetsSent, offset = self.getPacketsFromSourceInWindow(offset, src, window)\n featureValue = self.extractor.process(packetsSent)\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], featureValue)\n features.append(feature)\n\n i = offset\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass QueryComponentDifferenceDiversityFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index):\n features = []\n sources = {}\n\n window = self.params[\"window\"]\n\n i = index\n while i < len(self.packets) - 1:\n packet = self.packets[i]\n offset = i + 1\n\n if packet.query != None:\n src = packet.query.srcAddress\n packetsSent, offset = self.getPacketsFromSourceInWindow(offset, src, window)\n\n differences = computeQueryDifferences(packetsSent)\n\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], differences)\n features.append(feature)\n\n i = offset\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass QueryEntropyDiversityFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index):\n features = []\n sources = {}\n\n window = self.params[\"window\"]\n\n i = index\n while i < len(self.packets) - 1:\n packet = self.packets[i]\n offset = i + 1\n\n if packet.query != None:\n src = packet.query.srcAddress\n packetsSent, offset = self.getPacketsFromSourceInWindow(offset, src, window)\n\n entropy = computeQueryEntropy(packetsSent)\n\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], entropy)\n features.append(feature)\n\n i = offset\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass TargetQueryFrequencyFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index):\n sources = {}\n features = []\n\n window = self.params[\"window\"]\n\n i = index\n while i < len(self.packets) - 1:\n packet = self.packets[i]\n offset = i + 1\n\n if packet.query != None:\n src = packet.query.srcAddress\n packetsSent, offset = self.getPacketsFromSourceInWindow(offset, src, window)\n\n frequency = computeQueryFrequency(packetsSent, window)\n\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], frequency)\n features.append(feature)\n\n # Since we're concerned with target frequency, the window only\n # moves forward when the target query changes\n # packetsSent holds query objects; loop names avoid shadowing 'index' and 'packet'\n targetName = packet.query.name\n for j, sent in enumerate(packetsSent):\n if sent.name != targetName and j != 0:\n offset = i + j\n\n i = offset\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass QueryFrequencyFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index):\n features = []\n sources = {}\n\n window = self.params[\"window\"]\n\n i = 
index\n while i < len(self.packets) - 1:\n packet = self.packets[i]\n offset = i + 1\n\n if packet.query != None:\n src = packet.query.srcAddress\n packetsSent, offset = self.getPacketsFromSourceInWindow(offset, src, window)\n\n frequency = computeQueryFrequency(packetsSent, window)\n\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], frequency)\n features.append(feature)\n\n i = offset\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass TargetAddressFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index):\n features = []\n sources = {}\n\n for packet in self.packets:\n for record in packet.records:\n src = record.srcAddress\n if record.targetAddress != None:\n target = record.targetAddress\n\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], target)\n\n features.append(feature)\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass TargetNameFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index):\n features = []\n sources = {}\n\n for packet in self.packets:\n if packet.query != None:\n src = packet.query.srcAddress\n target = packet.query.name\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], target)\n\n features.append(feature)\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass QueryResolutionTimeFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index, params = {}):\n features = []\n sources = {}\n\n packet = self.packets[index]\n\n # Match queries to responses, so only start searching from queries\n if packet.query != None:\n src = packet.query.srcAddress\n target = packet.query.name\n for response in self.packets[index:]:\n if len(response.records) > 0 and response.records[0].target == target:\n match = response.records[0]\n delta = response.ts - packet.ts\n if delta > 0:\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], delta)\n\n features.append(feature)\n\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\nclass QueryLengthFeatureExtractor(FeatureExtractor):\n def __init__(self, packets, params = {}):\n FeatureExtractor.__init__(self, packets, params)\n\n def extract(self, index):\n sources = {}\n features = []\n\n packet = self.packets[index]\n if packet.query != None:\n src = packet.query.srcAddress\n queryLength = len(packet.query.name)\n\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], queryLength)\n\n features.append(feature)\n if len(packet.records) > 0:\n src = packet.records[0].dstAddress\n queryLength = len(packet.records[0].target)\n\n if src not in sources:\n sources[src] = len(sources)\n feature = (sources[src], queryLength)\n\n features.append(feature)\n return features, sources\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\ndef join(featureSet):\n if len(featureSet) == 1:\n return featureSet[0]\n 
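# otherwise spread each extractor's values into its own column, zero-filling the rest\n 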
else:\n index = 0\n numFeatures = len(featureSet) - 1\n joinedFeatures = []\n for features in featureSet:\n\n # features = list of tuples == [(0,19),(0,19),...]\n\n for feature in features:\n entry = [feature[0]] # feature[0] is always the source -- could be wrapped up in a class\n for i in range(index):\n entry.append(0)\n entry.append(feature[1])\n for i in range(numFeatures - index):\n entry.append(0)\n joinedFeatures.append(entry)\n\n index += 1\n return joinedFeatures\n\ndef extract(dnsPackets, extractors):\n featureSet = []\n sourceSet = {}\n for index, packet in enumerate(dnsPackets):\n if isinstance(packet, ResourceRecord):\n continue\n\n sources = set()\n features = {}\n\n for eindex, extractor in enumerate(extractors):\n # Extract one feature set from the chain starting at the current packet\n single_features, single_sources = extractor.extract(index)\n\n if eindex not in features:\n features[eindex] = []\n\n # Add new sources to the main source list, if needed\n for source in single_sources:\n if source not in sourceSet:\n sourceSet[source] = len(sourceSet)\n\n # Re-build feature entries and add them to a list\n for feature in single_features:\n sourceId = feature[0]\n value = feature[1]\n for source in single_sources:\n if single_sources[source] == sourceId:\n adjustedSourceId = sourceSet[source]\n features[eindex].append([adjustedSourceId, value])\n else:\n source = packet.src\n if source not in sourceSet:\n sourceSet[source] = len(sourceSet)\n adjustedSourceId = sourceSet[source]\n features[eindex].append([adjustedSourceId, 0]) # null feature...\n\n # Merge each feature entry tuple\n merged_feature = [packet.src]\n for feature_index in features:\n for value_tuple in features[feature_index]:\n merged_feature.append(value_tuple[1])\n break # only use the first feature\n\n featureSet.append(merged_feature)\n\n # Format the feature using CSV (maybe later add more formatting options)\n formatter = FeatureFormatter(featureSet)\n formatter.toCSV()\n\n return featureSet\n\ndef main(args):\n filenames = args.file\n print >> sys.stderr, \"$> Parsing...\", filenames\n\n dnsPackets = []\n for filename in filenames:\n parser = PacketParser()\n fh = open(filename, 'r')\n packets = parser.parseDNS(fh)\n for packet in packets:\n dnsPackets.append(packet)\n\n # Initialize the extractors\n extractors = []\n for key in vars(args):\n val = vars(args)[key]\n\n if key == \"ql\" and val:\n extractors.append(QueryLengthFeatureExtractor(dnsPackets))\n elif key == \"qr\" and val:\n extractors.append(QueryResolutionTimeFeatureExtractor(dnsPackets))\n elif key == \"qf\" and val != None:\n extractors.append(QueryFrequencyFeatureExtractor(dnsPackets, params = {\"window\" : float(val)}))\n elif key == \"tf\" and val != None:\n extractors.append(TargetQueryFrequencyFeatureExtractor(dnsPackets, params = {\"window\" : float(val)}))\n elif key == \"tn\" and val:\n extractors.append(TargetNameFeatureExtractor(dnsPackets))\n elif key == \"ta\" and val:\n extractors.append(TargetAddressFeatureExtractor(dnsPackets))\n elif key == \"qd\" and val != None:\n extractors.append(QueryComponentDifferenceDiversityFeatureExtractor(dnsPackets, params = {\"window\" : float(val)}))\n elif key == \"qe\" and val != None:\n extractors.append(QueryEntropyDiversityFeatureExtractor(dnsPackets, params = {\"window\" : float(val)}))\n\n output = extract(dnsPackets, extractors)\n print >> sys.stdout, output\n print >> sys.stderr, \"$> Done. 
Parsed %d individual DNS packet(s)\" % len(dnsPackets)\n\nif __name__ == \"__main__\":\n desc = '''\nParse a PCAP file and extract a set of features for classification.\n'''\n\n parser = argparse.ArgumentParser(prog='feature_extractor', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)\n parser.add_argument('-f', '--file', action=\"store\", required=True, help=\"Relative path to PCAP file to parse\", nargs=\"+\")\n parser.add_argument('--ql', default=False, action=\"store_true\", help=\"Query length feature\")\n parser.add_argument('--qr', default=False, action=\"store_true\", help=\"Query resolution time feature\")\n parser.add_argument('--tn', default=False, action=\"store_true\", help=\"Query target name feature\")\n parser.add_argument('--qf', action=\"store\", help=\"Query frequency with parameterized window\")\n parser.add_argument('--tf', action=\"store\", help=\"Source target frequency with parameterized window\")\n parser.add_argument('--ta', action=\"store\", help=\"Query target address feature\")\n parser.add_argument('--qd', action=\"store\", help=\"Source query (single) component differences feature\")\n parser.add_argument('--qe', action=\"store\", help=\"Source query entropy feature\")\n\n args = parser.parse_args()\n\n if (len(sys.argv) == 1):\n parser.print_help()\n sys.exit(-1)\n\n main(args)\n","sub_path":"scripts/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":19264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"128249490","text":"'''# Point-to-Point Comparison Method of Linear Interpolation\nA straight-line interpolation program based on the point-by-point comparison method used in computer control systems. It can display the interpolated staircase line and the coordinates of every turning point.'''\n\nimport turtle as t\n\n'''Quadrant (sign) judgement 0'''\ndef space_judge0(a):\n if a > 0:\n ZF = 1\n else:\n ZF = -1\n return ZF\n\n'''Quadrant (sign) judgement 1'''\ndef space_judge1(a):\n if a > 0:\n ZF = 1\n else:\n ZF = -1\n return ZF\n\n'''Initialize the canvas and the target line'''\ndef Canvas_init():\n t.setup(width=800, height=800, startx=0, starty=100)\n '''Draw the coordinate axes'''\n t.pendown()\n t.pensize(3)\n t.goto(500,0)\n t.goto(490,10)\n t.goto(500,0)\n t.goto(490,-10)\n t.goto(500,0)\n t.goto(-500,0)\n t.goto(0,0)\n t.goto(0,300)\n t.goto(10,290)\n t.goto(0,300)\n t.goto(-10,290)\n t.goto(0,300)\n t.goto(0,-300)\n t.penup()\n t.goto(0,0)\n '''Draw the target line'''\n t.pendown()\n t.pensize(2)\n t.goto(XE, YE)\n t.penup()\n t.goto(0, 0)\n t.pendown()\n t.color('blue')\n t.speed(1)\n\n'''Mark a turning point'''\ndef dot(x,y,z):\n t.color('red')\n t.right(90)\n t.penup()\n t.fd(z)\n t.left(90)\n t.pendown()\n t.circle(z)\n t.penup()\n t.goto(x,y)\n t.color('blue')\n t.pendown()\n\n'''Input and parameter initialization'''\nXE = int(input('Enter the X coordinate: ')) * 50\nYE = int(input('Enter the Y coordinate: ')) * 50\n'''Step precision'''\nx_step = int(input('Enter the x step size: '))*50\ny_step = int(input('Enter the y step size: '))*50\nNXY =int(abs(XE/50)+abs(YE/50)*(x_step+y_step))\nNXY_store = NXY\nFM=0\nX=0\nY=0\n\nCanvas_init()\n\n'''Point-by-point comparison method'''\nwhile (NXY != 0 ):\n if FM >= 0:\n ZF = space_judge0(XE)\n X = X + ZF * x_step\n t.goto(X,Y)\n dot(X,Y,int(x_step)/10)\n if Y/X == YE/XE:\n FM = 0\n else:\n FM = (Y * XE) - abs(X * YE)\n print(int(NXY_store+1-NXY), ':(', X/50, ',', Y/50, ')')\n else:\n ZF = space_judge1(YE)\n Y = Y + ZF * y_step\n t.goto(X,Y)\n dot(X,Y,int(y_step)/10)\n if Y/X == YE/XE:\n FM = 0\n else:\n FM = abs(Y * XE) - (X * YE)\n print(int(NXY_store+1-NXY), ':(', X/50, ',', Y/50, ')')\n NXY = NXY - 1\n if X >= XE and Y >= YE:\n break\n\n'''Keep the plot window open'''\nt.done() 
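# keep the turtle graphics window open until the user closes it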
\n\n\n\n","sub_path":"Linear_inter.py","file_name":"Linear_inter.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"32765891","text":"# Class: Transfer_window\n# Description: Pop-up window for transfering assets to another account\n# Author: Daniel Gripenstedt \nfrom tkinter import Toplevel, StringVar, Label, Button, Entry \nfrom bank_system import Bank_system\nfrom Assets_window import Assets_window\n\nclass Transfer_window:\n #Build transfer window\n def start_transfer_window(self, home_screen, account_id, username):\n self.account_id = account_id\n self.transfer_window = Toplevel(home_screen)\n self.transfer_window.geometry('480x160')\n self.transfer_window.title('Transfer [{}]'.format(username))\n\n self.temp_username = StringVar()\n self.temp_amount = StringVar()\n \n #Labels\n detail_label = Label(self.transfer_window, text = 'Please enter a username and the amount', font=('Calibri',20))\n detail_label.grid(row=0, column=0, sticky='N')\n\n username_lable = Label(self.transfer_window, text = 'username', font=('Calibri',16))\n username_lable.grid(row=1, column=0, sticky='W')\n\n amount_lable = Label(self.transfer_window, text = 'Amount', font=('Calibri',16))\n amount_lable.grid(row=2, column=0, sticky='W')\n\n #hidden label\n self.hidden_label = Label(self.transfer_window, font=('Calibri',12))\n self.hidden_label.grid(row=5, sticky='N')\n\n #Error label\n self.error_label = Label(self.transfer_window, font=('Calibri',12))\n self.error_label.grid(row=4, sticky='N')\n\n #Entries\n self.username_entry = Entry(self.transfer_window, textvariable=self.temp_username)\n self.username_entry.grid(row=1, column=0)\n\n self.amount_entry = Entry(self.transfer_window, textvariable=self.temp_amount)\n self.amount_entry.grid(row=2, column=0)\n\n #Buttons\n ok_button = Button(self.transfer_window, text='Ok', font=('Calibri', 16), width=10,command=lambda: self.start_transfer())\n ok_button.grid(row=3, column=0, sticky='E')\n\n go_back_button = Button(self.transfer_window, text='Go back', font=('Calibri', 16), width=10,command=lambda: self.transfer_window.destroy())\n go_back_button.grid(row=3, column=0, sticky='W', padx=5)\n #Note to self, add function to close all windows on logout\n\n def start_transfer(self):\n username = self.temp_username.get()\n amount = self.temp_amount.get()\n if(self.check_user_input(username, amount)):\n self.finish_transfer(username, amount)\n\n \n def finish_transfer(self, username, amount):\n target_id = Bank_system().get_target_account_id(username)\n Bank_system().transfer_assets(self.account_id, target_id, amount)\n self.username_entry.delete(0, 'end')\n self.amount_entry.delete(0, 'end')\n self.transfer_window.geometry('480x160')\n self.error_label.config(fg='green',text='The transfer was successfully made')\n #Note to self, add function to check if the deposit was succesful\n \n def check_user_input(self, username, amount):\n lower_bound = '180'\n horozontal_bound = '480'\n if((\" \" in username) or (\" \" in amount)):\n print('Spaces are not allowed')\n self.transfer_window.geometry('{}x{}'.format(horozontal_bound, lower_bound))\n self.error_label.config(fg='red',text='Spaces are not allowed')\n return False\n elif(username == '' and amount == ''):\n self.transfer_window.geometry('{}x{}'.format(horozontal_bound, lower_bound))\n self.error_label.config(fg='red',text='Please enter a username and the amount')\n return False\n elif(username == ''):\n 
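# grow the window so the error label below the buttons becomes visible\n 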
self.transfer_window.geometry('{}x{}'.format(horozontal_bound, lower_bound))\n self.error_label.config(fg='red',text='Please enter a username')\n return False\n elif(amount == ''):\n self.transfer_window.geometry('{}x{}'.format(horozontal_bound, lower_bound))\n self.error_label.config(fg='red',text='Please enter the amount')\n return False\n elif(not(amount.isdecimal())):\n self.transfer_window.geometry('{}x{}'.format(horozontal_bound, lower_bound))\n self.error_label.config(fg='red',text='The amount has to be given in numbers')\n return False\n elif (Bank_system().check_target_user(username)):\n return True\n else:\n self.transfer_window.geometry('{}x{}'.format(horozontal_bound, lower_bound))\n self.error_label.config(fg='red',text='The user does not exist')\n return False\n ","sub_path":"Transfer_window.py","file_name":"Transfer_window.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"375656821","text":"# BSD 3-Clause License\n#\n# Copyright (c) 2020, Boris FX\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport shiboken2\nfrom PySide2.QtCore import *\nfrom PySide2.QtGui import *\nfrom PySide2.QtWidgets import * # QMessageBox lives in QtWidgets, not QtGui\nfrom random import *\nfrom mocha.project import get_current_project\n\n\nclass ShadeMattesByOrder():\n def __init__(self, parent=None):\n self.proj = get_current_project()\n\n def do_shading(self):\n if not self.proj:\n # this class is not a QWidget, so the message box takes no parent; bail out afterwards\n msg = QMessageBox()\n msg.setText(\"No project open\")\n msg.exec_()\n return\n layers = self.proj.layers\n if not layers:\n msg = QMessageBox()\n msg.setText(\"No layers in project\")\n msg.exec_()\n return\n\n divider = 1.0 / len(layers)\n for idx, layer in enumerate(layers):\n shade = 1.0 - ((idx + 1) * divider)\n layer.matte_color = (shade, shade, shade)\n\n\nif __name__ == \"__main__\":\n color = ShadeMattesByOrder()\n color.do_shading()\n","sub_path":"shade_layers_by_order.py","file_name":"shade_layers_by_order.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"101075987","text":"# usage:\n# dataframe_filename\n\nimport sys\nimport os\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keyname import keyname as kn\nfrom fileshash import fileshash as fsh\n\nmatplotlib.rcParams['pdf.fonttype'] = 42\nsns.set(style='whitegrid')\n\nnon_url_safe = ['\"', '#', '$', '%', '&', '+',\n ',', '/', ':', ';', '=', '?',\n '@', '[', '\\\\', ']', '^', '`',\n '{', '|', '}', '~', \"'\"]\n\ndef slugify(text):\n \"\"\"\n Turn the text content of a header into a slug for use in an ID\n \"\"\"\n non_safe = [c for c in text if c in non_url_safe]\n if non_safe:\n for c in non_safe:\n text = text.replace(c, '')\n # Strip leading, trailing and multiple whitespace, convert remaining whitespace to _\n text = u'_'.join(text.split())\n return text\n\ndataframe_filename = sys.argv[1]\n\ndf = pd.read_csv(dataframe_filename)\n\nprint(\"Data loaded!\")\n\ndf['Treatment'] = df['Treatment'].apply(lambda raw : {\n 'resource-even__channelsense-no__nlev-two__mute' : 'Blind',\n 'resource-even__channelsense-no__nlev-two__mute__mixed' : 'Mixed',\n 'resource-even__channelsense-yes__nlev-onebig' : 'Flat-Even',\n 'resource-even__channelsense-yes__nlev-two' : 'Nested-Even',\n 'resource-wave__channelsense-yes__nlev-onebig' : 'Flat-Wave',\n 'resource-wave__channelsense-yes__nlev-two' : 'Nested-Wave',\n }[raw]\n)\n\ndf['Relationship Category'] = df.apply(\n lambda x: (\n 'Neighbor' if 'Neighbor' in x['Relationship']\n else 'Channel' if 'mate' in x['Relationship']\n else 'Cell' if 'Cell' in x['Relationship']\n else 'Propagule' if 'Propagule' in x['Relationship']\n else 'Unknown'\n ),\n axis=1\n)\n\nprint(\"Data crunched!\")\nfor measure in df['Measure'].unique():\n\n plt.clf()\n\n g = sns.FacetGrid(\n df.pivot_table(\n index=[\n 'First Update', 'Last Update', 'Relationship',\n 'Relationship Category', 'Seed', 'Treatment'\n ],\n columns='Measure',\n values='Value',\n aggfunc='first'\n ).reset_index(),\n col=\"Treatment\",\n 
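# one facet per treatment, with bars colored by relationship category\n 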
hue=\"Relationship Category\",\n col_wrap=4,\n )\n\n g.map(\n sns.barplot,\n \"Relationship\",\n measure,\n order=[\n 'Neighbor',\n 'Related Neighbor',\n 'Unrelated Neighbor',\n ] + sorted([\n r for r in df['Relationship'].unique() if 'Channelmate' in r\n ]) + [\n 'Nonchannelmate',\n 'Cell Child',\n 'Cell Parent',\n 'Propagule Child',\n 'Propagule Parent',\n ],\n )\n\n for ax in g.axes.flat:\n for label in ax.get_xticklabels():\n label.set_rotation(-90)\n\n outfile = kn.pack({\n 'title' : slugify(measure),\n '_data_hathash_hash' : fsh.FilesHash().hash_files([dataframe_filename]),\n '_script_fullcat_hash' : fsh.FilesHash(\n file_parcel=\"full_parcel\",\n files_join=\"cat_join\"\n ).hash_files([sys.argv[0]]),\n '_source_hash' : kn.unpack(dataframe_filename)['_source_hash'],\n 'ext' : \".pdf\"\n })\n\n plt.gcf().savefig(\n outfile,\n transparent=True,\n bbox_inches='tight',\n pad_inches=0\n )\n\n print(\"Output saved to\", outfile)\n","sub_path":"old/script/ResourceContributedPlot.py","file_name":"ResourceContributedPlot.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"546753911","text":"from copy import deepcopy\nfrom hashlib import sha256\nfrom random import randint\nfrom .date import randomFutureDate\nfrom starkbank import UtilityPayment\n\n\nexample_payment = UtilityPayment(\n bar_code=\"83660000001084301380074119002551100010601813\",\n scheduled=\"2020-03-29\",\n description=\"pagando a conta\"\n)\n\n\ndef generateExampleUtilityPaymentsJson(n=1, amount=None):\n payments = []\n for _ in range(n):\n bar_code = \"83660000001084301380074119002551100010601813\"\n payment = deepcopy(example_payment)\n payment.bar_code = bar_code[:4] + str(randint(100, 100000)).zfill(11) + bar_code[15:]\n payment.scheduled = str(randomFutureDate(days=7).date())\n payment.tags = [sha256(str(amount).encode('utf-8')).hexdigest()]\n payments.append(deepcopy(payment))\n return payments\n","sub_path":"tests/utils/utilityPayment.py","file_name":"utilityPayment.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"313759712","text":"# MVC - model - view - controller\n# This is the models file\n\nimport mongoengine as me\nme.connect(\"REST_API_TEST\")\n\n\nclass User(me.Document):\n # declare the collection fields\n first_name = me.StringField(min_length=1, max_length=100, required=True)\n last_name = me.StringField(min_length=1, max_length=100)\n interests = me.ListField()\n age = me.IntField(min_value=12, max_value=99)\n created_at = me.DateTimeField()\n\n\nif __name__ == '__main__':\n user = User(first_name='Nikolai',\n last_name='Sviridov',\n interests=['mma', 'programming'],\n age=29)\n user.save()\n user = User(first_name='Maria',\n last_name='Sviridova',\n interests=['cooking', 'programming'],\n age=31)\n user.save()\n","sub_path":"ITEA/lesson_10/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"119732892","text":"import config\n\nimport base64, jupyter_client, os\n\nfrom io import BytesIO\nfrom threading import Timer\n\nfrom Listener import Listener\n\ndef start_cb(update, context):\n tgid = update.message.from_user.id\n kernel = config.kernel\n if tgid in config.kernel_dict:\n update.message.reply_text('Kernel already started')\n elif config.num_kernels >=50:\n update.message.reply_text('Too many users, 
please come back later!')\n else:\n config.num_kernels += 1\n update.message.reply_text('Starting kernel...')\n wd = '/home/jovyan/work/' + str(tgid)\n os.makedirs(wd, exist_ok=True)\n if kernel=='python':\n pass\n elif kernel == 'octave':\n pkgd = wd + '/octave_packages'\n os.makedirs(pkgd, exist_ok=True)\n\n rwd = wd\n\n km = jupyter_client.KernelManager(kernel_name = config.kernel)\n km.start_kernel(cwd=rwd)\n cl = km.blocking_client()\n _init_commands(cl, rwd, kernel)\n t = Timer(config.timer_value, stop_kernel, [tgid])\n t.start()\n config.kernel_dict[tgid] = (km, cl, t, kernel)\n update.message.reply_text(kernel + ' is ready!')\n \ndef _init_commands(cl, wd, kernel):\n if kernel == 'python':\n cl.execute_interactive(\"%matplotlib inline\")\n elif kernel == 'ir':\n cl.execute_interactive(\"library(ggplot2)\")\n elif kernel == 'octave':\n pkgd = 'octave_packages'\n cl.execute_interactive(\"pkg prefix %s %s\" % (pkgd, pkgd))\n cl.execute_interactive(\"pkg local_list %s/.octave_packages\" % pkgd)\n \ndef restart_cb(update, context):\n tgid = update.message.from_user.id\n kernel = config.kernel\n if tgid in config.kernel_dict:\n update.message.reply_text('Stopping kernel...')\n stop_kernel(tgid)\n start_cb(update, context)\n\ndef stop_kernel(tgid):\n (km, cl, t, kernel) = config.kernel_dict[tgid]\n t.cancel()\n km.shutdown_kernel()\n config.kernel_dict.pop(tgid, None)\n \ndef help_cb(update, context):\n tgid = update.message.from_user.id\n (km, cl, t, kernel) = config.kernel_dict[tgid]\n if kernel == 'python':\n s = 'Python Help\\n'\n s += 'https://www.python.org/about/help/'\n elif kernel == 'octave':\n s = 'Octave Help\\n'\n s += 'https://www.gnu.org/software/octave/support.html'\n else:\n s = 'No help available for this kernel yet'\n update.message.reply_text(s)\n\ndef error_cb(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n config.logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n \ndef text_handler(update, context):\n tgid = update.message.from_user.id\n if not tgid in config.kernel_dict:\n update.message.reply_text('Kernel not running, please use command /start')\n else:\n (km, cl, t, kernel) = config.kernel_dict[tgid]\n if not km.is_alive():\n update.message.reply_text('Kernel not running, please use command /restart')\n else:\n t.cancel()\n t = Timer(config.timer_value, stop_kernel, [tgid])\n t.start()\n config.kernel_dict[tgid] = (km, cl, t, kernel)\n li = Listener(kernel)\n try:\n timeout = 5.0\n if kernel == 'octave' and update.message.text[:11] == 'pkg install':\n timeout = 60.0\n reply = cl.execute_interactive(update.message.text, allow_stdin=False, \n timeout=timeout, output_hook=li.output_cb)\n except TimeoutError:\n context.bot.send_message( chat_id=update.message.chat_id, text='Timeout waiting for reply' )\n if li.text:\n text = li.escape_ansi_text()\n if len(text) <= 4096:\n context.bot.send_message( chat_id=update.message.chat_id, text=text )\n else:\n context.bot.send_message( chat_id=update.message.chat_id, text=text[:4092]+'\\n...' 
)\n if li.img_data:\n image = base64.b64decode(li.img_data)\n bio = BytesIO()\n bio.name = 'image.png'\n bio.write(image)\n bio.seek(0)\n context.bot.send_photo(chat_id=update.message.chat_id, photo=bio)\n\ndef signal_handler(signum, frame):\n print('Stopping kernels...')\n for tgid in config.kernel_dict:\n print(tgid)\n (km, cl, t, kernel) = config.kernel_dict[tgid]\n km.shutdown_kernel()\n print('Done.')\n","sub_path":"callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"79115926","text":"# -*- coding: utf-8 -*-\nimport configparser\nimport sys\nimport scrapy\nimport logging\nimport os\nimport json\nimport uuid\nimport inspect\nconfig=configparser.ConfigParser()\nconfig.read(\"webExtract.ini\")\nsys.path.append(config[\"ChoprasWebExtract\"][\"custompythonpath\"])\n\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom urllib.parse import urlparse\nfrom AutoCourseInfo_Export.ExportData import exportCrawledLink\nfrom scrapy import settings\n\n\nclass UtorontoSpider(scrapy.Spider):\n \n \n name = \"uOfToronto\" \n universityParentSite= \"https://www.utoronto.ca\"\n allowed_domains = [\"www.utoronto.ca\"]\n start_urls = [\"https://www.utoronto.ca/\"]\n secondStageCrawledLinks=[] \n \n \n \n stageCount=0\n \n \n ParentID=uuid.uuid4()\n ChildID=None\n\n \n \n\n def parse(self, response):\n self.createNestedRelation(response)\n print(\"Started Parsing\"+self.universityParentSite)\n regExp=\"/future-students\"\n futureStudentsArray=response.xpath('//a[contains(@href,\"'+regExp+'\")]/@href').extract()\n self.ChildID=uuid.uuid4()\n for url in futureStudentsArray:\n if \"http\" not in url:\n exportCrawledLink.exportCrawledLinkToFile(self,\"UOfToronto.txt\",self.universityParentSite+url)\n yield scrapy.Request(self.universityParentSite+url,callback=self.parseUOfTorontoSecondStageCrawling,meta={'info':{\"methodName\":inspect.stack()[0][3],\"Level\":self.stageCount,\"Parent\":response.request.url,\"ParentID\":self.ChildID,\"Child\":self.universityParentSite+url,\"regExp\":regExp}})\n else: \n exportCrawledLink.exportCrawledLinkToFile(self,\"UOfToronto.txt\",url)\n yield scrapy.Request(url,callback=self.parseUOfTorontoSecondStageCrawling,meta={'info':{\"methodName\":inspect.stack()[0][3],\"Level\":self.stageCount,\"Parent\":response.request.url,\"ParentID\":self.ChildID,\"Child\":url,\"regExp\":regExp}})\n\n\n def parseUOfTorontoSecondStageCrawling(self,response):\n \n self.createNestedRelation(response) \n regExp=\"/academics\"\n for url in response.xpath('//a[contains(@href,\"/academics\")]/@href').extract():\n if \"http\" not in url:\n \n exportCrawledLink.exportCrawledLinkToFile(self,\"UOfToronto.txt\",self.universityParentSite+url)\n yield scrapy.Request(self.universityParentSite+url,callback=self.parseUOfTorontoThirdStageCrawling,meta={'info':{\"methodName\":inspect.stack()[0][3],\"Level\":self.stageCount,\"Parent\":response.request.url,\"ParentID\":self.ChildID,\"Child\":self.universityParentSite+url,\"regExp\":regExp}})\n \n else:\n exportCrawledLink.exportCrawledLinkToFile(self,\"UOfToronto.txt\",url)\n yield scrapy.Request(url,callback=self.parseUOfTorontoThirdStageCrawling,meta={'info':{\"methodName\":inspect.stack()[0][3],\"Level\":self.stageCount,\"Parent\":response.request.url,\"ParentID\":self.ChildID,\"Child\":url,\"regExp\":regExp}})\n\n \n\n\n def parseUOfTorontoThirdStageCrawling(self,response):\n \n 
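# third stage: persist this page's parent/child relation, then follow academics and program links\n 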
self.createNestedRelation(response)\n academicsRegExp='//a[contains(@href,\"/academics\")]/@href'\n undergradRegExp='//a[contains(@href,\"/progs\")]/@href'\n gradRegExp='//a[contains(@href,\"/Pages/Programs\")]/@href'\n\n academicsExtract=response.xpath(academicsRegExp).extract()\n gradExtract=response.xpath(gradRegExp).extract() \n undergradExtract=response.xpath(undergradRegExp).extract()\n \n for url in academicsExtract:\n if \"http\" not in url:\n exportCrawledLink.exportCrawledLinkToFile(self,\"UOfToronto.txt\",self.universityParentSite+url) \n yield scrapy.Request(self.universityParentSite+url,callback=self.parseUOfTorontoThirdStageCrawling,meta={'info':{\"methodName\":inspect.stack()[0][3],\"Level\":self.stageCount,\"Parent\":response.request.url,\"ParentID\":self.ChildID,\"Child\":self.universityParentSite+url,\"regExp\":academicsRegExp}}) \n else:\n exportCrawledLink.exportCrawledLinkToFile(self,\"UOfToronto.txt\",url) \n yield scrapy.Request(url,callback=self.parseUOfTorontoThirdStageCrawling,meta={'info':{\"methodName\":inspect.stack()[0][3],\"Level\":self.stageCount,\"Parent\":response.request.url,\"ParentID\":self.ChildID,\"Child\":url,\"regExp\":academicsRegExp}}) \n \n \n for url in undergradExtract:\n exportCrawledLink.exportCrawledLinkToFile(self,\"UOfToronto.txt\",url)\n yield scrapy.Request(url,callback=self.parseUOfTorontoThirdStageCrawling,dont_filter=True,meta={'info':{\"methodName\":inspect.stack()[0][3],\"Level\":self.stageCount,\"Parent\":response.request.url,\"ParentID\":self.ChildID,\"Child\":url,\"regExp\":undergradRegExp}}) \n \n for url in gradExtract:\n exportCrawledLink.exportCrawledLinkToFile(self,\"UOfToronto.txt\",url)\n yield scrapy.Request(url,callback=self.parseUOfTorontoThirdStageCrawling,dont_filter=True,meta={'info':{\"methodName\":inspect.stack()[0][3],\"Level\":self.stageCount,\"Parent\":response.request.url,\"ParentID\":self.ChildID,\"Child\":url,\"regExp\":gradRegExp}}) \n \n \n\n \"\"\" def scrapTheDataFromCrawledLink(self,url):\n html=urlopen(url)\n res=BeautifulSoup(html.read(),\"html5lib\")\n tags=res.findAll(\"td\")\n for table in res.findAll('table'):\n for tr in table.findAll('tr')[1:]:\n exportCrawledLink.exportBeautifulSoupData(self,tr.get_text(),os.path.split((urlparse(url).path))[1]+\".txt\",url)\n #for tag in tags:\n # if tag.get_text().find(\"Application deadlines\"):\n #self.log(os.path.split((urlparse(url).path))[1]) \"\"\"\n \n def scrapTheDataFromCrawledLink(self,response,ParentID,ChildID,ParentURL,ChildURL):\n exportCrawledLink.exportCrawledLinkToDatabase(self,str(ParentID),str(ChildID),response.body,ParentURL,ChildURL,1) \n\n\n def createNestedRelation(self,response):\n #build relation\n if(self.stageCount==0):\n self.scrapTheDataFromCrawledLink(response,self.ParentID,self.ChildID,self.universityParentSite,None)\n self.stageCount=self.stageCount+1 # was 'self.stageCount=+1', which assigns +1 instead of incrementing\n else:\n if (self.stageCount==1):\n self.scrapTheDataFromCrawledLink(response,self.ParentID,self.ChildID,response.meta[\"info\"][\"Parent\"],response.meta[\"info\"][\"Child\"])\n self.stageCount=self.stageCount+1\n else:\n self.ParentID=response.meta[\"info\"][\"ParentID\"]\n self.ChildID=uuid.uuid4()\n self.scrapTheDataFromCrawledLink(response,self.ParentID,self.ChildID,response.meta[\"info\"][\"Parent\"],response.meta[\"info\"][\"Child\"]) \n\n \n \n \n \n \n 
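# Assumed usage (not stated in the original source): run 'scrapy crawl uOfToronto' from the Scrapy project root.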
\n\n","sub_path":"AutoCourseInfo_Scrapy/CourseInfoExtract/CourseInfoExtract/spiders/uToronto.py","file_name":"uToronto.py","file_ext":"py","file_size_in_byte":7133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"621512198","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nfrom books.models import Author, Book, Review\nfrom itertools import chain\n\n\nauthors = Author.objects.all()\n\n\n\ndef index(request):\n\n\t\n\n\tobj12 = Book.objects.all()\n\toctopuses = []\n\n\tx = 0\n\ty = 8\n\n\tflag = 0\n\n\twhile flag == 0:\n\n\t\tif y > len(obj12):\n\t\t\ty = len(obj12)\n\t\t\tflag = 1\n\t\toctopuses.append(obj12[x:y])\n\t\tx = y\n\t\ty +=8\n\n\n\tprint('!!', octopuses)\n\n\t\n\n\treturn render(request, 'home/index.html', {'obj12': obj12, 'octopuses':octopuses, 'authors':authors})\n\n\n\n\n\ndef about(request):\n\t\n\n\treturn render(request, 'home/about.html', {})\n\n\n\n\n\n\ndef search(request):\n\n\tquery = request.GET.get('q') # here we pass name=\"q\" from our template with \n\n\n\tbooks = Book.objects.filter(title__search=query)\n\n\n\tsearch_results = books\n\n\n\n\treturn render(request, 'home/search_results.html', {'search_results': search_results, 'authors':authors})\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"204458431","text":"# -*- coding: utf-8 -*-\n\nimport chainer\nfrom chainer import cuda, Variable\nimport chainer.functions as F\nimport chainer.links as L\nimport numpy as np\n\nimport utils\n\n\nclass MNLM(chainer.Chain):\n \"\"\"\n sentence: LSTM\n image: pre-trained VGG features + linear\n \"\"\"\n\n def __init__(self, vocab_size, word_dim, image_dim, hid_dim, initialW):\n self.vocab_size = vocab_size\n self.word_dim = word_dim\n self.image_dim = image_dim\n self.hid_dim = hid_dim\n \n super(MNLM, self).__init__(\n embed=L.EmbedID(self.vocab_size, self.word_dim,\n ignore_label=-1, initialW=initialW),\n W_rnn=L.Linear(self.word_dim, 4 * self.hid_dim),\n U_rnn=L.Linear(self.hid_dim, 4 * self.hid_dim, nobias=True),\n \n W_cnn=L.Linear(self.image_dim, self.hid_dim),)\n\n # self.U_rnn.W.data[:, 0*self.hid_dim:1*self.hid_dim] = self.init_ortho(self.hid_dim)\n # self.U_rnn.W.data[:, 1*self.hid_dim:2*self.hid_dim] = self.init_ortho(self.hid_dim)\n # self.U_rnn.W.data[:, 2*self.hid_dim:3*self.hid_dim] = self.init_ortho(self.hid_dim)\n # self.U_rnn.W.data[:, 3*self.hid_dim:4*self.hid_dim] = self.init_ortho(self.hid_dim)\n\n def init_ortho(self, dim):\n A = np.random.randn(dim, dim)\n U, S, V = np.linalg.svd(A)\n return U.astype(np.float32)\n\n def forward_sentences(self, sents, train):\n xs, ms = utils.padding(sents, head=False, with_mask=True)\n xs = utils.convert_ndarray_to_variable(xs, seq=True, train=train)\n ms = utils.convert_ndarray_to_variable(ms, seq=True, train=train)\n hs = self.encode_sentences(xs, ms, train=train)\n # XXX: last state\n h = hs[-1]\n # XXX: pooling over whole states\n # h = F.concat(hs, axis=0)\n # h = F.reshape(h, (len(hs), hs[0].data.shape[0], hs[0].data.shape[1]))\n # h = F.max(h, axis=0)\n return F.normalize(h)\n\n def forward_images(self, images, train):\n xs = Variable(cuda.cupy.asarray(images, dtype=np.float32), volatile=not train)\n h = self.encode_images(xs, train=train)\n return h\n\n def encode_sentences(self, xs, ms, train):\n batch_size = xs[0].data.shape[0]\n state = {\n 
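# start from an all-zero LSTM hidden and cell state for the whole batch\n 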
\"h\": Variable(cuda.cupy.zeros((batch_size, self.hid_dim), dtype=np.float32),\n volatile=not train),\n \"c\": Variable(cuda.cupy.zeros((batch_size, self.hid_dim), dtype=np.float32),\n volatile=not train)}\n hs = []\n for x, m in zip(xs, ms):\n state = self.encode_sentences_one_step(x, state, train=train)\n for key in state:\n state[key] = state[key] * F.broadcast_to(F.reshape(m, (-1,1)), state[key].data.shape)\n hs.append(state[\"h\"])\n return hs\n\n def encode_sentences_one_step(self, x, state, train):\n w = self.embed(x)\n h_in = self.W_rnn(F.dropout(w, ratio=0.2, train=train)) + self.U_rnn(state[\"h\"])\n c, h = F.lstm(state[\"c\"], h_in)\n state = {\"h\": h, \"c\": c}\n return state\n\n def encode_images(self, x, train):\n h = self.W_cnn(F.dropout(x, ratio=0.2, train=train))\n return F.normalize(h)\n\n","sub_path":"models/MNLM.py","file_name":"MNLM.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"306479395","text":"# Given a sorted array\n# We need to find if the element exists in the array or not\n# return 1 if exists or -1 if not\n\n\ndef binarySearch(A, x):\n n = len(A)\n start = 0\n end = n - 1\n while(start <= end):\n mid = (start + end) // 2\n if A[mid] == x:\n return mid\n elif A[mid] > x:\n end = mid - 1\n else:\n start = mid + 1\n return -1\n\n\nr = binarySearch([0, 2, 4, 5, 6, 7, 8, 9, 10, 15, 17], 8)\nprint(r)\n","sub_path":"Binary-Search/binary-search.py","file_name":"binary-search.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"10251471","text":"# -*- coding: utf-8 -*-\n'''\nЗадание 6.1a\n\nСделать копию скрипта задания 6.1.\n\nДополнить скрипт:\n- Добавить проверку введенного IP-адреса.\n- Адрес считается корректно заданным, если он:\n - состоит из 4 чисел разделенных точкой,\n - каждое число в диапазоне от 0 до 255.\n\nЕсли адрес задан неправильно, выводить сообщение:\n'Incorrect IPv4 address'\n\nОграничение: Все задания надо выполнять используя только пройденные темы.\n\n'''\naddress_correct = False\n\nwhile not address_correct:\n ip_address = input('Please set the IP address: ')\n\n if len(ip_address.split('.')) == 4:\n for item in ip_address.split('.'):\n if item.isdigit() and int(item) in range(0,255):\n address_correct = True\n else:\n address_correct = False\n break\n else:\n address_correct = False\n\n if not address_correct:\n print('Addreee NOK')\n\nclass_a = (1, 127)\nclass_b = (128, 191)\nclass_c = (192, 223)\nclass_d = (224, 239)\n\nif int(ip_address.split('.')[0]) in class_a \\\nor int(ip_address.split('.')[0]) in class_b \\\nor int(ip_address.split('.')[0]) in class_c:\n print('IP address {} belongs to Unicast'.format(ip_address))\nelif int(ip_address.split('.')[0]) in class_d:\n print('IP address {} belongs to Multicast'.format(ip_address))\nelif ip_address == '255.255.255.255':\n print('IP address {} is Local broadcast'.format(ip_address))\nelif ip_address == '0.0.0.0':\n print('IP address {} is Local unassigned'.format(ip_address))\nelse:\n print('IP address {} is unused'.format(ip_address))\n\n","sub_path":"exercises/06_control_structures/task_6_1aa.py","file_name":"task_6_1aa.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"321530005","text":"import sys\nimport os\n\nimport numpy as np\nimport argparse\n\nimport keras\n\n# tf version 1.15.0-rc3\nimport 
tensorflow as tf\n\nimport cv2\nimport csv\nimport geoio\n\nimport gdal\nfrom gdalconst import *\nfrom osgeo import gdal_array, osr\n\nfrom snappy import ProductIO, PixelPos, GeoPos\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin # noqa: F401\n __package__ = \"keras_retinanet.bin\"\n\nfrom tqdm import tqdm\n\nfrom .. import models\nfrom ..utils.config import read_config_file, parse_anchor_parameters\nfrom ..utils.image import read_image_bgr, to_bgr, preprocess_image, resize_image\nfrom ..utils.geo import *\n\nTRAINING_MIN_SIZE = 800\nTRAINING_MAX_SIZE = 1333\n\ndef get_session():\n\t\"\"\" Construct a modified tf session.\n\t\"\"\"\n\tconfig = tf.ConfigProto()\n\tconfig.gpu_options.allow_growth = True\n\treturn tf.Session(config=config)\n\ndef draw_box(image, box, color, thickness=2):\n\t\"\"\" Draws a box on an image with a given color.\n\n\t# Arguments\n\t image : The image to draw on.\n\t box : A list of 4 elements (x1, y1, x2, y2).\n\t color : The color of the box.\n\t thickness : The thickness of the lines to draw a box with.\n\t\"\"\"\n\tb = np.array(box).astype(int)\n\tcv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)\n\ndef draw_detections(image, boxes, scores, labels, color=(255, 0, 0), label_to_name=None, score_threshold=0.05):\n\t\"\"\" Draws detections in an image.\n\n\t# Arguments\n\t image : The image to draw on.\n\t boxes : A [N, 4] matrix (x1, y1, x2, y2).\n\t scores : A list of N classification scores.\n\t labels : A list of N labels.\n\t color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used.\n\t label_to_name : (optional) Functor for mapping a label to a name.\n\t score_threshold : Threshold used for determining what detections to draw.\n\t\"\"\"\n\t# selection = np.where(scores > score_threshold)[0]\n\n\t# debug\n\tselection = np.where(scores > 0)[0]\n\tfor i in selection:\n\t\tdraw_box(image, boxes[i, :], color=color)\n\nclass RetinaNetWrapper(object):\n\t\"\"\"docstring for RetinaNetWrapper\"\"\"\n\tdef __init__(self, \n\t\t\t\tmodel_path, \n\t\t\t\tconvert_model, \n\t\t\t\tbackbone,\n\t\t\t\tanchor_params = None, \n\t\t\t\tscore_threshold=0.05,\n\t\t\t\tmax_detections =2000,\n\t\t\t\timage_min_side =800,\n\t\t\t\timage_max_side =1333\n\t):\n\t\tsuper(RetinaNetWrapper, self).__init__()\n\n\t\t# load the model\n\t\tprint('Loading model, this may take a second...')\n\t\twith tf.device('/cpu:0'):\n\t\t\tself.model = models.load_model(model_path, backbone_name=backbone)\n\n\t\t# optionally convert the model\n\t\tif convert_model:\n\t\t\tself.model = models.convert_model(self.model, anchor_params=anchor_params)\n\n\t\tprint(self.model.summary())\n\n\t\tself.score_threshold = score_threshold\n\t\tself.max_detections = max_detections\n\t\tself.image_min_side = image_min_side\n\t\tself.image_max_side = image_max_side\n\n\tdef predict(self, raw_image, image_type=\"planet\"):\n\t\timage = preprocess_image(raw_image.copy(), image_type=image_type)\n\t\timage, scale = resize_image(image, min_side=self.image_min_side, max_side=self.image_max_side)\n\t\tif keras.backend.image_data_format() == 'channels_first':\n\t\t\timage = image.transpose((2, 0, 1))\n\n\t\t# run network\n\t\tinput_image = np.expand_dims(image, axis=0)\n\n\t\tboxes, scores, labels = self.model.predict_on_batch(input_image)[:3]\n\t\t# correct boxes for image 
scale\n\t\tboxes /= scale\n\n\t\t# select indices which have a score above the threshold\n\t\tindices = np.where(scores[0, :] > self.score_threshold)[0]\n\n\t\t# select those scores\n\t\tscores = scores[0][indices]\n\n\t\t# find the order with which to sort the scores\n\t\tscores_sort = np.argsort(-scores)[:self.max_detections]\n\n\t\t# select detections\n\t\timage_boxes = boxes[0, indices[scores_sort], :]\n\t\timage_scores = scores[scores_sort]\n\t\timage_labels = labels[0, indices[scores_sort]]\n\n\t\treturn image_boxes, image_scores, image_labels\n\n\tdef predict_large_image(self, image_path, resolution, vis_path=None, scale_factor=0.2, save_path=None, image_type=\"planet\"):\n\t\ttilesize_row = 1025\n\t\ttilesize_col = 1025\n\n\t\tfile_type = os.path.basename(image_path).split(\".\")[-1]\n\t\tbasename = os.path.basename(image_path).split(\".\")[0]\n\n\t\tif file_type in [\"tif\", \"TIF\", \"tiff\", \"TIFF\"]:\n\t\t\tdataset = gdal.Open(image_path, GA_ReadOnly)\n\t\t\tsize_column = dataset.RasterXSize\n\t\t\tsize_row = dataset.RasterYSize\n\t\t\tsize_band = dataset.RasterCount\n\n\t\t\txyToLatLonFunc = xyToLatLonTiff\n\t\t\treadTileFunc = readTiffTile\n\t\telif file_type in [\"dim\", \"DIM\"]:\n\t\t\tdataset = ProductIO.readProduct(image_path)\n\t\t\tsize_column = dataset.getSceneRasterWidth()\n\t\t\tsize_row = dataset.getSceneRasterHeight()\n\t\t\tsize_band = len(dataset.getBandNames())\n\n\t\t\txyToLatLonFunc = xyToLatLonDim\n\t\t\treadTileFunc = readDimTile\n\t\telse:\n\t\t\tprint(\"File type %s not supported\" % file_type)\n\t\t\treturn\n\n\t\t# read rgb image for visualization\n\t\tif vis_path is None:\n\t\t\timage_bgr = readTileFunc(dataset, 0, 0, size_column, size_row, size_band, scale_factor=scale_factor)\n\t\t\tif image_type == \"terrasar\":\n\t\t\t\t# TerraSAR image has only one channel\n\t\t\t\t# raw_image = np.expand_dims(raw_image, axis=2)\n\t\t\t\timage_bgr = np.repeat(image_bgr, 3, axis=2)\n\t\t\telif image_type == \"planet\":\n\t\t\t\treverse = False\n\t\t\t\tif image_bgr.shape[2] == 3:\n\t\t\t\t\treverse = True\n\t\t\t\timage_bgr = image_bgr[..., :3]\n\t\t\t\tif reverse:\n\t\t\t\t\timage_bgr = image_bgr[..., ::-1].copy()\n\t\t\timage_bgr = to_bgr(image_bgr)\n\t\telse:\n\t\t\timage_bgr \t\t= read_image_bgr(vis_path)\n\n\t\tall_detections = np.array([[0, 0, size_column - 1, size_row - 1]])\n\t\tfor i in tqdm(range(0, size_row, tilesize_row)):\n\t\t\tfor j in tqdm(range(0, size_column, tilesize_col)):\n\t\t\t\trows = tilesize_row if i + tilesize_row < size_row else size_row - i\n\t\t\t\tcols = tilesize_col if j + tilesize_col < size_column else size_column - j\n\n\t\t\t\traw_image = readTileFunc(dataset, j, i, cols, rows, size_band)\n\t\t\t\tif image_type == \"terrasar\":\n\t\t\t\t\t# TerraSAR image has only one channel\n\t\t\t\t\t# raw_image = np.expand_dims(raw_image, axis=2)\n\t\t\t\t\traw_image = np.repeat(raw_image, 3, axis=2)\n\t\t\t\telif image_type == \"planet\":\n\t\t\t\t\treverse = False\n\t\t\t\t\tif raw_image.shape[2] == 3:\n\t\t\t\t\t\treverse = True\n\t\t\t\t\traw_image = raw_image[..., :3]\n\t\t\t\t\tif reverse:\n\t\t\t\t\t\traw_image = raw_image[..., ::-1].copy()\n\n\t\t\t\timage_boxes, image_scores, image_labels = self.predict(raw_image, image_type=image_type)\n\t\t\t\t# add offset to image_boxes\n\t\t\t\timage_boxes[..., 0] += j\n\t\t\t\timage_boxes[..., 1] += i\n\t\t\t\timage_boxes[..., 2] += j\n\t\t\t\timage_boxes[..., 3] += i\n\n\t\t\t\t# concatenate results\n\t\t\t\tall_detections = np.concatenate([all_detections, image_boxes], axis=0)\n\n\t\t\t\tif 
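predict_large_image above slides a roughly 1025-pixel window over the raster, runs predict on each tile, and shifts the tile-local boxes back into full-image coordinates before concatenating. The offset step in isolation (tile origin values are illustrative):

import numpy as np

def tile_to_global(boxes, tile_x, tile_y):
    # boxes: [N, 4] as (x1, y1, x2, y2) in tile-local pixels; adding the
    # tile origin mirrors the four "+= j" / "+= i" lines in the loop above.
    out = boxes.copy()
    out[:, [0, 2]] += tile_x
    out[:, [1, 3]] += tile_y
    return out

print(tile_to_global(np.array([[10., 20., 50., 60.]]), tile_x=1025, tile_y=2050))
# [[1035. 2070. 1075. 2110.]]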
save_path is not None:\n\t\t\t\t\tresize_image_boxes = image_boxes * scale_factor\n\t\t\t\t\tdraw_detections(image_bgr, resize_image_boxes, image_scores, image_labels, score_threshold=self.score_threshold)\n\n\t\twith open(os.path.join(save_path, '%s.csv' % basename), mode='w') as csv_file:\n\t\t\twriter = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\t\t# write down detections\n\t\t\t# the first line will be extent of image\n\t\t\tfor i, d in enumerate(all_detections):\n\t\t\t\tif i == 0:\n\t\t\t\t\tulx, uly = xyToLatLonFunc(dataset, d[0], d[1])\n\t\t\t\t\tbrx, bry = xyToLatLonFunc(dataset, d[2], d[3])\n\t\t\t\t\tif ulx > 90 or ulx < -90 or uly > 180 or uly < -180:\n\t\t\t\t\t\tulx, uly = utmToLatLng(48, ulx, uly)\n\t\t\t\t\t\tbrx, bry = utmToLatLng(48, brx, bry)\n\t\t\t\t\twriter.writerow([ulx, uly, brx, bry])\n\t\t\t\telse:\n\t\t\t\t\tlx, ly = xyToLatLonFunc(dataset, (d[0] + d[2]) / 2, (d[1] + d[3]) / 2)\n\t\t\t\t\tif lx > 90 or lx < -90 or ly > 180 or ly < -180:\n\t\t\t\t\t\tlx, ly = utmToLatLng(48, lx, ly)\n\n\t\t\t\t\twriter.writerow([lx, ly, (d[2] - d[0]) * resolution, (d[3] - d[1]) * resolution])\n\n\t\tcv2.imwrite(os.path.join(save_path, '%s_vis.png' % basename), image_bgr)\n\ndef parse_args(args):\n\t\"\"\" Parse the arguments.\n\t\"\"\"\n\tparser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.')\n\tparser.add_argument('--image-path', help='Path for image need detections.')\n\tparser.add_argument('--vis-path', help='Path for visualize image.', default=None)\n\tparser.add_argument('--vis-scale-factor', help='Scale factor for visualize image.', type=float, default=0.2)\n\tparser.add_argument('--res', \t\t\t help='Image resolution.', type=float, default=2.5)\n\tparser.add_argument('--image-type', help='Target image type. planet or terrasar. Default: planet', default=\"planet\")\n\tparser.add_argument('--model', help='Path to RetinaNet model.')\n\tparser.add_argument('--convert-model', help='Convert the model to an inference model (ie. 
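The xyToLatLon* helpers used above come from the package's utils.geo module and are not shown in this record; for GeoTIFF input the conventional pixel-to-world mapping is GDAL's affine geotransform. A sketch of that convention (the helper name is mine, not from this repo):

from osgeo import gdal

def pixel_to_geo(dataset, px, py):
    # Geotransform layout: (origin_x, pixel_width, row_rotation,
    #                       origin_y, col_rotation, pixel_height)
    gt = dataset.GetGeoTransform()
    gx = gt[0] + px * gt[1] + py * gt[2]
    gy = gt[3] + px * gt[4] + py * gt[5]
    return gx, gy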
the input is a training model).', action='store_true')\n\tparser.add_argument('--backbone', help='The backbone of the model.', default='resnet50')\n\tparser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.5, type=float)\n\tparser.add_argument('--max-detections', help='Max Detections per image (defaults to 100).', default=100, type=int)\n\tparser.add_argument('--save-path', help='Path for saving images with detections (doesn\\'t work for COCO).')\n\tparser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)\n\tparser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)\n\tparser.add_argument('--config', help='Path to a configuration parameters .ini file (only used with --convert-model).')\n\n\treturn parser.parse_args(args)\n\ndef main(args=None):\n\t# parse arguments\n\tif args is None:\n\t\targs = sys.argv[1:]\n\targs = parse_args(args)\n\n\t# optionally choose specific GPU\n\tkeras.backend.tensorflow_backend.set_session(get_session())\n\n\t# make save path if it doesn't exist\n\tif args.save_path is not None and not os.path.exists(args.save_path):\n\t\tos.makedirs(args.save_path)\n\n\t# optionally load config parameters\n\tif args.config:\n\t\targs.config = read_config_file(args.config)\n\n\t# optionally load anchor parameters\n\tanchor_params = None\n\tif args.config and 'anchor_parameters' in args.config:\n\t\tanchor_params = parse_anchor_parameters(args.config)\n\n\tmodel = RetinaNetWrapper(args.model, args.convert_model, args.backbone,\n\t\t\t\t\t\t\tanchor_params = anchor_params,\n\t\t\t\t\t\t\tscore_threshold = args.score_threshold,\n\t\t\t\t\t\t\tmax_detections = args.max_detections,\n\t\t\t\t\t\t\timage_min_side = args.image_min_side,\n\t\t\t\t\t\t\timage_max_side = args.image_max_side)\n\n\tmodel.predict_large_image(args.image_path, args.res, args.vis_path, args.vis_scale_factor, args.save_path, args.image_type)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"keras_retinanet/bin/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":10749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"186559239","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom multi_task_data_CDR import Prepare_Task_Data_2\nimport scipy.stats as stats\nimport csv\n\nday_flag = True\nweek_flag = True\n\npath = '/home/qiuhui/Desktop/howard/downloaded_data/Milano_WeatherPhenomena/mi_meteo_5911_full.csv'\nwind_speed = []\nwith open(path, newline='') as csvfile:\n\trows = csv.reader(csvfile)\n\tfor row in rows:\n\t\twind_speed.append(float(row[2]))\n#X_array = wind_speed\n\t\nTK2 = Prepare_Task_Data_2('/home/qiuhui/Desktop/bo/predict_telecom_traffic/CNN_RNN/npy/final/') ##fusion\n_X_array, _Y_array = TK2.Task_avg(generate_data=False) ##fusion\n_X_array = _Y_array\n_X_array = _X_array[:1464, :, :, :, :]\n\n_X_array = _X_array.reshape((-1, 15, 15, 3)) # 3 is timestamp, grid id, internet traffic\n\nif day_flag == True:\n\tcount = 0\n\thour_sum = 0\n\tX_array_day = []\n\tCDR_day = [[[] for i in range(15)] for j in range(15)]\n\tfor ele in wind_speed:\n\t\tcount += 1\n\t\tif count == 24:\n\t\t\tX_array_day.append((hour_sum + ele) / 24)\n\t\t\thour_sum = 0\n\t\t\tcount = 0\n\t\telse:\n\t\t\thour_sum += ele\n\tcount = 0\n\thour_sum = 0\n\tfor i in range(15):\n\t\tfor j in range(15):\n\t\t\tfor k in 
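The count/hour_sum bookkeeping in these loops computes fixed-window (24-hour and 168-hour) means by hand; with a time index the same reductions collapse to one call each. A pandas sketch with hypothetical readings (the script's wind_speed list would take their place; the start date is invented):

import pandas as pd

readings = [3.1, 2.8, 4.0] * 8  # 24 hourly values = one day
hourly = pd.Series(readings,
                   index=pd.date_range('2013-11-01', periods=len(readings), freq='H'))
daily = hourly.resample('D').mean()   # replaces the count == 24 branch
weekly = hourly.resample('W').mean()  # replaces the count == 168 branch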
range(_X_array.shape[0]):\n\t\t\t\tele = _X_array[k, i, j, -1]\n\t\t\t\tcount += 1\n\t\t\t\tif count == 24:\n\t\t\t\t\tCDR_day[i][j].append((hour_sum + ele) / 24)\n\t\t\t\t\thour_sum = 0\n\t\t\t\t\tcount = 0\n\t\t\t\telse:\n\t\t\t\t\thour_sum += ele\nX_array_day = np.array(X_array_day)\nCDR_day = np.array(CDR_day)\ncorr = np.zeros((15, 15))\nfor i in range(15):\n\tfor j in range(15):\n\t\t# CDR = _X_array[:, i, j, -1]\n\t\tcorr[i][j], _ = stats.pearsonr(X_array_day, CDR_day[i, j, :])\nplt.figure()\nplt.imshow(corr, cmap = plt.cm.gray, vmin = 0, vmax = 1)\nplt.colorbar()\nplt.title('Correlation coefficient of CDR and temperature(each day)')\n\nif week_flag == True:\n\tcount = 0\n\thour_sum = 0\n\tX_array = []\n\tCDR = [[[] for i in range(15)] for j in range(15)]\n\tfor ele in wind_speed:\n\t\tcount += 1\n\t\tif count == 168:\n\t\t\tX_array.append((hour_sum + ele) / 168)\n\t\t\thour_sum = 0\n\t\t\tcount = 0\n\t\telse:\n\t\t\thour_sum += ele\n\tcount = 0\n\thour_sum = 0\n\tfor i in range(15):\n\t\tfor j in range(15):\n\t\t\tfor k in range(_X_array.shape[0]):\n\t\t\t\tele = _X_array[k, i, j, -1]\n\t\t\t\tcount += 1\n\t\t\t\tif count == 168:\n\t\t\t\t\tCDR[i][j].append((hour_sum + ele) / 168)\n\t\t\t\t\thour_sum = 0\n\t\t\t\t\tcount = 0\n\t\t\t\telse:\n\t\t\t\t\thour_sum += ele\n\t\t\tcount = 0\n\nX_array = np.array(X_array)\n#CDR = np.array(CDR)\n#print(X_array.shape, CDR.shape)\ncorr = np.zeros((15, 15))\nfor i in range(15):\n\tfor j in range(15):\n\t\t# CDR = _X_array[:, i, j, -1]\n\t\tcorr[i][j], _ = stats.pearsonr(X_array, CDR[i][j][:])\nplt.figure()\nplt.imshow(corr, cmap = plt.cm.gray, vmin = 0, vmax = 1)\nplt.colorbar()\nplt.title('Correlation coefficient of CDR and temperature(each week)')\n\nmaximum = [-2, -1, -1]\nfor i in range(15):\n\tfor j in range(15):\n\t\tif corr[i][j] > maximum[0]:\n\t\t\tmaximum = [corr[i][j], i, j]\nplt.figure()\nx = np.arange(0, 504, 24)\nplt.xticks(x)\nplt_wind = []\nfor ele in wind_speed:\n\tplt_wind.append((ele - np.min(wind_speed)) / (np.max(wind_speed) - np.min(wind_speed)))\nplt_CDR = []\nCDR_X = _X_array[:504, maximum[1], maximum[2], -1]\nfor ele in CDR_X:\n\tplt_CDR.append((ele - np.min(CDR_X)) / (np.max(CDR_X) - np.min(CDR_X)))\nplt.plot(plt_wind[:504], '-o')\nplt.plot(plt_CDR, '--o')\nplt.title('the data of CDR and temperature(hour)')\n\nplt.figure()\nx = np.arange(0, 8, 1)\nplt.xticks(x)\nplt_wind = []\nfor ele in X_array_day:\n\tplt_wind.append((ele - np.min(X_array_day)) / (np.max(X_array_day) - np.min(X_array_day)))\nplt_CDR = []\nCDR_X = CDR_day[maximum[1]][maximum[2]][:]\nfor ele in CDR_X:\n\tplt_CDR.append((ele - np.min(CDR_X)) / (np.max(CDR_X) - np.min(CDR_X)))\nplt.plot(plt_wind, '-o')\nplt.plot(plt_CDR, '--o')\nplt.title('the data of CDR and temperature(day)')\n\nplt.figure()\nx = np.arange(0, 8, 1)\nplt.xticks(x)\nplt_wind = []\nfor ele in X_array:\n\tplt_wind.append((ele - np.min(X_array)) / (np.max(X_array) - np.min(X_array)))\nplt_CDR = []\nCDR_X = CDR[maximum[1]][maximum[2]][:]\nfor ele in CDR_X:\n\tplt_CDR.append((ele - np.min(CDR_X)) / (np.max(CDR_X) - np.min(CDR_X)))\nplt.plot(plt_wind, '-o')\nplt.plot(plt_CDR, '--o')\nplt.title('the data of CDR and temperature(week)')\n\ncorr = np.zeros((15, 15))\nfor i in range(15):\n\tfor j in range(15):\n\t\tCDR = _X_array[:, i, j, -1]\n\t\tcorr[i][j], _ = stats.pearsonr(wind_speed, CDR)\nplt.figure()\nplt.imshow(corr, cmap = plt.cm.gray, vmin = 0, vmax = 1)\nplt.colorbar()\nplt.title('Correlation coefficient of CDR and temperature(each 
hour)')\nplt.show()","sub_path":"CNN_RNN/plot_corr_temp.py","file_name":"plot_corr_temp.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"472835052","text":"# -*- coding: utf-8 -*-\n#聚类\n\nfrom openpyxl import load_workbook\nimport numpy as np\nfrom openpyxl import Workbook\n\n# filename=str(input('input the head filename'))\n# sheets=str(input('input the num of sheets'))\n\n\ndef createSheets(filename,orders,items,orderseach,Me):\n print('begin to arrange station..')\n\n #orderseach=12;\n plat=[False for i in range(Me)]\n platstat=[[] for i in range(Me)]\n c=0;\n platnum=[orderseach for i in range(Me)]\n workbook_ = load_workbook(filename=filename) #导入工作表\n sheetnames =workbook_.get_sheet_names() #获得表单名字\n sheet = workbook_.get_sheet_by_name(sheetnames[0]) #从工作表中提取某一表单\n\n sumdata=[]\n sss=[[] for i in range(Me)]\n f=open('same' + filename.strip('xlsx') + 'txt', 'r')\n same=eval(f.read())\n #orders+2\n for colNum in range(2,orders+2):\n data=[]\n #items+2\n for rowNum in range(2,items+2):\n i=0\n if(sheet.cell(row=rowNum, column=colNum).value):\n i=1\n data.append(i) #获得数据\n\n for j in range(len(same)):\n if data[same[j][0]]==1:\n data[same[j][1]]=1\n sumdata.append(data)\n\n\n\n # print('first state:\\n',np.mat(sumdata))\n\n left=orders\n inOrder=[False for i in range(orders)]\n\n\n while(True):\n if(c>(Me-1)):\n # print('clean the plat')\n plat = [False for i in range(Me)]\n platstat = [[] for i in range(Me)]\n platnum = [orderseach for i in range(Me)]\n c=0\n if(not plat[c]):\n if(left==0):\n break\n if(left taxid dmp file from NCBI database)\n# an input file 'gi_list.txt' (tab delimited txt) gi# queries, one gi per row, one column\n# name for an output file to be created (also tab delim). 
Format will be gi# taxid\n\n# opens gi number file and saves 'locally'\ngi_set = set(line.strip() for line in open('gi_list.txt', 'r'))\n\n# sets namesfile to open and read ('r') from taxid.gz file\ntaxidfile = open('gi_taxid_prot.dmp', 'r')\n# sets up an empty list called taxID_matches\ntaxid_matches = []\n# sets up counting lines processed\nn = 0\n# Reads string and splits into items (lineparts) in a list based on delimiter '\\t'\nfor line in taxidfile:\n    n = n + 1\n    # Breaks each line at delimiters, and strips the '\\t|\\n' off the end of the line\n    linepart = line.strip('\\t|\\n').split('\\t')\n    if linepart[0] in gi_set:\n        taxid_matches.append(line)\n    # prints a line every 1 million lines.\n    if n%1000000 == 0:\n        print (str(n/1000000) + ' million lines')\nwrite_file = open('taxid_matches.txt','w')\nfor line in taxid_matches:\n    write_file.write(line)\nwrite_file.close()\n\n#5:18-\n","sub_path":"taxID conversions/gi2taxid.unzipped.py3.py","file_name":"gi2taxid.unzipped.py3.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"138361011","text":"# -*- coding: utf-8 -*-\n# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C) 2015 MinIO, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import TestCase\nfrom nose.tools import eq_\n\nfrom minio.definitions import UploadPart\nfrom minio.xml_marshal import (xml_marshal_bucket_constraint,\n                               xml_marshal_complete_multipart_upload)\n\nclass GenerateRequestTest(TestCase):\n    def test_generate_bucket_constraint(self):\n        expected_string = b'<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">' \\\n            b'<LocationConstraint>region</LocationConstraint></CreateBucketConfiguration>'\n        actual_string = xml_marshal_bucket_constraint('region')\n        eq_(expected_string, actual_string)\n\n    def test_generate_complete_multipart_upload(self):\n        expected_string = b'<CompleteMultipartUpload xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">' \\\n            b'<Part><PartNumber>1</PartNumber><ETag>\"a54357aff0632cce46d942af68356b38\"</ETag></Part>' \\\n            b'<Part><PartNumber>2</PartNumber><ETag>\"0c78aef83f66abc1fa1e8477f296d394\"</ETag></Part>' \\\n            b'<Part><PartNumber>3</PartNumber><ETag>\"acbd18db4cc2f85cedef654fccc4a4d8\"</ETag></Part>' \\\n            b'</CompleteMultipartUpload>'\n        etags = [\n            UploadPart('bucket', 'object', 'upload_id', 1,\n                       'a54357aff0632cce46d942af68356b38',\n                       None, 0),\n            UploadPart('bucket', 'object', 'upload_id', 2,\n                       '0c78aef83f66abc1fa1e8477f296d394',\n                       None, 0),\n            UploadPart('bucket', 'object', 'upload_id', 3,\n                       'acbd18db4cc2f85cedef654fccc4a4d8',\n                       None, 0),\n        ]\n        actual_string = xml_marshal_complete_multipart_upload(etags)\n        eq_(expected_string, actual_string)\n","sub_path":"tests/unit/generate_xml_test.py","file_name":"generate_xml_test.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"401785990","text":"# coding=utf-8\n# Author: gml <28422785281@qq.com>\n# Modified by: gml\n# datetime: 2021/10/11 15:56 \n# License: bupt\n# -*- coding:utf-8 -*-\n# Import the iris dataset, use matplotlib to visualise the data, and load the PCA algorithm package.\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.datasets import load_iris\n\n# Then load the iris dataset as a dict-like object, with y holding the labels and x holding the feature data.\ndata = load_iris()\ny = data.target\nx = 
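The xml_marshal test a little earlier in this dump spells out the S3 CompleteMultipartUpload payload as concatenated byte literals; the same document can be built structurally. A standard-library sketch (ETag values copied from that test; minio's own serializer may differ in details such as namespace attributes):

import xml.etree.ElementTree as ET

root = ET.Element('CompleteMultipartUpload')
for number, etag in [(1, 'a54357aff0632cce46d942af68356b38'),
                     (2, '0c78aef83f66abc1fa1e8477f296d394'),
                     (3, 'acbd18db4cc2f85cedef654fccc4a4d8')]:
    part = ET.SubElement(root, 'Part')
    ET.SubElement(part, 'PartNumber').text = str(number)
    ET.SubElement(part, 'ETag').text = '"%s"' % etag
payload = ET.tostring(root)  # bytes payload for the complete-multipart request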
data.data\nprint(y)\nprint(x)\n\n# 将数据写入 excel 文件\ndef data_write_excel(data, filename, writer=None):\n data = pd.DataFrame(data)\n if not writer:\n writer = pd.ExcelWriter(filename + '1.xlsx')\n data.to_excel(writer, float_format='%.5f', sheet_name=filename)\n writer.save()\n writer.close()\n else:\n data.to_excel(writer, float_format='%.5f', sheet_name=filename)\n# 调用PCA算法进行降维主成分分析\n# 指定主成分个数,即降维后数据维度,降维后的数据保存在reduced_x中。\npca = PCA(n_components=2)\nreduced_x = pca.fit_transform(x)\nwriter = pd.ExcelWriter('pca1.xlsx')\ndata_restore = pca.inverse_transform(reduced_x)\ndata_write_excel(reduced_x, 'low', writer=writer)\ndata_write_excel(data_restore, 'restore', writer=writer)\n# 主成分贡献率\ndata_write_excel(pca.explained_variance_ratio_, 'ratio', writer=writer)\n# 主成分方差\ndata_write_excel(pca.explained_variance_, 'variance', writer=writer)\n# 主成分在各个变量的负载\ndata_write_excel(pca.components_.T, 'component', writer=writer)\n# 主成分个数\nprint(pca.n_components_, ' n_components')\nwriter.save()\nwriter.close()\n\nprint(data_restore)\n# 将降维后的数据保存在不同的列表中\nred_x, red_y = [], []\nblue_x, blue_y = [], []\ngreen_x, green_y = [], []\nfor i in range(len(reduced_x)):\n if y[i] == 0:\n red_x.append(reduced_x[i][0])\n red_y.append(reduced_x[i][1])\n\n elif y[i] == 1:\n blue_x.append(reduced_x[i][0])\n blue_y.append(reduced_x[i][1])\n\n else:\n green_x.append(reduced_x[i][0])\n green_y.append(reduced_x[i][1])\n\n# 可视化\nplt.scatter(red_x, red_y, c='r', marker='x')\nplt.scatter(blue_x, blue_y, c='b', marker='D')\nplt.scatter(green_x, green_y, c='g', marker='.')\nplt.show()","sub_path":"python_wajue/numbertwo_jiangwei.py","file_name":"numbertwo_jiangwei.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"247978061","text":"import pyqmc\nimport numpy as np\n\ndef setuph2(r):\n from pyscf import gto, scf, lo\n from pyqmc.accumulators import LinearTransform, EnergyAccumulator\n from pyqmc.obdm import OBDMAccumulator\n from pyqmc.tbdm import TBDMAccumulator\n from pyqmc.cvmc import DescriptorFromOBDM, DescriptorFromTBDM, PGradDescriptor\n\n import itertools\n\n # ccECP from A. Annaberdiyev et al. Journal of Chemical Physics 149, 134108 (2018)\n basis = {\n \"H\": gto.basis.parse(\n \"\"\"\n H S\n23.843185 0.00411490\n10.212443 0.01046440\n4.374164 0.02801110\n1.873529 0.07588620\n0.802465 0.18210620\n0.343709 0.34852140\n0.147217 0.37823130\n0.063055 0.11642410\n\"\"\"\n )\n }\n \"\"\"\nH S\n0.040680 1.00000000\nH S\n0.139013 1.00000000\nH P\n0.166430 1.00000000\nH P\n0.740212 1.00000000\n\"\"\"\n ecp = {\n \"H\": gto.basis.parse_ecp(\n \"\"\"\n H nelec 0\nH ul\n1 21.24359508259891 1.00000000000000\n3 21.24359508259891 21.24359508259891\n2 21.77696655044365 -10.85192405303825\n\"\"\"\n )\n }\n\n mol = gto.M(\n atom=f\"H 0. 0. 0.; H 0. 0. 
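PCA(n_components=2) above keeps the two directions of largest variance; fit_transform and inverse_transform are a centred projection and its transpose. A hand-rolled SVD sketch of the same quantities (illustrative, not sklearn's exact code path):

import numpy as np

X = np.random.rand(150, 4)               # stand-in for the iris feature matrix
mu = X.mean(axis=0)
U, S, Vt = np.linalg.svd(X - mu, full_matrices=False)
W = Vt[:2]                               # principal axes, cf. pca.components_
Z = (X - mu) @ W.T                       # cf. pca.fit_transform(x)
X_restore = Z @ W + mu                   # cf. pca.inverse_transform(reduced_x)
var_ratio = S[:2] ** 2 / np.sum(S ** 2)  # cf. pca.explained_variance_ratio_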
{r}\", unit=\"bohr\", basis=basis, ecp=ecp, verbose=5\n )\n mf = scf.RHF(mol).run()\n \n wf = pyqmc.slater_jastrow(mol, mf)\n freeze = {}\n for k in wf.parameters:\n freeze[k] = np.zeros(wf.parameters[k].shape,dtype='bool')\n print(freeze.keys())\n print(wf.parameters['wf1mo_coeff_alpha'])\n #this freezing allows us to easily go between bonding and \n # AFM configurations.\n freeze['wf1mo_coeff_alpha'][0,0]=True\n freeze['wf1mo_coeff_beta'][1,0]=True\n \n mo_occ = mf.mo_coeff[:, mf.mo_occ > 0]\n a = lo.iao.iao(mol, mo_occ)\n a = lo.vec_lowdin(a, mf.get_ovlp())\n\n obdm_up = OBDMAccumulator(mol=mol, orb_coeff=a, spin=0)\n obdm_down = OBDMAccumulator(mol=mol, orb_coeff=a, spin=1)\n descriptors = {\n \"t\": [[(1.0, (0, 1)), (1.0, (1, 0))], [(1.0, (0, 1)), (1.0, (1, 0))]],\n \"trace\": [[(1.0, (0, 0)), (1.0, (1, 1))], [(1.0, (0, 0)), (1.0, (1, 1))]],\n }\n for i in [0, 1]:\n descriptors[f\"nup{i}\"] = [[(1.0, (i, i))], []]\n descriptors[f\"ndown{i}\"] = [[], [(1.0, (i, i))]]\n \n tbdm_up_down = TBDMAccumulator(mol=mol, orb_coeff=np.array([a,a]), spin=(0,1), ijkl=[[0,0,0,0]])\n tbdm_down_up = TBDMAccumulator(mol=mol, orb_coeff=np.array([a,a]), spin=(1,0), ijkl=[[0,0,0,0]])\n descriptors_tbdm = {\n \"U\": [[(1.0,(0))],[(1.0,(0))]]\n }\n\n acc = PGradDescriptor(\n EnergyAccumulator(mol),\n LinearTransform(wf.parameters, freeze=freeze),\n {\n 'obdm': [obdm_up, obdm_down], \n 'tbdm': [tbdm_up_down, tbdm_down_up],\n },\n {\n 'obdm': DescriptorFromOBDM(descriptors, norm=2.0),\n 'tbdm': DescriptorFromTBDM(descriptors_tbdm, norm=2.0*(2.0-1.0)),\n },\n )\n\n return {\"wf\": wf, \"acc\": acc, \"mol\": mol, \"mf\": mf, \"descriptors\": descriptors, \"descriptors_tbdm\": descriptors_tbdm}\n\nif __name__ == \"__main__\":\n import pyqmc\n import pyqmc.dasktools\n from pyqmc.dasktools import line_minimization, cvmc_optimize\n from dask.distributed import Client, LocalCluster \n\n r = 1.1\n\n ncore = 2\n sys = setuph2(r)\n cluster = LocalCluster(n_workers=ncore, threads_per_worker=1)\n client = Client(cluster)\n\n # Set up calculation\n nconf = 800\n configs = pyqmc.initial_guess(sys[\"mol\"], nconf)\n wf, df = line_minimization(\n sys[\"wf\"],\n configs,\n pyqmc.gradient_generator(sys[\"mol\"], sys[\"wf\"]),\n client=client,\n maxiters=5,\n )\n\n forcing = {}\n obj = {}\n for k in sys[\"descriptors\"]:\n forcing[k] = 0.0\n obj[k] = 0.0\n\n for k in sys[\"descriptors_tbdm\"]:\n forcing[k] = 0.0\n obj[k] = 0.0\n\n forcing[\"t\"] = 0.5\n forcing[\"trace\"] = 1.0\n forcing[\"U\"] = 5.0\n obj[\"t\"] = 0.0\n obj[\"trace\"] = 2.0\n obj[\"U\"] = 1.0\n\n hdf_file = \"saveh2.hdf5\"\n wf, df = cvmc_optimize(\n sys[\"wf\"],\n configs,\n sys[\"acc\"],\n objective=obj,\n forcing=forcing,\n iters=50,\n tstep=0.2,\n hdf_file = hdf_file,\n client = client,\n )\n","sub_path":"examples/cvmc_h2.py","file_name":"cvmc_h2.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"504106288","text":"\"\"\"CPU functionality.\"\"\"\n# Day 1: review specs.\nimport sys\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.ram = []\n self.running = False\n self.pc = 0\n self.reg = [0] * 8\n self.stack = [0] * 256\n self.reg[7] = 256\n self.interrupts = [0] * 8\n self.last_key = []\n self.flags = [0, 0, 0] #LGE\n self.commands = {\n 0b00000000: self.nop,\n 0b00000001: self.halt,\n 0b00010001: self.ret,\n 0b01000101: self.push,\n 0b01000110: self.pop,\n 0b01000111: self.prn,\n 0b01001000: 
self.pra,\n 0b01010000: self.call,\n 0b01010100: self.jmp,\n 0b01010101: self.jeq,\n 0b01010110: self.jne,\n 0b01010111: self.jgt,\n 0b01011000: self.jlt,\n 0b01011001: self.jle,\n 0b01011010: self.jge,\n 0b01100101: self.alu,\n 0b01101001: self.alu,\n 0b10000010: self.ldi,\n 0b10000100: self.st,\n 0b10100000: self.alu,\n 0b10100010: self.alu,\n 0b10100100: self.alu,\n 0b10100111: self.alu,\n 0b10101000: self.alu,\n 0b10101010: self.alu,\n 0b10101011: self.alu,\n 0b10101100: self.alu,\n 0b10101101: self.alu\n }\n self.com_args = {\n 0b01100101: [\"INC\"],\n 0b01101001: [\"NOT\"],\n 0b10100000: [\"ADD\"],\n 0b10100010: [\"MUL\"],\n 0b10100100: [\"MOD\"],\n 0b10100111: [\"CMP\"],\n 0b10101000: [\"AND\"],\n 0b10101010: [\"OR\"],\n 0b10101011: [\"XOR\"],\n 0b10101100: [\"SHL\"],\n 0b10101101: [\"SHR\"]\n }\n\n def load(self, filename):\n \"\"\"Load a program into memory.\"\"\"\n program = []\n with open(filename, \"r\") as input:\n prog_lines = input.readlines()\n for line in prog_lines:\n split_line = line.split()\n if len(split_line) > 0 and line[0] != \"#\":\n program.append(split_line[0])\n self.ram = [0] * (len(program))\n address = 0\n for instruction in program:\n self.ram_write(address, int(instruction,2))\n address += 1\n self.pc = 0\n\n def ram_read(self, i):\n return self.ram[i]\n \n def ram_write(self, i, v):\n self.ram[i] = v\n \n def nop(self):\n self.pc += 1\n \n def halt(self):\n self.running = False\n \n def ret(self):\n if self.reg[7] >= 256:\n raise Exception(\"Cannot return; stack empty.\")\n self.pc = self.stack[self.reg[7]]\n self.reg[7] += 1\n \n def push(self, v=None):\n if v is None:\n v = self.reg[self.ram_read(self.pc + 1)]\n self.pc += 2\n self.reg[7] -= 1\n self.stack[self.reg[7]] = v\n \n def pop(self, dest=None):\n if dest is None:\n dest = self.ram_read(self.pc + 1)\n self.pc += 2\n self.reg[dest] = self.stack[self.reg[7]]\n self.reg[7] += 1\n \n def prn(self):\n print(self.reg[self.ram_read(self.pc + 1)])\n self.pc += 2\n \n def pra(self):\n print(chr(self.reg[self.ram_read(self.pc + 1)]), end=\"\")\n self.pc += 2\n \n def call(self):\n if self.reg[7] <= 0:\n raise Exception(\"Stack overflow.\")\n self.push(self.pc + 2)\n self.pc = self.reg[self.ram_read(self.pc+1)]\n \n def jmp(self):\n self.pc = self.reg[self.ram_read(self.pc+1)]\n \n def jeq(self):\n if self.flags[2] == 1:\n self.jmp()\n else:\n self.pc += 2\n \n def jne(self):\n if self.flags[2] == 0:\n self.jmp()\n else:\n self.pc += 2\n \n def jgt(self):\n if self.flags[1] == 1:\n self.jmp()\n else:\n self.pc += 2\n \n def jlt(self):\n if self.flags[0] == 1:\n self.jmp()\n else:\n self.pc += 2\n \n def jle(self):\n if (self.flags[0] == 1) or (self.flags[2] == 1):\n self.jmp()\n else:\n self.pc += 2\n \n def jge(self):\n if (self.flags[1] == 1) or (self.flags[2] == 1):\n self.jmp()\n else:\n self.pc += 2\n \n def ldi(self):\n self.reg[self.ram_read(self.pc+1)] = self.ram_read(self.pc+2)\n self.pc += 3\n \n def st(self):\n self.reg[self.ram_read(self.pc+1)] = self.reg[self.ram_read(self.pc+2)]\n self.pc += 3\n\n def alu(self, op, reg_a=None, reg_b=None):\n \"\"\"ALU operations.\"\"\"\n if reg_a is None:\n reg_a = self.ram_read(self.pc + 1)\n if reg_b is None:\n reg_b = self.ram_read(self.pc + 2)\n a = self.reg[reg_a]\n b = self.reg[reg_b]\n if op == \"INC\":\n self.reg[reg_a] += 1\n self.pc += 2\n elif op == \"NOT\":\n self.reg[reg_a] = ~a\n self.pc += 2\n elif op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n self.pc += 3\n elif op == \"MUL\":\n self.reg[reg_a] *= self.reg[reg_b]\n self.pc += 3\n elif 
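The pc += 1/2/3 updates hard-coded in each handler follow from the opcode encoding itself: assuming these opcodes use the LS-8 layout AABCDDDD, the top two bits give the operand count, so the advance could be computed generically:

def next_pc(pc, instruction):
    # LDI = 0b10000010 -> 2 operands -> pc += 3
    # PRN = 0b01000111 -> 1 operand  -> pc += 2
    # NOP = 0b00000000 -> 0 operands -> pc += 1
    # (CALL/JMP/RET set pc themselves and must bypass this.)
    return pc + 1 + (instruction >> 6)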
op == \"MOD\":\n if b == 0:\n raise Exception(\"Divide-by-zero error in MOD.\")\n self.halt()\n else:\n self.reg[reg_a] = a % b\n self.pc += 3\n elif op == \"CMP\":\n if a == b:\n self.flags = [0,0,1]\n elif a < b:\n self.flags = [1,0,0]\n elif a > b:\n self.flags = [0,1,0]\n self.pc += 3\n elif op == \"AND\":\n self.reg[reg_a] = a & b\n self.pc += 3\n elif op == \"OR\":\n self.reg[reg_a] = a | b\n self.pc += 3\n elif op == \"XOR\":\n self.reg[reg_a] = a ^ b\n self.pc += 3\n elif op == \"SHL\":\n self.reg[reg_a] = a << b\n self.pc += 3\n elif op == \"SHR\":\n self.reg[reg_a] = a >> b\n self.pc += 3\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n self.running = True\n while self.running and (self.pc < len(self.ram)):\n this_instr = self.ram_read(self.pc)\n if this_instr in self.com_args:\n self.commands[this_instr](*self.com_args[this_instr])\n else:\n self.commands[this_instr]()","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"40998718","text":"#!/usr/bin/env python3\n#\n#\tNextBus.py\n#\tResponse to Target Case Study\n#\tCandidate: Dave White, dave4mpls@gmail.com, 612-695-3289\n#\tPosition: Guest Reliability Engineer\n#\tDate Completed: 7-31-2018\n#\n#\tSee design questions section below for design decisions that were made\n#\n#\tPurpose: Contacts the Metro Transit XML web service as described at http://svc.metrotransit.org/\n#\tto retrieve the number of minutes until the next bus, or no return value if there is no further bus.\n#\n#\tInterface: Command-Line\n#\n#\tExternal dependencies:\trequests\n#\t\tInstall this dependency by using: pip install requests\n#\tStandard libraries:\t\ttime, sys\n#\n#\tExample Command-Line: nextbus.py bus-route bus-stop-name direction\n#\t\n#\tbus-route:\t\tshould be a unique substring of the name of the bus route you want\n#\t\t\t\t\tIf you put # followed by a number, it picks a particular Metro Transit route number\n#\t\t\t\t\tIf you put #any, it lists all the routes in the resulting error message\n#\tbus-stop-name:\tshould be a unique substring of the name of the bus stop\n#\t\t\t\t\tIf you put #any, it lists all the stops for that route in the resulting error message\n#\tdirection:\t\t\tmust be east, north, south, or west (case-insensitive)\n#\t\t\t\t\tIf you put #any, it lists all the directions for that route in the resulting error message\n#\n#\tReturn values are sent to Standard Output\n#\tExample return value (as requested in design) if bus-route and bus-stop-name are unique matches:\n#\t\t2 minutes\n#\tReturn value (as requested by design) if no further buses are coming that day:\n#\t\t\n#\t\t(See design questions below-- a different output may be clearer if used interactively)\n#\tReturn value if multiple bus-routes or bus-stop-names match: (returns first one that applies)\n#\t\tMULTIPLE MATCHES ON ROUTE: \n#\t\tMULTIPLE MATCHES ON STOP: \n#\tReturn values if no bus-routes or bus-stop-names match (returns first one that applies):\n#\t\tNO MATCH ON 
ROUTE\n#\t\tNO MATCH ON STOP\n#\tReturn value if direction is not valid for route or is not a valid direction:\n#\t\tNO MATCH ON DIRECTION\n#\tReturn value for any problem with accessing the network service:\n#\t\tNETWORK ERROR\n#\tReturn value for parameter problems on the command line:\n#\t\tPARAMETER ERROR: (followed by the help text)\n#\tReturn value if first parameter is /?, --help:\n#\t\thelp text\n#\tReturn value for any other problem:\n#\t\tUNKOWN ERROR\n#\n#\tDESIGN QUESTIONS:\n#\tThese are questions I would ask about the design if this was for production. I didn't\n#\task them during the case study because you are all busy people, and probably\n#\tthe interview is the best time to discuss design issues relating to the case study, so\n#\tthat you can see that I follow up with the right questions after receiving a design.\n#\n# \tIn a real work environment, I MIGHT ask these questions of the internal customer or\n#\tmy supervisor, but might also infer them from other information I have, if I can\n#\tmake a strong inference (e.g. if I already knew the script would be used interactively\n#\tonly). I would not assume the answer if I couldn't make a strong inference.\n#\n#\t* Is this going to be used interactively or by a calling script, or both? I assumed both,\n#\t and created responses for edge cases that are both human and computer readable.\n#\t (See return values above.)\n#\t* When there is no further bus that day, the designed result (no output) may confuse\n#\t people if it is used interactively; should I return a standard error message like \n#\t NO FURTHER BUSES instead?\n#\t* When the bus route or bus stop name are not unique, the best interactive result \n#\t would be to list all of the results, but for a calling script, this would confuse the \n#\t calling script. Should an additional API specification (e.g. JSON results?) be added\n#\t for this case? (The specs of course say that they will be unique, but in production,\n#\t you can't guarantee that.)\n#\t* Users who know the bus system may provide a specific route number (e.g. 5F)\n#\t instead of a bus route name; should this be accepted?\n#\t* Metro Transit returns times in the format \"14 Min\" if the data indicates the actual\n#\t expected amount of wait time based on current bus location, and \"10:08\" if\n#\t the time is just the scheduled time. Should my app do the same, or indicate\n#\t the difference between actual estimated arrival time and scheduled time in any way?\n\nimport requests\nimport time\n\n#-- Global constants\nmetroTransitServiceUrl = \"https://svc.metrotransit.org\"\n\n#-- Get a Metro Transit service result as a Python object, given a local path within the service\n#-- starting with the slash after the domain name. Throws an IOError on any error.\ndef getMetroTransitService(localPath):\n\tmyURL = metroTransitServiceUrl + localPath\n\ttry:\n\t\tresult = requests.get(myURL, params = {'format': 'json'})\n\t\tif (result.ok):\n\t\t\treturn result.json()\t\t# on JSON error an exception will be thrown and caught\n\t\telse:\n\t\t\traise IOError\t\t# non-OK HTTP status is thrown as \"IOError\"\n\texcept:\n\t\traise IOError\n\ndef suppressMultipleSpaces(x):\n\t\"\"\" Returns the string X, but with multiple spaces (as found in Metro Transit return values) with single spaces. 
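Hypothetical doctest-style example of the intended behaviour:\n\t>>> suppressMultipleSpaces('46th  St   Station')\n\t'46th St Station'\n\t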
\"\"\"\n\twhile x.find(\" \") >= 0:\n\t\tx = x.replace(\" \",\" \")\n\treturn x\n\ndef extractMatches(allItems, matchField, substring):\n\t\"\"\"\n\tExtracts a list of items that match a substring case-insensitively, within a larger list.\n\tThe match is case-insensitive and matches a single space against any number of spaces,\n\tsince multiple spaces may be found in Metro Transit results. If the substring starts with \"#\",\n\tit finds only matches where the field STARTS with the rest of the substring followed by a space,\n\tuseful for looking up route numbers. If the entire substring is \"#any\", it returns the whole list.\n\t\n\tParameters\n\t-------------\n\tallItems : list\n\t\tThe whole list of items to search.\n\tmatchField : str\n\t\tThe field within each record to match against the substring.\n\tsubstring : str\n\t\tThe substring to search for.\n\t\t\n\tReturns\n\t--------\n\tlist\n\t\tThe records that matched the substring.\n\t\"\"\"\n\tif (substring.upper()==\"#ANY\"): return allItems # special code #ANY returns whole list\n\tstartMatch = False\n\tif (substring[0:1] == \"#\"):\n\t\tstartMatch = True\n\t\tsubstring = substring[1:]\n\tmatchingItems = [ ]\n\tfor thisItem in allItems:\n\t\tif startMatch:\n\t\t\tif suppressMultipleSpaces(thisItem[matchField].upper()).find(suppressMultipleSpaces(substring.upper()+\" \")) == 0:\n\t\t\t\tmatchingItems.append(thisItem)\n\t\telse:\n\t\t\tif suppressMultipleSpaces(thisItem[matchField].upper()).find(suppressMultipleSpaces(substring.upper())) != -1:\n\t\t\t\tmatchingItems.append(thisItem)\n\treturn matchingItems\n\ndef getRouteMatches(busRouteSubstring):\n\t\"\"\" given a substring, return matching routes as a list in Metro Transit format \"\"\"\n\treturn extractMatches(getMetroTransitService(\"/NexTrip/Routes\"),\"Description\", busRouteSubstring)\n\ndef getDirectionMatches(busRouteNumber, busDirectionSubstring):\n\t\"\"\" given a route number and a direction substring, return matching directions as a list in Metro Transit format \"\"\"\n\treturn extractMatches(getMetroTransitService(\"/NexTrip/Directions/\" + busRouteNumber), \"Text\", busDirectionSubstring)\n\ndef getStopMatches(busRouteNumber, busDirectionNumber, busStopSubstring):\n\t\"\"\" given a bus route number, direction number, and a substring of the stop name, return matching stops as a list in Metro Transit format \"\"\"\n\treturn extractMatches(getMetroTransitService(\"/NexTrip/Stops/\" + busRouteNumber + \"/\" + busDirectionNumber), \"Text\", busStopSubstring)\n\ndef getTimepointDepartures(busRouteNumber, busDirectionNumber, busStopCode):\n\t\"\"\" given a bus route number, bus direction number, and bus stop code, return timepoint departures as a list in Metro Transit format \"\"\"\n\treturn getMetroTransitService(\"/NexTrip/\" + busRouteNumber + \"/\" + busDirectionNumber + \"/\" + busStopCode)\n\ndef minutesTillBus(busTimepoint, nowTime = None):\n\t\"\"\" given a bus timepoint record from getTimepointDepartures, return the number of minutes until that bus, as a float. nowTime is the current time since unix epoch, but leave it out to just use the system time. 
\"\"\"\n\tt = busTimepoint[\"DepartureTime\"]\n\tif nowTime is None: nowTime = time.time()\n\tsecondsFromNow = float(t[6:-2].split('-')[0])/1000.0 - nowTime\n\treturn secondsFromNow / 60.0\n\ndef formatTimepoint(busTimepoint, nowTime = None):\n\t\"\"\" given a single bus timepoint record from getTimepointDepartures, return the formatted output string; supply the current time if you want using nowTime, or leave it out to use the system time (nowTime is seconds since Unix epoch). \"\"\"\n\tminutesFromNow = round(minutesTillBus(busTimepoint, nowTime))\n\tif (minutesFromNow==1):\n\t\treturn \"1 Minute\"\n\telse:\n\t\treturn \"{:.0f} Minutes\".format(minutesFromNow)\n\ndef getNextBusRecord(busTimepointList):\n\t\"\"\" given a list of bus timepoints from getTimepointDepartures, returns a list with one record (the next bus that hasn't arrived yet) or zero records (no bus is coming) \"\"\"\n\tfor thisTimepoint in busTimepointList:\n\t\tif minutesTillBus(thisTimepoint) > 0:\n\t\t\treturn [ thisTimepoint ]\n\treturn [ ]\n\ndef commaList(inputList, fieldToUse):\n\t\"\"\" Given an input list and a fieldname of which field to use, return a string that contains all those field items, separated by a comma and a space. \"\"\"\n\toutstr = \"\"\n\tfor thisItem in inputList:\n\t\tif outstr != \"\": outstr += \", \"\n\t\toutstr += thisItem[fieldToUse]\n\treturn outstr\n\ndef nextBus(busRouteSubstring, busStopSubstring, directionSubstring, returnDepartureText = False):\n\t\"\"\"\n\tReturns the response for the whole program, giving the formatted time for the next bus\n\twhen provided with a bus route substring, a bus stop substring, and a direction substring.\n\tHandles all errors as described in the comment for the whole program. All substring\n\tsearches are case-insensitive.\n\t\n\tParameters\n\t------------\n\tbusRouteSubstring : str\n\t\tThe substring to look for to find a unique route.\n\tbusStopSubstring : str\n\t\tThe substring to look for in the route's stop list, to find a unique stop.\n\tdirectionSubstring : str\n\t\tThe substring to look for in the route's direction list, to find a unique direction.\n\treturnDepartureText : boolean\n\t\t(Optional, defaults to False): If true, return the Metro Transit departure text instead of \n\t\tthe # of minutes. Used for testing.\n\t\n\tReturns\n\t--------\n\tstr\n\t\tThe output for the program, either \"x minute(s)\" if the bus is coming, a null string\n\t\tif no bus is coming, or an error message. Errors include: \n\t\tMULTIPLE MATCHES ON ROUTE: \n\t\tMULTIPLE MATCHES ON STOP: \n\t\tNO MATCH ON ROUTE\n\t\tNO MATCH ON STOP\n\t\tNO MATCH ON DIRECTION\n\t\tNETWORK ERROR\n\t\tUNKOWN ERROR\n\t\"\"\"\n\ttry:\n\t\t# Get the information from Metro Transit. 
Return appropriate errors if\n\t\t# no matches are found or multiple matches are found.\n\t\t# routes\n\t\tnoBusReturnValue = \"\"\t# return value for when no busses are coming\n\t\tmatchingRoutes = getRouteMatches(busRouteSubstring)\n\t\tif (len(matchingRoutes) == 0): return \"NO MATCH ON ROUTE\"\n\t\tif (len(matchingRoutes) > 1): return \"MULTIPLE MATCHES ON ROUTE: \" + commaList(matchingRoutes, \"Description\")\n\t\tthisBusNumber = matchingRoutes[0][\"Route\"]\n\t\t# directions\n\t\tmatchingDirections = getDirectionMatches(thisBusNumber, directionSubstring)\n\t\tif (len(matchingDirections) == 0): return \"NO MATCH ON DIRECTION\"\n\t\tif (len(matchingDirections) > 1): return \"MULTIPLE MATCHES ON DIRECTION: \" + commaList(matchingDirections, \"Text\")\n\t\tthisDirectionNumber = matchingDirections[0][\"Value\"]\n\t\t# stops\n\t\tmatchingStops = getStopMatches(thisBusNumber, thisDirectionNumber, busStopSubstring)\n\t\tif (len(matchingStops) == 0): return \"NO MATCH ON STOP\"\n\t\tif (len(matchingStops) > 1): return \"MULTIPLE MATCHES ON STOP: \" + commaList(matchingStops, \"Text\")\n\t\tthisStopCode = matchingStops[0][\"Value\"]\n\t\t# Now, look up the bus schedule for the given location, and return the appropriate time,\n\t\t# or, return \"\" if there are no buses coming.\n\t\tdepartures = getTimepointDepartures(thisBusNumber, thisDirectionNumber, thisStopCode)\n\t\tnextDepartureRecordList = getNextBusRecord(departures)\n\t\tif (len(nextDepartureRecordList) == 0): return noBusReturnValue\n\t\tif returnDepartureText:\n\t\t\treturn nextDepartureRecordList[0][\"DepartureText\"]\n\t\telse:\n\t\t\treturn formatTimepoint(nextDepartureRecordList[0])\n\texcept IOError:\n\t\treturn \"NETWORK ERROR\"\n\texcept:\n\t\treturn \"UNKNOWN ERROR\"\n\n\n","sub_path":"nextbus.py","file_name":"nextbus.py","file_ext":"py","file_size_in_byte":12424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"446334702","text":"import math\nimport os\n\nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn import preprocessing\n\nfrom dnn import DNN\n\nsns.set()\n\n\n\n'''\n对MountainCar模型进行模型辨识\n1 生成数据\n2 学习数据进行数据验证\n3 训练并保存网络\n4 可以对模型预测\n'''\n\n\nclass MountainCar:\n \"\"\"\n 定义预测出来的模型\n \"\"\"\n\n def __init__(self, name='Goodone', net=None, train = 0):\n \"\"\"\n 初始化\n net: 训练的神经网络\n verity:使用还是验证阶段\n 验证阶段,神经网络未训练\n 使用阶段,神经网络已训练\n \"\"\"\n self.env = gym.make(\"MountainCarContinuous-v0\")\n self.name = name\n self.simulation_step = 0.1\n self.units = 50\n self.ratio = 200\n self.reset()\n if net:\n self.net = net\n else:\n self.net = DNN(1, 1, self.units,train = train, name=self.name)\n\n\n def save_samples(self, big_epis=100):\n \"\"\"\n 保存运行得到的数据\n 得到的数据有big_epis*3000行\n \"\"\"\n record = []\n for big_epi in range(big_epis):\n # 初始化\n # 为了能够达到目标点\n a = 0.0025\n change = 100\n observation = self.reset()\n for epi in range(10000):\n if epi % change == 0:\n u = self.action_sample()*3\n print(big_epi, int(20 * epi / 3000) * '=')\n observation_old = observation.copy()\n observation, _, done, _ = self.env.step(u)\n target = self._get_target(observation_old, observation, u)\n x = observation_old[0]\n # 保存真实值和计算得到的值,后期作为比较\n # record.append([x, target, -a * math.cos(3 * x)])\n record.append([x, target])\n data = np.array(record)\n np.save(os.path.join(self.net.model_path0, 'memory.npy'), data)\n return data\n\n def verity_data(self):\n \"\"\"\n 验证数据集的正确性,画出两个自己计算出来的值和真实值的区别\n \"\"\"\n import 
matplotlib.pyplot as plt\n import pandas as pd\n import seaborn as sns\n sns.set()\n\n self.data = self._load_data()\n data_size = len(self.data)\n indexs = np.random.choice(data_size, size=int(data_size / 10))\n df = pd.DataFrame(self.data[indexs, :], columns=['position', 'target_dot', 'real_dot'])\n plt.figure()\n plt.scatter(df['position'], df['target_dot']*1.1,s = 5,label = 'target') # 为了显示出区别乘以1.1\n plt.scatter(df['position'], df['real_dot'],s = 5,label = 'real')\n plt.legend()\n plt.show()\n\n def train_model(self):\n \"\"\"\n 利用得到的数据对模型进行训练,首先对数据进行缩放,之后利用神经网络进行拟合\n \"\"\"\n # 训练\n data = self._load_data()\n data[:,1:] = data[:,1:]*self.ratio\n self.net.learn_data(data)\n self.net.store_net()\n\n\n def verity_net_1(self):\n \"\"\"\n 验证神经网络的正确性\n \"\"\"\n\n a = 0.0025\n x_ = np.arange(-1.1, 0.5, 0.001)\n y_tru = -a * np.cos(3 * x_)\n y_pre = self.net.predict(x_.reshape((-1, 1)))/self.ratio\n # 验证对所有的x的拟合情况\n fig = plt.figure()\n plt.plot(x_, y_tru, label='x_tru')\n plt.plot(x_, y_pre, label='x_pre')\n plt.legend()\n\n y_tru_dot = 3 * a * np.sin(3 * x_)\n y_pre_dot = self.net.predict_dot(x_.reshape((-1, 1)))[:, 0]/self.ratio\n # y_pre_dot = self.net.predict_dot(x_.reshape((-1, 1)))[:, 0]\n # 验证对所有的x_dot的拟合情况\n fig = plt.figure()\n plt.plot(x_, y_tru_dot, label='x_dot_tru')\n plt.plot(x_, y_pre_dot, label='x_dot_pre')\n plt.legend()\n\n plt.show()\n\n def verity_net_2(self):\n \"\"\"\n 验证神经网络的正确性2\n 与真实系统的的比较\n \"\"\"\n observation_record = []\n observation_record_net = []\n time_record = []\n observation = self.reset()\n observation_net = observation\n\n change = 100\n time = 0\n epi = 0\n while True:\n observation_record.append(observation)\n observation_record_net.append(observation_net)\n time_record.append(time)\n if epi % change == 0:\n action = self.action_sample() * 3\n epi += 1\n observation, _, done, info = self.env.step(action)\n observation_net, _, done_net, info_net = self.step(action)\n time += self.simulation_step\n print(observation, observation_net)\n if done_net:\n break\n\n observation_record = np.array(observation_record)\n observation_record_net = np.array(observation_record_net)\n time_record = np.array(time_record)\n\n plt.figure(1)\n plt.plot(time_record,observation_record[:, 0], label='x_ture')\n plt.plot(time_record,observation_record_net[:, 0], label='x_pre')\n plt.xlabel('Time(s)')\n plt.ylabel('Xposition')\n plt.plot(time_record,0.45 * np.ones(len(observation_record)), 'r')\n plt.legend()\n\n plt.figure(2)\n plt.plot(time_record,observation_record[:, 1], label='v_ture')\n plt.plot(time_record,observation_record_net[:, 1], label='v_pre')\n plt.xlabel('Time(s)')\n plt.ylabel('Vspeed')\n plt.legend()\n plt.show()\n\n\n def _load_data(self):\n \"\"\"\n 将最开始得到的数据读取出来\n :return:\n \"\"\"\n data = np.load(os.path.join(self.net.model_path0, 'memory.npy'))\n return data\n\n def action_sample(self):\n \"\"\"\n 随机选取符合环境的动作\n \"\"\"\n return self.env.action_space.sample()\n\n def reset(self):\n \"\"\"\n 利用原始问题的初始化,随机初始化\n \"\"\"\n self.state = self.env.reset()\n return self.state\n\n def step(self, action):\n \"\"\"\n 利用神经网络进行模型辨识\n \"\"\"\n action = min(max(action, -1.0), 1.0)\n x, v = self.state\n # 神经网络得到的导数\n dot = self.get_dot(self.state)\n v_dot = 0.0015 * action + dot[0]\n v = v + v_dot * self.simulation_step\n v = min(max(v, -0.07), 0.07)\n\n # 通过v计算x\n x = x + self.simulation_step * v\n x = min(max(x, -1.2), 0.6)\n X = np.array([x, v])\n if X.ndim == 2:\n X = X.reshape((2,))\n self.state = X\n # 返回参数\n info = {}\n done = {}\n reward = {}\n if x >= 0.45:\n done 
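step() above advances the identified model with a single forward-Euler update over the 0.1 s simulation step; step_true further down does the same with the closed-form term. The update in isolation, with the dynamics copied from step_true (so only the integration scheme is being illustrated):

import math

def euler_step(x, v, u, dt=0.1):
    v = v + (0.0015 * u - 0.0025 * math.cos(3 * x)) * dt  # v' = 0.0015*u - 0.0025*cos(3x)
    v = min(max(v, -0.07), 0.07)                          # same clipping as step()
    x = min(max(x + v * dt, -1.2), 0.6)
    return x, v

print(euler_step(-0.5, 0.0, 1.0))  # one step from rest near the valley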
= True\n return self.state, reward, done, info\n\n def step_true(self, action):\n \"\"\"\n 利用原进行模型辨识\n \"\"\"\n action = min(max(action, -1.0), 1.0)\n x, v = self.state\n # 神经网络得到的导数\n # dot = self.get_dot(self.state)\n v_dot = 0.0015 * action -0.0025 * math.cos(3*x)\n v = v + v_dot * self.simulation_step\n v = min(max(v, -0.07), 0.07)\n\n # 通过v计算x\n x = x + self.simulation_step * v\n x = min(max(x, -1.2), 0.6)\n X = np.array([x, v])\n if X.ndim == 2:\n X = X.reshape((2,))\n self.state = X\n # 返回参数\n info = {}\n done = {}\n reward = {}\n if x >= 0.45:\n done = True\n return self.state, reward, done, info\n\n def get_dot(self, X):\n return self.net.predict(X[0:1])[0]/self.ratio\n\n def get_dot2(self, X):\n return self.net.predict_dot(X[0:1])[0]/self.ratio\n\n def _get_target(self, X, X_new, u):\n \"\"\"\n 得到神经网络需要的真实值\n 首先求真实的导数,之后计算真实值\n \"\"\"\n u = min(max(u, -1.0), 1.0)\n return (((X_new - X) / self.simulation_step)[1] - u * 0.0015)\n\n\nif __name__ == '__main__':\n mc = MountainCar(train=0)\n # 1 生成数据\n # mc.save_samples()\n\n # 2 验证数据\n # mc.verity_data()\n\n # 3 进行网络训练\n # mc.train_model()\n\n # 4 验证网络\n mc.verity_net_1()\n\n # 5 验证网络2\n mc.verity_net_2()","sub_path":"Algor/MountainCar/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"571503930","text":"with open(\"A-large.in\") as fin:\r\n with open(\"A.out\", \"w\") as fout:\r\n T = int(fin.readline())\r\n for q in range(T):\r\n vals = fin.readline().strip().split(\" \")\r\n A = int(vals[0])\r\n B = int(vals[1])\r\n vals = fin.readline().strip().split(\" \")\r\n p = [float(x) for x in vals]\r\n pCorrect = 1\r\n for k in p:\r\n pCorrect *= k\r\n best = 2 + B\r\n for i in range(len(p)):\r\n best = min(best, i + pCorrect * (B - A + 1) + (1 - pCorrect) * (2 * B - A + 2))\r\n pCorrect /= p[len(p) - i - 1]\r\n A -= 1\r\n fout.write(\"Case #\" + str(q + 1) + \": %(best)06f\" % {\"best\": best} + \"\\n\")\r\n","sub_path":"solutions_1673486_0/Python/Cyanfish/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"505193978","text":"import cv2, operator, os, pathlib \nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \n\ndef binarize_img(img):\n\tproc = cv2.adaptiveThreshold(img.copy(), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n\tproc = cv2.bitwise_not(proc, proc)\n\n\tkernel = np.uint8(np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]]))\n\n\treturn cv2.dilate(proc, kernel)\n\ndef get_corners(img):\n\t_, contours, h = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) \n\tcontours = sorted(contours, key=cv2.contourArea, reverse=True) \n\tpolygon = contours[0] # Largest image\n\n\t# Lower-right = largest (x + y) value\n\t# Upper-left = smallest (x + y) value\n\t# Lower-left = smallest (x - y) value\n\t# Upper-right = largest (x - y) value\n\tlower_right, _ = max(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))\n\tupper_left, _ = min(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))\n\tlower_left, _ = min(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))\n\tupper_right, _ = max(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))\n\n\treturn [polygon[upper_left][0], polygon[upper_right][0], polygon[lower_right][0], 
polygon[lower_left][0]]\n\ndef warp(img, crop_rect):\n\tdef dist(a, b):\n\t\tx = b[0] - a[0]\n\t\ty = b[1] - a[1]\n\t\treturn np.sqrt((x ** 2) + (y ** 2))\n\n\t# rectangle \n\ttop_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]\n\tsrc = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')\n\n\t# longest side \n\tside = max([dist(bottom_right, top_right), dist(top_left, bottom_left), dist(bottom_right, bottom_left), dist(top_left, top_right)])\n\n\t# new square shape \n\tdst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')\n\n\t# transformation matrix to make into square \n\tm = cv2.getPerspectiveTransform(src, dst)\n\n\t# apply warp \n\treturn cv2.warpPerspective(img, m, (int(side), int(side)))\n\ndef get_grid(img):\n\tsquares = []\n\tside = img.shape[:1]\n\tside = side[0] / 9\n\tfor i in range(9):\n\t\tfor j in range(9):\n\t\t\tp1 = (i * side, j * side) # Top left corner of a bounding box\n\t\t\tp2 = ((i + 1) * side, (j + 1) * side) # Bottom right corner of bounding box\n\t\t\tsquares.append((p1, p2))\n\treturn squares\n\ndef cut_rect(img, rect):\n\treturn img[int(rect[0][1]):int(rect[1][1]), int(rect[0][0]):int(rect[1][0])]\n\ndef scale_and_center(img, size, margin=0, background=0):\n\th, w = img.shape[:2]\n\n\tdef center_pad(length):\n\t\tif length % 2 == 0:\n\t\t\tside1 = int((size-length)/2)\n\t\t\tside2 = side1\n\t\telse:\n\t\t\tside1 = int((size-length)/2)\n\t\t\tside2 = side1 + 1\n\t\treturn side1, side2\n\n\tdef scale(r, x):\n\t\treturn int(r * x)\n\n\tif h > w:\n\t\tt_pad = int(margin / 2)\n\t\tb_pad = t_pad\n\t\tratio = (size - margin) / h\n\t\tw, h = scale(ratio, w), scale(ratio, h)\n\t\tl_pad, r_pad = center_pad(w)\n\telse:\n\t\tl_pad = int(margin / 2)\n\t\tr_pad = l_pad\n\t\tratio = (size - margin) / w\n\t\tw, h = scale(ratio, w), scale(ratio, h)\n\t\tt_pad, b_pad = center_pad(h)\n\n\timg = cv2.resize(img, (w, h))\n\timg = cv2.copyMakeBorder(img, t_pad, b_pad, l_pad, r_pad, cv2.BORDER_CONSTANT, None, background)\n\treturn cv2.resize(img, (size, size))\n\ndef get_largest_component(inp_img, scan_tl, scan_br):\n\timg = inp_img.copy() # Copy the image, leaving the original untouched\n\theight, width = img.shape[:2]\n\n\tmax_area = 0\n\tseed_point = (None, None)\n\n\t# iterate over image \n\tfor x in range(scan_tl[0], scan_br[0]):\n\t\tfor y in range(scan_tl[1], scan_br[1]):\n\t\t\tif img.item(y, x) == 255 and x < width and y < height: \n\t\t\t\tarea = cv2.floodFill(img, None, (x, y), 64)\n\t\t\t\tif area[0] > max_area: \n\t\t\t\t\tmax_area = area[0]\n\t\t\t\t\tseed_point = (x, y)\n\n\tfor x in range(width):\n\t\tfor y in range(height):\n\t\t\tif img.item(y, x) == 255 and x < width and y < height:\n\t\t\t\tcv2.floodFill(img, None, (x, y), 64)\n\n\tmask = np.zeros((height + 2, width + 2), np.uint8) \n\n\t# fill main feature\n\tif all([p is not None for p in seed_point]):\n\t\tcv2.floodFill(img, mask, seed_point, 255)\n\n\ttop, bottom, left, right = height, 0, width, 0\n\n\tfor x in range(width):\n\t\tfor y in range(height):\n\t\t\tif img.item(y, x) == 64: # get rid of non-main features \n\t\t\t\tcv2.floodFill(img, mask, (x, y), 0)\n\n\t\t\t# bounding parameters\n\t\t\tif img.item(y, x) == 255:\n\t\t\t\ttop = y if y < top else top\n\t\t\t\tbottom = y if y > bottom else bottom\n\t\t\t\tleft = x if x < left else left\n\t\t\t\tright = x if x > right else right\n\n\tbbox = [[left, top], [right, bottom]]\n\treturn img, np.array(bbox, dtype='float32'), seed_point\n\ndef 
extract_digit(img, rect, size):\n\tdigit = cut_rect(img, rect) # get the digit cell \n\n\th, w = digit.shape[:2]\n\tmargin = int(np.mean([h, w]) / 2.5)\n\t_, bbox, seed = get_largest_component(digit, [margin, margin], [w - margin, h - margin]) # get largest feature \n\tdigit = cut_rect(digit, bbox)\n\n\tw = bbox[1][0] - bbox[0][0]\n\th = bbox[1][1] - bbox[0][1]\n\n\tif w > 0 and h > 0 and (w * h) > 100 and len(digit) > 0:\n\t\treturn scale_and_center(digit, size, 4)\n\telse:\n\t\treturn np.zeros((size, size), np.uint8)\n\ndef get_digits(img, squares, size):\n\tdigits = []\n\timg = binarize_img(img.copy())\n\tfor square in squares:\n\t\tdigits.append(extract_digit(img, square, size))\n\treturn digits\n\nif __name__ == \"__main__\":\n\tfile_list = ['image16', 'image101', 'image1', 'image129', 'image51', 'image1084', 'image19', 'image33', 'image90', 'image198', 'image2', \\\n\t'image103', 'image84', 'image179', 'image188', 'image192', 'image164', 'image125', 'image183', 'image159', 'image18', 'image154', \\\n\t'image34', 'image160', 'image115', 'image96', 'image1009', 'image47', 'image111', 'image74', 'image186', 'image201', 'image81', 'image105', \\\n\t'image76', 'image25', 'image118', 'image153', 'image100', 'image148', 'image43', 'image79', 'image1020', 'image106', 'image87', 'image29', \\\n\t'image169', 'image1039', 'image91', 'image195', 'image200', 'image21', 'image32', 'image119', 'image202', 'image30', 'image24', 'image65', \\\n\t'image1003', 'image1041', 'image23', 'image140', 'image157', 'image1067', 'image1014', 'image165', 'image85', 'image31', \\\n\t'image120', 'image1062', 'image180', 'image168', 'image36', 'image40', 'image73', 'image58', 'image50', 'image104', 'image175', 'image211', \\\n\t'image83', 'image1038', 'image107', 'image189', 'image196', 'image174', 'image173', 'image37', 'image93', 'image145', 'image134', 'image72', \\\n\t'image143', 'image172', 'image184', 'image1000', 'image1024', 'image112', 'image80', 'image162', 'image161', 'image128', 'image176', \\\n\t'image109', 'image28', 'image77', 'image97', 'image191', 'image185', 'image122', 'image136', 'image166', 'image82', 'image155', 'image61', \\\n\t'image1036', 'image94', 'image163', 'image182', 'image42', 'image10', 'image170', 'image1082', 'image1055', 'image158', 'image22', \\\n\t'image44', 'image139', 'image147', 'image178', 'image26', 'image35', 'image1045', 'image117', 'image194', 'image193', 'image41', 'image126', \\\n\t'image1083', 'image39', 'image8', 'image56', 'image1086', 'image11', 'image156', 'image113', 'image13', 'image27', 'image9', 'image102', \\\n\t'image171', 'image95', 'image1008', 'image114', 'image167', 'image70', 'image177', 'image116', 'image181', 'image98', 'image150', 'image151']\n\tindex_image_list = [0]*(len(file_list)*81)\n\t\n\tzero_count = 0\n\tdata_folders = ['filled_data', 'unfilled_data']\n\tfor d in data_folders: \n\t\tCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\n\t\tDATA_DIR = os.path.join(CURRENT_DIR, d)\n\t\tLABELS_TXT_DIR = os.path.join(DATA_DIR, 'labels_txt')\n\t\tDIGITS_DIR = os.path.join(DATA_DIR, 'digits')\n\t\tpathlib.Path(DIGITS_DIR).mkdir(parents=True, exist_ok=True) \n\t\tALL_LABELS_DIR = os.path.join(DIGITS_DIR, 'all_digit_labels.txt')\n\t\tall_labels = np.zeros((len(file_list)*81, ))\n\n\t\tfor j in range(len(file_list)): \n\t\t\tf = file_list[j]\n\t\t\tprint('File:', f)\n\t\t\tIMAGE_DIR = os.path.join(DATA_DIR, f + '.jpg')\n\t\t\tLABELS_DIR = os.path.join(LABELS_TXT_DIR, f + '.txt')\n\n\t\t\toriginal = cv2.imread(IMAGE_DIR, 
cv2.IMREAD_GRAYSCALE)\n\t\t\tcv2.imshow('original', original)\n\t\t\tprocessed = binarize_img(original)\n\t\t\tcorners = get_corners(processed)\n\t\t\tcropped = warp(original, corners)\n\t\t\tsquares = get_grid(cropped)\n\t\t\tdigits = get_digits(cropped, squares, 28) # len 81 list of arrays \n\n\t\t\tlabels = np.transpose(np.loadtxt(LABELS_DIR)).flatten() # (81,) array\n\n\t\t\tfor i in range(len(digits)): # 81 times \n\t\t\t\tDIGIT_DIR = os.path.join(DIGITS_DIR, f + '_' + str(i+1) + '.jpg')\n\t\t\t\tcv2.imwrite(DIGIT_DIR, digits[i])\n\t\t\t\t\n\t\t\t\tall_labels[j*81+i] = labels[i]\n\t\t\t\tindex_image_list[j*81+i] = f + '_' + str(i+1) + '.jpg'\n\t\t\t\tif cv2.countNonZero(digits[i]) < 20:\n\t\t\t\t\tif d == 'filled_data' and j < 81: \n\t\t\t\t\t\tzero_count += 1\n\t\t\t\t\telif d == 'unfilled_data' and j >= 81: \n\t\t\t\t\t\tzero_count += 1 \n\t\t\t\t# if cv2.countNonZero(digits[i]) < 15: # threshold for blank cell detection \n\t\t\t\t# \tall_labels[j*81+i] = 0 \n\t\t\t\t# else:\n\t\t\t\t# \tall_labels[j*81+i] = labels[i]\n\n\t\t# index_image_list = np.repeat(image_list, 81)\n\t\tdf = pd.DataFrame(all_labels, index=index_image_list)\n\t\tdf.to_csv(ALL_LABELS_DIR)\n","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"478628192","text":"import pickle\nfrom scipy.sparse import csr_matrix\nimport csv\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nimport string\nimport math\n\ndf = pd.read_csv('amazon_baby.csv')\nprint (df.head())\n\ndef remove_punctuation(text):\n\ttext = str(text)\n\ttranslator = str.maketrans(\"\", \"\", string.punctuation)\n\treturn text.translate(translator)\n\n# print ('translated is ',df['review'].apply(remove_punctuation))\ndf['review_clean']\t= df['review'].apply(remove_punctuation)\n\ndf = df.fillna({'reviews':''})\ndf = df[df['rating'] != 3]\n\ndf['sentiment'] = df['rating'].apply(lambda rating:+1 if rating > 3 else -1)\nprint (df.columns)\n\ntrain_data,test_data = train_test_split(df,test_size=0.2,random_state=42)\nprint (test_data['name'])\n\nvec = CountVectorizer(token_pattern=r'\\b\\w+\\b')\ntrain_matrix = vec.fit_transform(train_data['review_clean'])\ntest_matrix = vec.transform(test_data['review_clean'])\n\ncount = 0\nsentiment_model = LogisticRegression()\nsentiment_model.fit(train_matrix,train_data['sentiment'])\nprint ('1st answer ',np.sum([x>0 for x in sentiment_model.coef_]))\n# print ('test_matrix ',test_matrix)\n\nsample_test_data = test_data[10:13]\n# print (sample_test_data)\nsample_test_matrix = vec.transform(sample_test_data['review_clean'])\nscores = sentiment_model.decision_function(sample_test_matrix)\n# score2 = clf.score(sample_test_matrix,sample_test_data['sentiment'])\nprint ('2nd score is ',scores)\n\ny_pred = sentiment_model.predict(sample_test_matrix)\nprint ('y_pred ',y_pred)\ny = [+1 if score>0 else -1 for score in scores]\nprint ('y is ',y)\n\ndef prob_pred_sigmod(x):\n\treturn 1/(1+math.exp(-x))\n\nsigmoid_y = []\nfor i in scores:\n\tsigmoid_y.append(prob_pred_sigmod(i))\n\nprint ('sigmoid_y ',sigmoid_y)\nprint ('sorted sigmoid_y ',sorted(sigmoid_y))\n\nscore_full = sentiment_model.decision_function(test_matrix)\nprint ('score full ',score_full)\n\nsigmoid_y_full = []\nfor score in 
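The `process.py` record above wires its helpers into a fixed pipeline: binarize, locate the grid corners, warp to a square, slice into 81 cells, and extract one centered digit per cell. A hypothetical end-to-end run, assuming the record's functions are in scope and using a made-up image path:

```python
import cv2

original = cv2.imread('sudoku.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical input image
processed = binarize_img(original)         # adaptive threshold + dilation
corners = get_corners(processed)           # four corners of the largest contour
cropped = warp(original, corners)          # perspective-correct to a square
squares = get_grid(cropped)                # 81 bounding boxes, 9 x 9
digits = get_digits(cropped, squares, 28)  # one 28x28 array per cell
assert len(digits) == 81
```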
score_full:\n\tsigmoid_y_full.append(prob_pred_sigmod(score))\n\ncap_sigmoid_y_full = np.array(sigmoid_y_full)\nindices_pos = cap_sigmoid_y_full.argsort()[::-1]\t\nindices_neg = cap_sigmoid_y_full.argsort()[::]\t\n# print ('sigmoid_y_full ',sigmoid_y_full)\nprint ('indices_pos ',indices_pos[:20])\nprint ('indices_neg ',indices_neg[:20])\nprint ('3rd ',test_data['name'][indices_pos][:20])\nprint ('4th ',test_data['name'][indices_neg][:20])\n\n\nsignificant_words = ['love', 'great', 'easy', 'old', 'little', 'perfect', 'loves', \n                     'well', 'able', 'car', 'broke', 'less', 'even', 'waste', 'disappointed', \n                     'work', 'product', 'money', 'would', 'return']\n\nvectorizer_word_subset = CountVectorizer(vocabulary=significant_words)\ntrain_matrix_word_subset = vectorizer_word_subset.fit_transform(train_data['review_clean'])\ntest_matrix_word_subset = vectorizer_word_subset.transform(test_data['review_clean']) \n\nsimple_model = LogisticRegression()\nsimple_model.fit(train_matrix_word_subset,train_data['sentiment'])\nsimple_model_coef_table = pd.DataFrame({'word':significant_words,\n                                         'coefficient':simple_model.coef_.flatten()})\nsimple_model_coef_table_sorted = simple_model_coef_table.sort_values(by=['word','coefficient'], ascending=[True,False])\n\nprint ('values>0 ',np.sum([x>0 for x in simple_model.coef_]))\nprint ('accuracy sentiment_model train data ',sentiment_model.score(train_matrix,train_data['sentiment']))\nprint ('accuracy simple_model train data ',simple_model.score(train_matrix_word_subset,train_data['sentiment']))\n\nprint ('accuracy sentiment_model test data ',sentiment_model.score(test_matrix,test_data['sentiment']))\nprint ('accuracy simple_model test data ',simple_model.score(test_matrix_word_subset,test_data['sentiment']))","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"474862925","text":"\"\"\"\nIntegration tests for the docker_container states\n\"\"\"\n\nimport random\nimport string\nimport sys\n\nimport pytest\nfrom saltfactories.utils.tempfiles import temp_file\n\nimport salt.utils.path\nfrom tests.support.case import ModuleCase\nfrom tests.support.mixins import SaltReturnAssertsMixin\nfrom tests.support.runtests import RUNTIME_VARS\nfrom tests.support.unit import skipIf\n\n\ndef _random_name(prefix=\"\"):\n    ret = prefix\n    for _ in range(8):\n        ret += random.choice(string.ascii_lowercase)\n    return ret\n\n\n@skipIf(not salt.utils.path.which(\"dockerd\"), \"Docker not installed\")\n@pytest.mark.destructive_test\nclass DockerCallTestCase(ModuleCase, SaltReturnAssertsMixin):\n    \"\"\"\n    Test docker_container states\n    \"\"\"\n\n    def setUp(self):\n        \"\"\"\n        setup docker.call tests\n        \"\"\"\n        # Create temp dir\n        self.random_name = _random_name(prefix=\"salt_test_\")\n        self.image_tag = sys.version_info[0]\n\n        self.run_state(\"docker_image.present\", tag=self.image_tag, name=\"python\")\n        self.run_state(\n            \"docker_container.running\",\n            name=self.random_name,\n            image=\"python:{}\".format(self.image_tag),\n            entrypoint=\"tail -f /dev/null\",\n        )\n\n    def tearDown(self):\n        \"\"\"\n        teardown docker.call tests\n        \"\"\"\n        self.run_state(\"docker_container.absent\", name=self.random_name, force=True)\n        self.run_state(\n            \"docker_image.absent\",\n            images=[\"python:{}\".format(self.image_tag)],\n            force=True,\n        )\n        delattr(self, \"random_name\")\n        delattr(self, \"image_tag\")\n\n    @pytest.mark.slow_test\n    def test_docker_call(self):\n        \"\"\"\n        check that 
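`prob_pred_sigmod` in the sentiment record above hand-rolls the logistic link. For a binary `LogisticRegression`, applying the sigmoid to `decision_function` scores reproduces the positive-class column of `predict_proba`, which gives a quick self-contained sanity check:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, random_state=0)
clf = LogisticRegression().fit(X, y)

scores = clf.decision_function(X)
manual = 1.0 / (1.0 + np.exp(-scores))   # sigmoid of the raw scores
builtin = clf.predict_proba(X)[:, 1]     # positive-class probability
assert np.allclose(manual, builtin)
```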
docker.call works, and works with a container not running as root\n \"\"\"\n ret = self.run_function(\"docker.call\", [self.random_name, \"test.ping\"])\n assert ret is True\n\n @pytest.mark.slow_test\n def test_docker_sls(self):\n \"\"\"\n check that docker.sls works, and works with a container not running as root\n \"\"\"\n core_state = \"\"\"\n {}/testfile:\n file:\n - managed\n - source: salt://testfile\n - makedirs: true\n \"\"\".format(\n RUNTIME_VARS.TMP\n )\n\n with temp_file(\"core.sls\", core_state, RUNTIME_VARS.TMP_BASEENV_STATE_TREE):\n ret = self.run_function(\"docker.apply\", [self.random_name, \"core\"])\n self.assertSaltTrueReturn(ret)\n\n @pytest.mark.slow_test\n def test_docker_highstate(self):\n \"\"\"\n check that docker.highstate works, and works with a container not running as root\n \"\"\"\n top_sls = \"\"\"\n base:\n '*':\n - core\n \"\"\"\n\n core_state = \"\"\"\n {}/testfile:\n file:\n - managed\n - source: salt://testfile\n - makedirs: true\n \"\"\".format(\n RUNTIME_VARS.TMP\n )\n\n with temp_file(\n \"top.sls\", top_sls, RUNTIME_VARS.TMP_BASEENV_STATE_TREE\n ), temp_file(\"core.sls\", core_state, RUNTIME_VARS.TMP_BASEENV_STATE_TREE):\n ret = self.run_function(\"docker.apply\", [self.random_name])\n self.assertSaltTrueReturn(ret)\n","sub_path":"tests/integration/modules/test_dockermod.py","file_name":"test_dockermod.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"50614423","text":"from django.conf.urls import url\nfrom fcm.views import DeviceViewSet\nfrom onadata.apps.fieldsight.viewsets.FieldsightFcmViewset import FcmDeviceViewSet\nfrom onadata.apps.fieldsight.viewsets.ProjectViewSet import OrganizationsProjectViewSet\n\nfrom onadata.apps.fieldsight.viewsets.OrganizationViewset import OrganizationTypeViewSet, OrganizationViewSet\nfrom onadata.apps.fieldsight.viewsets.ProjectViewSet import ProjectTypeViewSet, ProjectCreationViewSet\n\nfrom onadata.apps.fieldsight.viewsets.ProjectViewSet import OrganizationsProjectViewSet\n\nfrom onadata.apps.fieldsight.viewsets.SiteViewSet import SiteViewSet, AllSiteViewSet, SiteCreationSurveyViewSet, \\\n SiteReviewViewSet, ProjectTypeViewset, SiteReviewUpdateViewSet, SiteUnderProjectViewSet\nfrom .forms import RegistrationForm\n\nfrom .views import (\n OrganizationListView,\n OrganizationCreateView,\n OrganizationUpdateView,\n OrganizationDeleteView,\n organization_dashboard,\n alter_org_status,\n add_org_admin,\n ProjectListView,\n ProjectCreateView,\n ProjectUpdateView,\n ProjectDeleteView,\n project_dashboard,\n alter_proj_status,\n add_proj_manager,\n SiteListView,\n SiteCreateView,\n SiteUpdateView,\n SiteDeleteView,\n site_dashboard,\n alter_site_status,\n add_supervisor,\n CreateUserView,\n UserListView, site_images, filter_users, upload_sites, blue_prints, add_project_role, manage_people_site,\n manage_people_project, manage_people_organization, site_survey_list, ajax_upload_sites, ajax_save_site,\n ajax_save_project)\n\n\nurlpatterns = [\n url(r'^accounts/create/$', CreateUserView.as_view(\n form_class=RegistrationForm), name='user-create'),\n\n url(r'^organization/$', OrganizationListView.as_view(), name='organizations-list'),\n url(r'^organization/$', OrganizationListView.as_view(), name='organization-list'),\n url(r'^organization/add/$', OrganizationCreateView.as_view(), name='organization-add'),\n url(r'^organization/(?P[0-9]+)/$', OrganizationUpdateView.as_view(), name='organization-edit'),\n 
url(r'^organization-dashboard/(?P[0-9]+)/$', organization_dashboard, name='organization-dashboard'),\n url(r'^organization/delete/(?P\\d+)/$', OrganizationDeleteView.as_view(), name='organization-delete'),\n url(r'^organization/alter-status/(?P\\d+)/$', alter_org_status, name='alter_org_status'),\n url(r'^organization/add-org-admin/(?P\\d+)/$', add_org_admin, name='add_org_admin'),\n\n url(r'^api/projects/(?P\\d+)/$', ProjectTypeViewSet.as_view({'get': 'list'})),\n url(r'^api/projects/$', ProjectCreationViewSet.as_view({'post': 'create', 'put': 'update'}),\n name='projects-list'),\n url(r'^project/$', ProjectListView.as_view(), name='projects-list'),\n url(r'^project/$', ProjectListView.as_view(), name='project-list'),\n url(r'^project/add/$', ProjectCreateView.as_view(), name='project-add'),\n url(r'^project/(?P[0-9]+)/$', ProjectUpdateView.as_view(), name='project-edit'),\n url(r'^project-dashboard/(?P[0-9]+)/$', project_dashboard, name='project-dashboard'),\n url(r'^api/org-projects/(?P\\d+)/$', OrganizationsProjectViewSet.as_view({'get': 'list'})),\n url(r'^api/async_save_project/$', ajax_save_project),\n\n\n url(r'^upload/(?P\\d+)/$', upload_sites, name='site-upload'),\n url(r'^api/bulk_upload_site/(?P\\d+)/$', ajax_upload_sites),\n url(r'^api/async_save_site/(?P\\d+)/$', ajax_save_site),\n url(r'^project/delete/(?P\\d+)/$', ProjectDeleteView.as_view(), name='project-delete'),\n url(r'^project/alter-status/(?P\\d+)/$', alter_proj_status, name='alter_proj_status'),\n url(r'^project/add-proj-manager/(?P\\d+)/$', add_proj_manager, name='add_proj_manager'),\n url(r'^project/add-role/(?P\\d+)/$', add_project_role, name='add_project_staffs'),\n url(r'^api/project-sites/(?P\\d+)/$', SiteViewSet.as_view({'get': 'list'}), name='project_sites'),\n\n\n url(r'^survey-sites/(?P\\d+)$', site_survey_list, name='site-survey-list'),\n url(r'^api/sites/$', AllSiteViewSet.as_view({'get': 'list'}), name='sites-list'),\n url(r'^api/project-types/$', ProjectTypeViewset.as_view({'get': 'list'})),\n url(r'^api/survey-sites/(?P\\d+)/$', SiteCreationSurveyViewSet.as_view({'get': 'list'}), name='sites-list'),\n url(r'^api/survey-sites-review/(?P\\d+)/$', SiteReviewViewSet.as_view({'get': 'list'}), name='sites-list-review'),\n url(r'^api/project-sites/(?P\\d+)/$', SiteUnderProjectViewSet.as_view({'get': 'list'}), name='project-sites-list'),\n url(r'^api/survey-sites-review-update/(?P\\d+)/$', SiteReviewUpdateViewSet.as_view({'post': 'update'})),\n url(r'^api/survey-sites/$', SiteCreationSurveyViewSet.as_view({'post': 'create', 'put':'update'}), name='sites-list'),\n url(r'^site/$', SiteListView.as_view(), name='sites-list'),\n url(r'^site/$', SiteListView.as_view(), name='site-list'),\n url(r'^site/add/$', SiteCreateView.as_view(), name='site-add'),\n url(r'^site/(?P[0-9]+)/$', SiteUpdateView.as_view(), name='site-edit'),\n url(r'^site/blue-prints/(?P[0-9]+)/$', blue_prints, name='site-blue-prints'),\n url(r'^site-dashboard/(?P[0-9]+)/$', site_dashboard, name='site-dashboard'),\n\n url(r'^site/delete/(?P\\d+)/$', SiteDeleteView.as_view(), name='site-delete'),\n url(r'^site/alter-status/(?P\\d+)/$', alter_site_status, name='alter_site_status'),\n url(r'^site/add-supervisor/(?P\\d+)/$', add_supervisor, name='add_supervisor'),\n url(r'^api/site-images/(?P\\d+)/$', site_images, name='site_images'),\n\n url(r'^manage/people/site/(?P\\d+)/$', manage_people_site, name='manage-people-site'),\n url(r'^manage/people/project/(?P\\d+)/$', manage_people_project, name='manage-people-project'),\n 
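The `urls.py` record above depends on named regex groups, but the `(?P...)` patterns appear to have lost their `<name>` tokens during extraction (as written, `(?P[0-9]+)` is a regex syntax error), so the original group names are not recoverable here. For reference, a generic Django 1.x-style sketch with a hypothetical `pk` group:

```python
from django.conf.urls import url
from django.http import HttpResponse

def organization_dashboard(request, pk):
    # the named group <pk> arrives as a keyword argument
    return HttpResponse('organization %s' % pk)

urlpatterns = [
    url(r'^organization-dashboard/(?P<pk>[0-9]+)/$',
        organization_dashboard, name='organization-dashboard'),
]
```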
url(r'^manage/people/organization/(?P\\d+)/$', manage_people_organization, name='manage-people-organization'),\n\n url(r'^accounts/create/$', CreateUserView.as_view(\n form_class=RegistrationForm), name='user-create'),\n url(r'^userlist/$', UserListView.as_view(), name='user-list'),\n url(r'^filter-users/$', filter_users, name='filter-users'),\n url(r'fcm/v1/devices/$', DeviceViewSet.as_view({'get': 'list'})),\n url(r'fcm/add/', FcmDeviceViewSet.as_view({'post': 'create'})),\n url(r'fcm/logout/', FcmDeviceViewSet.as_view({'post': 'inactivate'})),\n\n]\n\n\n","sub_path":"onadata/apps/fieldsight/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"21779221","text":"__author__ = \"Moses A. Boudourides & Sergios T. Lenis\"\n__copyright__ = \"Copyright (C) 2015 Moses A. Boudourides & Sergios T. Lenis\"\n__license__ = \"Public Domain\"\n__version__ = \"1.0\"\n\n'''\nThis script constructs a temporal random graph with 3 time slices.\n'''\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom networkx.algorithms import bipartite\nfrom matplotlib.patches import Ellipse, Polygon\nimport matplotlib\n\ndef synthetic_three_level(n,p1,p2,p3,J_isolates=False,F_isolates=False,D_isolates=False):#,isolate_up=True,isolate_down=True):\n \n k=n\n\n J=nx.erdos_renyi_graph(n,p1) #The first layer graph\n Jis = nx.isolates(J)\n F=nx.erdos_renyi_graph(n,p2) #The second layer graph\n Fis = nx.isolates(F)\n D=nx.erdos_renyi_graph(n,p3) #The third layer graph\n Dis = nx.isolates(D)\n\n def translation_graph(J,F,D):\n H1=nx.Graph()\n H2=nx.Graph()\n for i in range(n):\n H1.add_edges_from([(J.nodes()[i],F.nodes()[i])])\n H2.add_edges_from([(F.nodes()[i],D.nodes()[i])])\n return H1, H2\n\n Jed = set(J.edges())\n Fed = set(F.edges())\n Ded = set(D.edges())\n l=[Jed,Fed,Ded]\n lu = list(set.union(*l))\n JFD=nx.Graph()\n JFD.add_edges_from(lu)\n\n G=nx.Graph() #The synthetic two-layer graph\n \n # Relabing nodes maps\n \n mappingF={}\n for i in range(2*n):\n mappingF[i]=n+i\n FF=nx.relabel_nodes(F,mappingF,copy=True)\n \n mappingD={}\n for i in range(2*n):\n if i >n-1:\n mappingD[i]=i-n\n else:\n mappingD[i]=2*n+i\n DD=nx.relabel_nodes(D,mappingD,copy=True)\n \n H1, HH2 = translation_graph(J,FF,DD)\n \n G.add_edges_from(J.edges())\n G.add_edges_from(H1.edges())\n G.add_edges_from(DD.edges())\n G.add_edges_from(HH2.edges())\n G.add_edges_from(FF.edges())\n\n edgeList = []\n for e in H1.edges():\n edgeList.append(e)\n for e in HH2.edges():\n edgeList.append(e)\n \n return G, J, FF, DD, JFD, edgeList \n\n\ndef plot_graph(n,G,J,FF,DD,JFD,d1=0.8,d2=5.0,nodesize=1000,withlabels=True,edgelist=[],layout=True,b_alpha=0.5): \n \n if layout:\n pos=nx.spring_layout(JFD)\n else:\n pos=nx.random_layout(JFD)\n\n minPos=min(pos.keys())\n \n top_set=set()\n bottom_set=set()\n middle_set=set()\n level1=[]\n level2=[]\n level3=[]\n created_pos={}\n for j in range(3):\n for i in range(len(pos)):\n npos=pos[pos.keys()[i]]\n if j==0:\n ij=i\n created_pos[ij]=[d2*npos[0],d2*(npos[1]-d1)] \n bottom_set.add(i)\n level3.append(created_pos[i])\n elif j==1:\n ij=i+n\n created_pos[ij]=[d2*(npos[0]),d2*(npos[1])] \n middle_set.add(ij)\n level1.append(created_pos[ij])\n else:\n ij=i+2*n \n created_pos[ij]=[d2*(npos[0]),d2*(npos[1]+d1)] \n top_set.add(ij)\n level2.append(created_pos[ij])\n \n xlevel2=[i[0] for i in level2]\n ylevel2=[i[1] for i in level2]\n \n alevel2 = [min(xlevel2)-d1/2.-0.7,max(ylevel2)+d1/2.]\n blevel2 = 
[max(xlevel2)+d1/2.-0.7,max(ylevel2)+d1/2.]\n clevel2 = [max(xlevel2)+d1/2.,min(ylevel2)-d1/2.]\n dlevel2 = [min(xlevel2)-d1/2.,min(ylevel2)-d1/2.]\n\n xlevel3=[i[0] for i in level3]\n ylevel3=[i[1] for i in level3]\n\n alevel3 = [min(xlevel3)-d1/2.-0.7,max(ylevel3)+d1/2.]\n blevel3 = [max(xlevel3)+d1/2.-0.7,max(ylevel3)+d1/2.]\n clevel3 = [max(xlevel3)+d1/2.,min(ylevel3)-d1/2.]\n dlevel3 = [min(xlevel3)-d1/2.,min(ylevel3)-d1/2.]\n\n xlevel1=[i[0] for i in level1]\n ylevel1=[i[1] for i in level1]\n\n alevel1 = [min(xlevel1)-d1/2.-0.7,max(ylevel1)+d1/2.]\n blevel1 = [max(xlevel1)+d1/2.-0.7,max(ylevel1)+d1/2.]\n clevel1 = [max(xlevel1)+d1/2.,min(ylevel1)-d1/2.]\n dlevel1 = [min(xlevel1)-d1/2.,min(ylevel1)-d1/2.]\n\n fig=plt.figure(figsize=(20,20))\n ax=fig.add_subplot(111)\n\n ax.add_patch(Polygon([alevel2,blevel2,clevel2,dlevel2],color='b',alpha=0.1)) \n plt.plot([alevel2[0],blevel2[0],clevel2[0],dlevel2[0],alevel2[0]],[alevel2[1],blevel2[1],clevel2[1],dlevel2[1],alevel2[1]],'-b')\n\n ax.add_patch(Polygon([alevel3,blevel3,clevel3,dlevel3],color='r',alpha=0.1)) \n plt.plot([alevel3[0],blevel3[0],clevel3[0],dlevel3[0],alevel3[0]],[alevel3[1],blevel3[1],clevel3[1],dlevel3[1],alevel3[1]],'-r')\n\n ax.add_patch(Polygon([alevel1,blevel1,clevel1,dlevel1],color='g',alpha=0.1)) \n plt.plot([alevel1[0],blevel1[0],clevel1[0],dlevel1[0],alevel1[0]],[alevel1[1],blevel1[1],clevel1[1],dlevel1[1],alevel1[1]],'-g')\n\n nx.draw(J,created_pos, with_labels=withlabels,nodelist=list(bottom_set),node_color='r',node_size=nodesize,edge_color='r',alpha=0.2)\n nx.draw(FF,created_pos, with_labels=withlabels,nodelist=list(middle_set),node_color='g',node_size=nodesize,edge_color='g',alpha=0.2)\n nx.draw(DD,created_pos, with_labels=withlabels,nodelist=list(top_set),node_color='b',node_size=nodesize,edge_color='b',alpha=0.2)\n nx.draw_networkx_edges(G,created_pos,edgelist=edgelist,edge_color='k',alpha=0.2)\n\n plt.show()\n\n return created_pos\ndef synthetic_multi_level(k,n,p=[],No_isolates=True):\n\n list_of_Graphs=[]\n list_of_isolates=[]\n list_of_Graphs_final=[]\n for ij in range(k):\n list_of_Graphs.append(nx.erdos_renyi_graph(n,p[ij]))\n list_of_isolates.append(nx.isolates(list_of_Graphs[ij]))\n\n Gagr=nx.Graph()\n for i in list_of_Graphs:\n Gagr.add_edges_from(i.edges())\n Gagr.add_nodes_from(i.nodes())\n\n G=nx.Graph() #The synthetic two-layer graph\n \n # Relabing nodes maps\n for i in range(k):\n mapping={}\n for ij in range(n):\n mapping[ij]=ij+i*n\n\n list_of_Graphs_final.append(nx.relabel_nodes(list_of_Graphs[i],mapping,copy=True))\n\n list_of_translation_graphs=[]\n for ij in range(k-1):\n H1=nx.Graph()\n #### A small fix to pain in the ass\n g1=sorted(list_of_Graphs_final[ij].nodes())\n g2=sorted(list_of_Graphs_final[ij+1].nodes())\n #######\n\n for ji in range(n):\n\n H1.add_edge(g1[ji],g2[ji]) #a small fix\n\n list_of_translation_graphs.append(H1)\n\n luf=set()\n for i in list_of_Graphs_final:\n luf=luf.union(set(i.edges()))\n luf=list(luf)\n G.add_edges_from(luf)\n luf=set()\n for i in list_of_translation_graphs:\n luf=luf.union(set(i.edges()))\n edgeList=list(luf)\n G.add_edges_from(luf)\n\n return G, list_of_Graphs_final, Gagr, edgeList #F\n\ndef plot_graph_k(k,n,G,list_of_Graphs_final, Gagr,d1=0.8,d2=5.0,nodesize=1000,withlabels=True,edgelist=[],layout=True,b_alpha=0.5): \n '''\n Plotting the synthetic graph after increasing the distance among layers by a parameter d1\n and dilating each layer by a parameter d1 \n '''\n\n if layout:\n pos=nx.spring_layout(Gagr)\n else:\n pos=nx.random_layout(Gagr)\n\n 
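`synthetic_multi_level` in the record above shifts layer i's node labels by `i*n` so the k layer graphs occupy disjoint label ranges before the inter-layer translation edges are added. A minimal sketch of just that relabeling step:

```python
import networkx as nx

n, k = 4, 3
layers = [nx.erdos_renyi_graph(n, 0.5) for _ in range(k)]

# shift layer i's labels into [i*n, (i+1)*n) so the layers stay disjoint
shifted = [
    nx.relabel_nodes(g, {j: j + i * n for j in range(n)}, copy=True)
    for i, g in enumerate(layers)
]
assert set(shifted[1].nodes()) == set(range(n, 2 * n))
```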
minPos=min(pos.keys())\n top_set=set()\n bottom_set=set()\n middle_set=set()\n levels=dict()\n created_pos={}\n colors=[name for name,hex in matplotlib.colors.cnames.iteritems()]\n for j in range(k):\n\n sset=set()\n pos_lis=[]\n for i in range(n):\n ij=i+j*n\n npos=pos[i]\n created_pos[ij]=[d2*npos[0],d2*(npos[1]+j*n*d1)] \n sset.add(ij)\n pos_lis.append(created_pos[ij])\n col_li=colors[j]\n\n levels[j]=(sset,pos_lis,col_li)\n\n xylevels={}\n\n for i in range(k):\n xlevel2=[ij[0] for ij in levels[i][1]]\n ylevel2=[ij[1] for ij in levels[i][1]]\n alevel2 = [min(xlevel2)-d1/2.-0.7,max(ylevel2)+d1/2.]\n blevel2 = [max(xlevel2)+d1/2.-0.7,max(ylevel2)+d1/2.]\n clevel2 = [max(xlevel2)+d1/2.,min(ylevel2)-d1/2.]\n dlevel2 = [min(xlevel2)-d1/2.,min(ylevel2)-d1/2.]\n xylevels[i]=[alevel2,blevel2,clevel2,dlevel2]\n\n fig=plt.figure()#figsize=(20,20))\n ax=fig.add_subplot(111)\n for i in range(k):\n ax.add_patch(Polygon(xylevels[i],color=levels[i][2],alpha=0.1))\n xa=[j[0] for j in xylevels[i]]\n xa.append(xylevels[i][0][0])\n ya=[j[1] for j in xylevels[i]]\n ya.append(xylevels[i][0][1])\n plt.plot(xa,ya,'-',color=levels[i][2])\n nx.draw(list_of_Graphs_final[i],created_pos,with_labels=withlabels,nodelist=list(levels[i][0]),node_color=levels[i][2],node_size=nodesize,edge_color=levels[i][2],alpha=0.2)\n\n nx.draw_networkx_edges(G,created_pos,edgelist=edgelist,edge_color='k',alpha=0.2)\n\n plt.show()\n\n return created_pos\n\n# p1=p2=p3=0.1\n\n# n=500\n# G,J,FF,DD,JFD,edgeList = synthetic_three_level(n,p1,p2,p3,J_isolates=False,F_isolates= False, D_isolates= False)\n# # print JFD.nodes()\n# # print JFD.edges()\n# # print F.nodes()\n# # print F.edges()\n# # print G.nodes()\n# # print edgeList\n# # print aaaa\n# # print nx.isolates(G)\n# # plot_graph(n,G,J,FF,DD,F,d1=2.,d2=3.,nodesize=100,withlabels=False,edgelist=edgeList,layout=True,b_alpha=0.5)\n# plot_graph(n,G,J,FF,DD,JFD,d1=2.,d2=3.,nodesize=50,withlabels=False,edgelist=edgeList,layout=False,b_alpha=0.15)\n# k=5\n# n=10\n# pp=[0.1,.1,.1,.1,.4]\n# G, list_of_Graphs_final, Gagr, edgeList=synthetic_multi_level(k,n,p=pp,No_isolates=True)\n# plot_graph_k(k,n,G, list_of_Graphs_final, Gagr, edgelist=edgeList)\n","sub_path":"vartopics/syntheticThreeLayerGraph_time.py","file_name":"syntheticThreeLayerGraph_time.py","file_ext":"py","file_size_in_byte":9306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"122277176","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 15 14:32:26 2017\n\n@author: 오연\n\"\"\"\nimport tensorflow as tf\n\n#train data\nx_data = [1,2,3]\n\n#real data\ny_data = [1,2,3]\n\n# -1 ~ 1 random value initialization\n# We know that W should be 1 and b 0\nW = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\nb = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\n\n#Placeholder\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n\n#Our hypothesis\nhypothesis = W * x_data + b\n\n#simple cost function\ncost = tf.reduce_mean(tf.square(hypothesis - y_data))\n\n#Minimize\na = tf.Variable(0.1) #Learing rate, alpha\noptimizer = tf.train.GradientDescentOptimizer(a)\ntrain = optimizer.minimize(cost)\n\n#Before Starting, initialize the variables. 
We will 'run' this first.\ninit = tf.initialize_all_variables()\n\n#Launch the graph.\nsess = tf.Session()\nsess.run(init)\n\n#fit the line\nfor step in range(2001):\n sess.run(train, feed_dict={X:x_data, Y:y_data})\n if step % 20 == 0:\n print(step, sess.run(cost,feed_dict={X:x_data, Y:y_data}), sess.run(W), sess.run(b))\n\n","sub_path":"01.Linear Regression/02.linearPlaceHolder.py","file_name":"02.linearPlaceHolder.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"621444627","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param root: A Tree\n @return: Inorder in ArrayList which contains node values.\n \"\"\"\n\n def inorderTraversal1(self, root):\n # write your code here\n if not root:\n return []\n inorder = [root.val]\n left = []\n right = []\n if root.left: left = self.inorderTraversal(root.left)\n if root.right: right = self.inorderTraversal(root.right)\n return left + inorder + right\n\n def inorderTraversal2(self, root):\n self.inorder = []\n self.traverse(root)\n return self.inorder\n\n def traverse(self, root):\n if not root:\n return\n if root.left: self.traverse(root.left)\n self.inorder.append(root.val)\n if root.right: self.traverse(root.right)\n return\n\n def inorderTraversal(self, root):\n stack = []\n result = []\n curr = root\n while (curr or stack):\n while curr:\n stack.append(curr)\n curr = curr.left\n curr = stack.pop()\n result.append(curr.val)\n curr = curr.right\n return result","sub_path":"pythonlearn/leetCode/binaryTree/n67inorder.py","file_name":"n67inorder.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"301311890","text":"# Copyright 2016 Netherlands eScience Center\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport logging\n\nfrom docopt import docopt\nimport yaml\nfrom .format import jekyllfile2object\nfrom .validate import Validator\nfrom .version import __version__\n\n\nLOGGER = logging.getLogger('estep')\n\n\nclass Collection(object):\n name = ''\n directory = ''\n schema = ''\n\n def __init__(self, name, directory, schema):\n self.name = name\n self.directory = directory\n self.schema = schema\n\n def documents(self):\n return recurseDirectory(self.directory, self.name)\n\n\nclass Config(object):\n def __init__(self, filename='_config.yml'):\n with open(filename) as f:\n self.config = yaml.load(f)\n\n def validator(self, schemadir):\n schema_uris = list(self.schemas().values())\n return Validator(schema_uris, schemadir)\n\n def schemas(self):\n schemas = {}\n for default in self.config['defaults']:\n schemas[default['scope']['type']] = default['values']['schema']\n return schemas\n\n def collections(self):\n collections = []\n for colname in 
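A subtle bug in the `02.linearPlaceHolder.py` record above: `hypothesis` and `cost` are built from the Python lists `x_data` and `y_data`, so the `X` and `Y` placeholders fed through `feed_dict` are never actually used (and `tf.initialize_all_variables` was later deprecated in favor of `tf.global_variables_initializer`). A corrected TF 1.x-style sketch:

```python
import tensorflow as tf  # TF 1.x-style API, matching the record

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

hypothesis = W * X + b                             # use the placeholders ...
cost = tf.reduce_mean(tf.square(hypothesis - Y))   # ... so feed_dict takes effect
train = tf.train.GradientDescentOptimizer(0.1).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(2001):
        sess.run(train, feed_dict={X: [1, 2, 3], Y: [1, 2, 3]})
```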
self.config['collections'].keys():\n            colschema = self.schemas()[colname]\n            collection = Collection(colname, directory='_' + colname, schema=colschema)\n            collections.append(collection)\n        return collections\n\n\ndef validate(schemadir):\n    config = Config()\n    validator = config.validator(schemadir)\n    nr_errors = 0\n    for collection in config.collections():\n        LOGGER.info('Collection: %s', collection.name)\n        for docname, document in collection.documents():\n            nr_errors += validator.validate(docname, document)\n    if nr_errors:\n        LOGGER.warning('%i error(s) found', nr_errors)\n        sys.exit(1)\n    else:\n        LOGGER.info('No errors found')\n\n\ndef main(argv=sys.argv[1:]):\n    \"\"\"\n    Utility for estep website.\n\n    Usage:\n      estep validate [--schemadir=<schemadir>] [-v]\n\n    Options:\n      -h, --help                   Show this screen.\n      -v, --verbose                Show more output.\n      --schemadir=<schemadir>      Use local schema directory instead of remote schemas\n\n    \"\"\"\n    arguments = docopt(main.__doc__, argv, version=__version__)\n\n    logging.basicConfig(format='%(message)s', level=logging.WARN)\n    if arguments['--verbose']:\n        LOGGER.setLevel(logging.DEBUG)\n\n    if arguments['validate']:\n        validate(schemadir=arguments['--schemadir'])\n\n\ndef recurseDirectory(directory, schemaType):\n    obj = []\n    for dirpath, dirnames, filenames in os.walk(directory):\n        for filename in filenames:\n            ext = os.path.splitext(filename)[1]\n            if ext.lower() in ['.md', '.markdown', '.mdown']:\n                path = os.path.join(dirpath, filename)\n                obj.append((path, jekyllfile2object(path, schemaType=schemaType)))\n    return obj\n\n","sub_path":"estep/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"17801028","text":"import os\nimport datetime as dt\nimport sys\n\nhours_txt_path = '/Users/Bharat_Srirangam/Desktop/HoursDocumentation/hours.txt'\n\nargs = sys.argv\ndef check_exists():\n\texists = os.path.isfile(hours_txt_path)\n\tif not exists:\n\t\tprint('does not exist')\n\t\tfile = open(hours_txt_path,'w')\n\t\tfile.write('Current Hours: \\n0.0\\n======================')\n\t\tfile.close()\n\ndef main():\n\tstringdate = str(dt.date.today())\n\tfile = open(hours_txt_path, 'r')\n\ttemp = file.read()\n\tsplit_unclean = temp.split('\\n')\n\tsplit = []\n\tfor line in split_unclean:\n\t\tif len(line) > 1:\n\t\t\tsplit.append(line)\n\tcount = float(split[1])\n\tfile.close()\n\n\tcontent = ''\n\tif args[1] == '-view':\n\t\tcontent = '\\n'.join(split)\n\t\tdivider = '=================================' + '\\n'\n\t\tsections = temp.split(divider)\n\t\tprint(sections[-1])\n\telif args[1] == '-clear':\n\t\tsplit[1] = '0.0'\n\t\tcontent = '\\n'.join(split)\n\t\tcontent = content + '\\n' + 'Week\\'s Total - ' + str(count) + '\\n' + '=================================' + '\\n'\n\t\tcount = 0\n\telif len(args) == 3 and args[1] == '-a':\n\t\tisTrue = False\n\t\tindecies = list(range(len(split)))\n\t\tindecies.reverse()\n\t\tfor i in indecies:\n\t\t\tif ': ' in split[i]:\n\t\t\t\telement = split[i].split(': ')\n\t\t\t\tif element[0] == stringdate:\n\t\t\t\t\tisTrue = True\n\t\t\t\t\tsplit[i] = stringdate + ': ' + str((float(element[1]) + float(args[2])))\n\t\t\t\t\tbreak\n\t\tcount = count + float(args[2])\n\t\tsplit[1] = str(count)\n\t\tcontent = '\\n'.join(split)\n\t\tif not isTrue: \n\t\t\tcontent = content + '\\n' + stringdate + ': ' + str(args[2])\n\telse:\n\t\tcount = count + float(args[1])\n\t\tsplit[1] = str(count)\n\t\tcontent = '\\n'.join(split)\n\t\tcontent = content + '\\n' + stringdate + ': ' + 
str(args[1])\n\t\t\n\tfile = open(hours_txt_path, 'w')\n\tfile.write(content)\n\tfile.close()\n\tprint('Your total accumulated hours is: ' + str(count))\n\nmain()","sub_path":"hours.py","file_name":"hours.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"244938858","text":"#!/usr/bin/env python\n\n# rodar em terminais diferentes: \n# roscore\n# rosrun turtlesim turtlesim_node\n# rosrun meu_projeto quadrado_turtle.py\nimport rospy\nfrom geometry_msgs.msg import Twist, Vector3\nfrom math import pi\n\ndef move():\n vel = Twist(Vector3(2,0,0), Vector3(0,0,0))\n \n while not rospy.is_shutdown():\n\n t0 = rospy.Time.now().to_sec()\n current_distance = 0\n\n while current_distance < 5:\n pub.publish(vel)\n t1 = rospy.Time.now().to_sec()\n current_distance = vel.linear.x * (t1-t0)\n\n vel.linear.x = 0\n pub.publish(vel)\n return\n\ndef rotate():\n vel = Twist(Vector3(0,0,0), Vector3(0,0,pi/2))\n\n t0 = rospy.Time.now().to_sec()\n current_angle = 0\n\n while current_angle <= pi/2:\n pub.publish(vel)\n t1 = rospy.Time.now().to_sec()\n current_angle = vel.angular.z * (t1-t0)\n\n vel.angular.z = 0\n pub.publish(vel)\n return\n\nif __name__ == '__main__':\n rospy.init_node('quadrado')\n pub = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)\n try:\n for i in range(4):\n move()\n rotate()\n except rospy.ROSInterruptException: pass\n","sub_path":"Robo/quadrado_turtle.py","file_name":"quadrado_turtle.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"625841772","text":"import subprocess\nimport logging\nimport re\n\nHOST_RE = re.compile(r\"Host: (\\d+\\.\\d+\\.\\d+\\.\\d+) \\(([\\w\\d\\.-]*)\\)\\s+Ports: (.+)\")\nPORT_RE = re.compile(r\"(\\d+?)\\/(\\w*?)\\/(\\w+?)\\/(.*?)\\/(.*?)\\/(.*?)\\/(.*?)\\/(.*)\")\nPORT_KEYS = (\"port\", \"state\", \"protocol\", \"owner\", \"service\", \"rpc_info\", \"version\")\n\nclass Nmap:\n def __init__(self):\n pass\n\n def __call__(self, parameters, _nmap_command=None):\n\n if not _nmap_command:\n _nmap_command = self.nmap_command\n\n ip = parameters[\"ip\"]\n stdout = _nmap_command(ip)\n result = self.parse_nmap(stdout, ip)\n\n return result\n\n def nmap_command(self, ip:str):\n\n output = subprocess.run(\n [\"nmap\", \"--top-ports\", \"100\", \"-oG\", \"-\", \"-sV\", \"-A\", \"--version-all\", \"-d\", ip], stdout=subprocess.PIPE)\n stdout = str(output.stdout, \"utf8\")\n return stdout\n\n def is_up(self, stdout, ip):\n\n if re.search(\"Host: {}.+?Status: (Up|Unknow)\".format(ip), stdout):\n return True\n return False\n\n def get_ip_dns_ports(self, stdout):\n # from greppable nmap output\n # extract the ip, the dns and ports\n # ports need to be extract one by one\n re_result = HOST_RE.search(stdout)\n return re_result.groups()\n\n def parse_ports(self, ports):\n # format is ($port, $state, $protocol, $owner, $service, $rpc_info, $version)\n output = []\n for p in ports.split(\", \"):\n splitted = zip(PORT_KEYS, PORT_RE.search(p).groups())\n only_available_field = filter(lambda x: len(x[1])>0, splitted)\n port_info = dict(only_available_field)\n output.append(port_info)\n\n return output\n\n def parse_nmap(self, stdout, ip):\n\n if not self.is_up(stdout, ip):\n return {\"ip\":ip, \"status\":\"down\"}\n \n output = {\"ip\": ip, \"status\": \"up\"}\n\n _, dns, ports = self.get_ip_dns_ports(stdout)\n output[\"dns\"] = dns\n output[\"scan\"] = self.parse_ports(ports)\n\n 
return output\n\n \n","sub_path":"node/nmap.py","file_name":"nmap.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"648574436","text":"# Sanjana Wadhwani\n# 1st Period\n# Feb 5th, 2020\n# Assignment 23\n\ndef main():\n\n #create the file\n print('First use f = open(\"test.py\", \"a\") to create a file')\n f = open(\"test.py\", \"a\")\n\n #add more content to the file\n print(\"Use f.write() to add more text to the file\")\n f.write(\"Use f.write() to add more text to the file\\n\")\n\n \n #loop for adding text from input\n success = False\n while (not success):\n try:\n num = int(input(\"How many lines do you want to add? \"))\n success = True\n except ValueError:\n print(\"You need to type a number.\")\n for i in range(num):\n f.write(\"This is a line %d\\r\\n\" % (i+1))\n\n #print file content\n print('\\nThis is what is on the file right now')\n f = open(\"test.py\", \"r\")\n print(f.read())\n\n \n\nif (__name__==\"__main__\"):\n main()\n","sub_path":"project23.py","file_name":"project23.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"281571921","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Episode',\n fields=[\n ('episode_id', models.IntegerField(serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('season_number', models.IntegerField()),\n ('episode_number', models.IntegerField()),\n ('airdate', models.DateField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='File',\n fields=[\n ('file_id', models.IntegerField(serialize=False, primary_key=True)),\n ('episode_id', models.IntegerField()),\n ('season_number', models.IntegerField()),\n ('episode_number', models.IntegerField()),\n ('status', models.CharField(max_length=200)),\n ('filename', models.CharField(max_length=200)),\n ('quality', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Show',\n fields=[\n ('show_id', models.IntegerField(serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('directory', models.CharField(max_length=200)),\n ('quality', models.CharField(max_length=200)),\n ('enabled', models.BooleanField(default=True)),\n ('last_check', models.DateField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='file',\n name='show',\n field=models.ForeignKey(to='avon.Show'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='episode',\n name='show',\n field=models.ForeignKey(to='avon.Show'),\n preserve_default=True,\n ),\n ]\n","sub_path":"avon/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"652831280","text":"import os, glob, sys\n\ndarknetdir = './Labels-darknet'\nmetadir = './meta'\n\nframe_rate = 2.5 #vary according to the source.\n\nif not os.path.exists(darknetdir):\n print('Something is wrong with the darknet labels dir.')\n sys.exit()\n\nif not os.path.exists(metadir):\n os.mkdir(metadir)\n\n\nlabelsList = glob.glob(os.path.join(darknetdir, '**/*.txt'), 
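`parse_ports` in the nmap record above zips the slash-separated fields of nmap's grepable `Ports:` output against `PORT_KEYS` and keeps only non-empty fields; note the regex captures eight groups while there are seven keys, so `zip` silently drops the trailing group. A standalone sketch on a made-up sample entry (regex and keys copied from the record):

```python
import re

PORT_RE = re.compile(r"(\d+?)\/(\w*?)\/(\w+?)\/(.*?)\/(.*?)\/(.*?)\/(.*?)\/(.*)")
PORT_KEYS = ("port", "state", "protocol", "owner", "service", "rpc_info", "version")

sample = "22/open/tcp//ssh//OpenSSH 7.2p2/"               # hypothetical grepable entry
pairs = zip(PORT_KEYS, PORT_RE.search(sample).groups())   # zip drops the 8th group
info = dict(p for p in pairs if p[1])                     # keep non-empty fields only
print(info)
# {'port': '22', 'state': 'open', 'protocol': 'tcp', 'service': 'ssh',
#  'version': 'OpenSSH 7.2p2'}
```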
recursive=True)\n\nif len(labelsList) == 0:\n    print(\"No labels found in dir.\")\n    sys.exit()\n\nmetaDic = {}\nfor lfile in labelsList:\n    with open(lfile, 'r') as f:\n        #count number of lines\n        lcount = sum(1 for _ in f)\n        if lcount not in metaDic:\n            metaDic[lcount] = []\n        metaDic[lcount].append(lfile)\n\ntotalLabeled = 0\ntotalBBoxes = 0\nsummary_file_path = 'meta/summary.txt'\nwith open(summary_file_path, 'w') as summary_f:\n\n    for lcount in metaDic:\n        labelList = metaDic[lcount]\n        if lcount > 0:\n            totalLabeled = totalLabeled + len(labelList)\n            totalBBoxes = totalBBoxes + (lcount * len(labelList))\n            out = \"{} frames with {} bboxes. Approx {} seconds of video.\".format(len(labelList), lcount, len(labelList) / frame_rate)\n            summary_f.write(out+'\\n')\n            print(out)\n            fbboxcountname = \"bbox_number_{}.txt\".format(lcount)\n            fbboxcount = os.path.join(metadir, fbboxcountname)\n            with open(fbboxcount, 'w') as f:\n                for l in labelList:\n                    f.write(l + '\\n')\n    out = \"Total of labeled frames: {}. Approx {} seconds of video.\".format(totalLabeled, totalLabeled / frame_rate)\n    summary_f.write(out+'\\n')\n    print(out)\n    out = \"Total of bounding boxes: {}.\".format(totalBBoxes)\n    summary_f.write(out+'\\n')\n    print(out)\n","sub_path":"meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"253161692","text":"import click\n\nfrom rich_click import RichCommand, RichGroup\n\n\n@click.group(cls=RichGroup)\n@click.option(\"--debug/--no-debug\", default=False)\ndef cli(debug):\n    \"\"\"\n    My amazing tool does all the things.\n\n    This is a minimal example based on documentation\n    from the 'click' package.\n\n    You can try using --help at the top level and also for\n    specific group subcommands.\n    \"\"\"\n    click.echo(f\"Debug mode is {'on' if debug else 'off'}\")\n\n\n@cli.command(cls=RichCommand)\ndef sync():\n    \"\"\"Synchronise all your files between two places.\"\"\"\n    click.echo(\"Syncing\")\n\n\nif __name__ == \"__main__\":\n    cli()\n","sub_path":"examples/02_declarative.py","file_name":"02_declarative.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"71900130","text":"# \"\"\"\r\n# utility function for working with DataFrame\r\n# \"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport pandas_ml as pdml\r\n\r\n#return train, validate, test splits of a single df whose rows have been permuted\r\ndef train_val_test_permutation_split(df, train_percent, validate_percent, seed=None):\r\n\r\n    np.random.seed(seed)\r\n    perm = np.random.permutation(df.index) #perm will randomly reorganize the rows\r\n    m = len(df.index) #grab the number of rows in the dataframe\r\n\r\n    #find out the number of rows to train and validate on; use the remainder for test\r\n    train_end = int(train_percent * m)\r\n    validate_end = int(validate_percent * m) + train_end\r\n\r\n    #create the train, validate, test variables then return.\r\n    train = df.iloc[perm[:train_end]]\r\n    validate = df.iloc[perm[train_end:validate_end]]\r\n    test = df.iloc[perm[validate_end:]]\r\n\r\n    return train,validate,test\r\n#\r\n\"\"\"\r\ncreating a confusion matrix\r\nfunction takes in y_actual and y_predicted, then spits out a\r\ncrosstab, which is turned into a heatmap;\r\nneed to pass in list of y_actual and y_predicted!\r\nthis is for numerical data\r\n\"\"\"\r\ndef confusion_Matrix_HeatMap_numerical(y_actual, y_predicted):\r\n    #create the 
dataframe from y_actual and y_predicted\r\n    data = {'y_Predicted':y_predicted,\r\n            'y_Actual':y_actual\r\n            }\r\n    df = pd.DataFrame(data,columns=['y_Actual','y_Predicted'])\r\n    #crosstab the confusion_matrix\r\n    crosstab_confusion_matrix = pd.crosstab(df['y_Actual'], df['y_Predicted'], rownames=['Actual'], colnames=['Predicted'])\r\n    #heatmap the confusion_matrix\r\n    heatmap = sns.heatmap(crosstab_confusion_matrix, annot=True)\r\n    return heatmap\r\n\r\n\r\n\r\n","sub_path":"Init/df_utils.py","file_name":"df_utils.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"610617752","text":"from __future__ import unicode_literals\n\nfrom actstream.models import Action\n\nfrom documents.tests import GenericDocumentViewTestCase\n\nfrom ..events import (\n    event_metadata_type_created, event_metadata_type_edited\n)\nfrom ..models import MetadataType\nfrom ..permissions import (\n    permission_metadata_type_create, permission_metadata_type_edit\n)\n\nfrom .mixins import MetadataTestsMixin\n\n\nclass MetadataTypeEventsTestCase(MetadataTestsMixin, GenericDocumentViewTestCase):\n    def test_metadata_type_create_event_no_permissions(self):\n        self.login_user()\n\n        Action.objects.all().delete()\n\n        response = self._request_metadata_type_create_view()\n        self.assertEqual(response.status_code, 403)\n        self.assertEqual(Action.objects.count(), 0)\n\n    def test_metadata_type_create_event_with_permissions(self):\n        self.login_user()\n\n        Action.objects.all().delete()\n\n        self.grant_permission(permission=permission_metadata_type_create)\n\n        response = self._request_metadata_type_create_view()\n\n        self.assertEqual(response.status_code, 302)\n\n        event = Action.objects.first()\n\n        metadata_type = MetadataType.objects.first()\n\n        self.assertEqual(event.verb, event_metadata_type_created.id)\n        self.assertEqual(event.target, metadata_type)\n        self.assertEqual(event.actor, self.user)\n\n    def test_metadata_type_edit_event_no_permissions(self):\n        self._create_metadata_type()\n\n        self.login_user()\n\n        Action.objects.all().delete()\n\n        response = self._request_metadata_type_edit_view()\n        self.assertEqual(response.status_code, 403)\n        self.assertEqual(Action.objects.count(), 0)\n\n    def test_metadata_type_edit_event_with_access(self):\n        self._create_metadata_type()\n\n        self.login_user()\n\n        Action.objects.all().delete()\n\n        self.grant_access(\n            permission=permission_metadata_type_edit, obj=self.metadata_type\n        )\n\n        response = self._request_metadata_type_edit_view()\n\n        self.assertEqual(response.status_code, 302)\n\n        event = Action.objects.first()\n\n        self.assertEqual(event.verb, event_metadata_type_edited.id)\n        self.assertEqual(event.target, self.metadata_type)\n        self.assertEqual(event.actor, self.user)\n","sub_path":"mayan/apps/metadata/tests/test_events.py","file_name":"test_events.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"468716248","text":"import sys\nfrom flask import g, redirect, url_for, request, render_template, flash\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom flask_oauthlib.client import OAuthException\nfrom vk.exceptions import VkException\nfrom . 
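`train_val_test_permutation_split` above returns positional slices of one permuted index, so the three pieces partition the frame. A quick usage check, assuming the function is in scope and a 60/20/20 split:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'x': np.arange(100)})
train, validate, test = train_val_test_permutation_split(df, 0.6, 0.2, seed=0)

# the slices partition the 100 rows into 60 / 20 / 20
assert len(train) == 60 and len(validate) == 20 and len(test) == 20
assert len(set(train.index) | set(validate.index) | set(test.index)) == 100
```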
import app, vk_oauth, lm\nfrom .forms import DownloadForm\nfrom .models import VkUser\nfrom .vkpg import VkPhotoGetter\n\n\n@app.before_request\ndef before_request():\n g.user = current_user\n\n\n@lm.user_loader\ndef load_user(user_id):\n return VkUser.get(user_id)\n\n\n@app.route(\"/login/oauth/vk\")\ndef login_oauth_vk():\n if app.config[\"VK_APP_PARAMS_SET\"]:\n next_url = request.args.get(\"next\") or request.referrer or None\n callback_url = url_for(\"callback_oauth_vk\", next=next_url, _external=True)\n return vk_oauth.authorize(callback=callback_url)\n\n flash(\"Login feature is not available as no VK app parameters set. \"\n \"Consult %s for solution.\" % app.config[\"REPO_URL\"],\n category=\"warning\")\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/callback/oauth/vk\")\ndef callback_oauth_vk():\n next_url = request.args.get(\"next\") or url_for(\"index\")\n\n try:\n response = vk_oauth.authorized_response()\n except OAuthException as e:\n app.logger.exception(e)\n flash(\"Was not able log you in, try again later\", category=\"danger\")\n return redirect(next_url)\n\n if response is None:\n app.logger.info(\"%s denied request to sign in\", current_user)\n flash(\"You denied the request to sign in\", category=\"warning\")\n return redirect(next_url)\n\n user = VkUser.create(\n user_id=response[\"user_id\"],\n access_token=response[\"access_token\"]\n )\n if user is not None:\n app.logger.info(\"Logging in %s\", user)\n login_user(user)\n flash(\"Successfully logged in\", category=\"success\")\n else:\n app.logger.error(\"Was not able to login %s\", user)\n flash(\"Was not able log you in, try again later\", category=\"danger\")\n return redirect(next_url)\n\n\n@app.route(\"/logout\")\n@login_required\ndef logout():\n next_url = request.args.get(\"next\") or url_for(\"index\")\n app.logger.info(\"Logging out %s\", current_user)\n logout_user()\n flash(\"Logged out\", category=\"info\")\n return redirect(next_url)\n\n\n@app.route(\"/shutdown\")\ndef shutdown():\n if current_user.is_authenticated:\n return redirect(url_for(\"logout\", next=url_for(\"shutdown\")))\n app.logger.info(\"<== Shutting down ==>\")\n sys.exit(0)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n form = DownloadForm()\n if form.validate_on_submit():\n access_token = current_user.access_token if current_user.is_authenticated else \"\"\n vkpg = VkPhotoGetter(access_token=access_token)\n try:\n app.logger.info(\"Downloading %s\", form.album_url.parsed)\n vkpg.get_album(url=form.album_url.parsed)\n app.logger.info(\"Done\")\n flash(\"Album downloaded\", category=\"success\")\n except VkException as e:\n app.logger.warning(e)\n flash(str(e), category=\"danger\")\n except Exception as e:\n app.logger.exception(e)\n flash(str(e), category=\"danger\")\n return render_template(\"index.html\", form=form)\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"179972244","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@author: lizhifeng\n@contact: lizhifeng2009@126.com\n@site: \n@file: cluster_similarity.py\n@time: 7/14/16 5:28 PM\n\"\"\"\n\nimport sys\nsys.path.append(\"..\")\n\nimport distance.tanimoto as tanimoto\n\n\ndef cluster_similarity(matrix, bmus, code_book):\n cs = []\n for i in range(len(code_book)):\n if i not in bmus.keys():\n cs.append(\"None\")\n continue\n\n if len(bmus[i]) == 1:\n cs.append(1)\n continue\n\n sum_similarity = 
0\n items = bmus[i]\n for j in range(len(items) - 1):\n for k in range(j + 1, len(items)):\n sum_similarity += tanimoto.bin_tanimoto_similarity(matrix[items[j]], matrix[items[k]])\n\n cs.append(sum_similarity*2/(len(items) * (len(items)-1)))\n\n return cs\n\n\nif __name__ == '__main__':\n pass","sub_path":"chemical_landscape_tmp/chemical/train/cluster_similarity.py","file_name":"cluster_similarity.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"38235964","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.models import resnet as vrn\n\nfrom .resnet import ResNet\nfrom .utils import register\n\nclass Bottleneck(nn.Module):\n # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)\n # while original implementation places the stride at the first 1x1 convolution(self.conv1)\n # according to \"Deep residual learning for image recognition\"https://arxiv.org/abs/1512.03385.\n # This variant is also known as ResNet V1.5 and improves accuracy according to\n # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.\n\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n \n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n \n width = int(planes * (base_width / 64.)) * groups\n \n downsample = nn.Sequential(\n vrn.conv1x1(inplanes, planes * self.expansion, stride),\n norm_layer(planes * self.expansion)\n )\n \n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = vrn.conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = vrn.conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = vrn.conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\nclass FPN_Mod(nn.Module):\n 'Feature Pyramid Network - https://arxiv.org/abs/1612.03144'\n\n def __init__(self, features):\n super().__init__()\n\n self.stride = 128\n self.features = features\n\n is_light = features.bottleneck == vrn.BasicBlock\n channels = [128, 256, 512] if is_light else [512, 1024, 2048]\n\n self.lateral3 = nn.Conv2d(channels[0], 256, 1)\n self.lateral4 = nn.Conv2d(channels[1], 256, 1)\n self.lateral5 = nn.Conv2d(channels[2], 256, 1)\n self.lateral6 = nn.Conv2d(channels[2], 256, 1)\n self.lateral7 = nn.Conv2d(256, 256, 1)\n self.smooth3 = nn.Conv2d(256, 256, 3, padding=1)\n self.smooth4 = nn.Conv2d(256, 256, 3, padding=1)\n self.smooth5 = nn.Conv2d(256, 256, 3, padding=1)\n self.smooth6 = nn.Conv2d(256, 256, 3, padding=1)\n self.smooth7 = nn.Conv2d(256, 256, 3, padding=1)\n \n # add c6 and c7 for better large predictions\n self.feature6 = Bottleneck(\n channels[2],\n channels[2],\n stride=2,\n norm_layer=features._norm_layer\n )\n self.feature7 = Bottleneck(\n channels[2],\n 256,\n stride=2,\n norm_layer=features._norm_layer\n )\n\n def initialize(self):\n def 
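`cluster_similarity` in the record above averages pairwise similarity inside each cluster, dividing the pair sum by the n(n-1)/2 unordered pairs. An equivalent formulation with `itertools.combinations`, using a stand-in similarity function (the record's real measure is `tanimoto.bin_tanimoto_similarity`, whose implementation is not shown):

```python
from itertools import combinations

def mean_pairwise_similarity(rows, sim):
    """Average sim(a, b) over all unordered pairs; 1.0 for singletons."""
    if len(rows) < 2:
        return 1.0
    pairs = list(combinations(rows, 2))   # n*(n-1)/2 unordered pairs
    return sum(sim(a, b) for a, b in pairs) / len(pairs)

# stand-in similarity: fraction of matching bits (not the real Tanimoto measure)
bit_match = lambda a, b: sum(x == y for x, y in zip(a, b)) / len(a)
print(mean_pairwise_similarity([[1, 0, 1], [1, 1, 1], [1, 0, 0]], bit_match))
```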
init_layer(layer):\n            if isinstance(layer, nn.Conv2d):\n                nn.init.xavier_uniform_(layer.weight)\n                if layer.bias is not None:\n                    nn.init.constant_(layer.bias, val=0)\n        self.apply(init_layer)\n\n        self.features.initialize()\n\n    def forward(self, x):\n        c3, c4, c5 = self.features(x)\n        c6 = self.feature6(c5)\n        c7 = self.feature7(c6)\n        \n        p7 = self.lateral7(c7)\n        p6 = self.lateral6(c6)\n        p6 = F.interpolate(p7, scale_factor=2) + p6\n        p5 = self.lateral5(c5)\n        p5 = F.interpolate(p6, scale_factor=2) + p5\n        p4 = self.lateral4(c4)\n        p4 = F.interpolate(p5, scale_factor=2) + p4\n        p3 = self.lateral3(c3)\n        p3 = F.interpolate(p4, scale_factor=2) + p3\n\n        p3 = self.smooth3(p3)\n        p4 = self.smooth4(p4)\n        p5 = self.smooth5(p5)\n        p6 = self.smooth6(p6)\n        p7 = self.smooth7(p7)\n\n        return [p3, p4, p5, p6, p7]\n    \n@register\ndef ResNet18FPN_Mod():\n    return FPN_Mod(ResNet(layers=[2, 2, 2, 2], bottleneck=vrn.BasicBlock, outputs=[3, 4, 5], url=vrn.model_urls['resnet18']))\n\n@register\ndef ResNet34FPN_Mod():\n    return FPN_Mod(ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.BasicBlock, outputs=[3, 4, 5], url=vrn.model_urls['resnet34']))\n\n@register\ndef ResNet50FPN_Mod():\n    return FPN_Mod(ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], url=vrn.model_urls['resnet50']))\n\n@register\ndef ResNet101FPN_Mod():\n    return FPN_Mod(ResNet(layers=[3, 4, 23, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], url=vrn.model_urls['resnet101']))\n\n@register\ndef ResNet152FPN_Mod():\n    return FPN_Mod(ResNet(layers=[3, 8, 36, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], url=vrn.model_urls['resnet152']))\n\n@register\ndef ResNeXt50_32x4dFPN_Mod():\n    return FPN_Mod(ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], groups=32, width_per_group=4, url=vrn.model_urls['resnext50_32x4d']))\n\n@register\ndef ResNeXt101_32x8dFPN_Mod():\n    return FPN_Mod(ResNet(layers=[3, 4, 23, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], groups=32, width_per_group=8, url=vrn.model_urls['resnext101_32x8d']))\n","sub_path":"retinanet/backbones/fpn_mod.py","file_name":"fpn_mod.py","file_ext":"py","file_size_in_byte":5622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"564581993","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 21 07:52:01 2021\r\n\r\n@author: David Gerardo Mora Salazar\r\n\"\"\"\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport requests\r\nimport io\r\nimport xlrd\r\nfrom bccr import SW\r\n#pip install bccr\r\n#1.a\r\n# Download data from web page\r\nurla=\"https://vincentarelbundock.github.io/Rdatasets/csv/datasets/airquality.csv\"\r\ns1=requests.get(urla).content\r\na=pd.read_csv(io.StringIO(s1.decode('utf-8')))\r\n#1.b\r\n#Rename the columns of a: a.1\r\nb = a.rename(columns = {0: 'Time'}, inplace = False)\r\npd.options.display.max_rows = 20\r\n#1.c\r\na.plot(subplots=True, figsize=(10, 10)); plt.legend(loc='best')\r\n#1.c.alternativa1\r\nfig, axes = plt.subplots(nrows=2, ncols=2)\r\na['Ozone'].plot(ax=axes[0,0]); axes[0,0].set_title('Ozone')\r\na['Temp'].plot(ax=axes[0,1]); axes[0,1].set_title('Temp')\r\na['Solar.R'].plot(ax=axes[1,0]); axes[1,0].set_title('Solar.R')\r\na['Wind'].plot(ax=axes[1,1]); axes[1,1].set_title('Wind')\r\n#1.c.alternativa2\r\n# y = [\"Ozone\"]\r\n# x = [\"Day\"]\r\n# g = sns.FacetGrid(a, row='Ozone', col='Day')\r\n# g.map(sns.lmplot, \"age\")\r\n# plt.show()\r\n#1.d\r\n\r\n# 
#2.a\r\nurlb=\"https://vincentarelbundock.github.io/Rdatasets/csv/datasets/AirPassengers.csv\"\r\ns2=requests.get(urlb).content\r\nb=pd.read_csv(io.StringIO(s2.decode('utf-8')))\r\n#2.b\r\nsns.lineplot(data=b, x=\"Unnamed: 0\", y=\"value\")\r\n#2.c\r\nb['Natural logarithm of value'] = np.log(b['value'])\r\nsns.lineplot(data=b, x=\"Unnamed: 0\", y=\"Natural logarithm of value\")\r\n#3.a\r\nwb = xlrd.open_workbook('C:/Users/MiBebe/Downloads/IPC.xlsx')\r\nsh1 = wb.sheet_by_name(u'Hoja1')\r\nx = sh1.col_values(0) # column 0\r\ny = sh1.col_values(1) # column 1\r\nplt.plot(x[290:318], y[290:318])\r\nplt.xticks(x[290:318], rotation='vertical', fontsize=5)\r\nplt.subplots_adjust(bottom=0.35)\r\nplt.show()\r\n#3.a.alternativa\r\nIPC = SW({979:'IPC July 2006 = 100'}, FechaInicio=2000) # passing a dictionary to rename the series\r\nIPC.plot();\r\n#3.a.alternativa2\r\ndef figura(datos, titulo, y):\r\n    fig, ax = plt.subplots(figsize=(12,5))\r\n    ax = datos.plot(ax=ax, legend=None)\r\n    ax.set(title=titulo, xlabel=\" \", ylabel=y)\r\n    return fig\r\nfigura(IPC,\"IPC\", \"July 2006 = 100\")\r\n#3.b\r\nfigura(IPC.diff(1),\r\n       'Quarterly change in the Costa Rican IPC',\r\n       'Base J2006 = 100');\r\n#3.b.alternativa\r\nIPC1 = SW({1043:'IPC (J2006=100) monthly change'}, FechaInicio=2000) # passing a dictionary to rename the series\r\nIPC1.plot(figsize = (12,5));\r\n\r\n#3.c\r\ndef figura1(datos, datos1, titulo, y):\r\n    fig1, ax = plt.subplots(figsize=(12,5))\r\n    concatenar = pd.concat([datos, datos1]) \r\n    ax = sns.lineplot(data=concatenar)\r\n    ax.set(title=titulo, xlabel=\" \", ylabel=y)    \r\n\r\n    return fig1\r\nfigura1(100*IPC.pct_change(1), 100*np.log(IPC).diff(1),\" Growth rate and first difference of the log of the IPC\", \"July 2006 = 100\")\r\n#3.c.alternativa\r\ntransIPC1 = 100*IPC.pct_change(1)\r\ntranslogIPC2 = 100*np.log(IPC).diff(1)\r\ntransIPC1.plot(figsize = (12,5), title = \"Monthly growth rate of the IPC\");\r\ntranslogIPC2.plot(figsize = (12,5), title = \"First difference of the log of the IPC\");\r\n#3.d\r\nfigura(IPC.diff(4),\r\n       'Year-over-year change in the Costa Rican IPC',\r\n       '\"July 2006 = 100\"');\r\nfigura(100*np.log(IPC).diff(4),\r\n       'Year-over-year growth rate of the Costa Rican IPC',\r\n       'percent');\r\n#3.e\r\nIPC3 = pd.concat([IPC, IPC.rolling(4).mean()], axis=1)\r\nIPC3.columns = ['Original series', 'Smoothed series']\r\n\r\nfigura(IPC3,\r\n       'Costa Rican IPC taking the average for each quarter',\r\n       'percent');\r\n#3.f\r\nIPC4 = pd.concat([IPC, IPC.rolling(13).mean()], axis=1)\r\nIPC4.columns = ['Original series', 'Smoothed series']\r\n\r\nfigura(IPC4,\r\n       'Costa Rican IPC taking the average of the 12 months of each year',\r\n       'percent');\r\n\r\n#4.a\r\n","sub_path":"Tarea 1 Macroeconometría.py","file_name":"Tarea 1 Macroeconometría.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"471966318","text":"import json\nfrom bottle import Bottle, HTTPResponse, request, response\nfrom config import MDB_CONFIG\nfrom challenge.db.story import Story\nfrom challenge.validations.field_required import is_required, is_status_valid\n\napp = Bottle()\n\n\ndef enable_cors(fn):\n    def _enable_cors(*args, **kwargs):\n        # set CORS headers\n        response.headers['Access-Control-Allow-Origin'] = 'http://localhost:3000'\n        response.headers['Access-Control-Allow-Methods'] = 'POST, PUT, DELETE, GET, OPTIONS'\n        response.headers['Access-Control-Allow-Headers'] = 
'X-Requested-With, Content-Type'\n response.headers['Access-Control-Allow-Credentials'] = 'true'\n\n if request.method != 'OPTIONS':\n # actual request; reply with the actual response\n return fn(*args, **kwargs)\n\n return _enable_cors\n\n\n@app.route(\"/api/v1/health.json\", method=\"GET\")\ndef health():\n return HTTPResponse(status=200, body=\"Healthy\")\n\n\n@app.route(\"/api/v1/stories.json\", method=\"POST\")\n@is_required\n@is_status_valid\ndef story():\n data = request.forms\n title = data.title\n description = data.description\n status = data.status\n author_id = data.author_id\n\n db = Story(**MDB_CONFIG)\n with db as cursor:\n db.create_story(cursor, title, description, db.get_status(status), author_id)\n\n payload = {\"title\": title,\n \"description\": description,\n \"status\": status,\n \"author_id\": author_id}\n\n return HTTPResponse(status=201, body=payload)\n\n\n@app.route(\"/api/v1/stories.json\", method=['OPTIONS', 'GET'])\n@enable_cors\ndef story():\n if request.method == 'OPTIONS':\n return {}\n else:\n db = Story(**MDB_CONFIG)\n with db as cursor:\n results = db.get_stories(cursor)\n\n stories = {\"status\": 200, \"data\": []}\n for result in results:\n db_result = {'story_id': result['story_id'],\n 'title': result['title'],\n 'description': result['description'],\n 'status': 'PUBLISHED' if result['status'] == Story.STATUS_PUBLISHED else 'DRAFT',\n 'author_id': result['author_id']}\n\n stories['data'].append(db_result)\n\n stories = json.dumps(stories)\n return HTTPResponse(status=200, body=stories)\n","sub_path":"challenge/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"549257496","text":"from datetime import datetime\nfrom docutils.core import publish_parts\n\nfrom zope import schema, interface\nfrom zope.annotation.interfaces import IAttributeAnnotatable\n\nfrom hurry.workflow.interfaces import IWorkflowInfo\n\nimport grok\n\nfrom grokstar.blog import Blog\nfrom grokstar import interfaces\nfrom form import GrokstarAddForm, GrokstarEditForm\n\nclass Entry(grok.Model):\n interface.implements(interfaces.IEntry, IAttributeAnnotatable)\n\n def __init__(self, title, summary, rightsinfo, categories=None):\n self.title = title\n self.updated = datetime.now()\n self.published = None\n self.summary = summary\n self.rightsinfo = rightsinfo\n if categories is None:\n self.categories = []\n else:\n self.categories = categories\n\nclass RestructuredTextEntry(Entry):\n interface.implements(interfaces.IRestructuredTextEntry)\n\n def __init__(self, title, summary, rightsinfo, content, categories=None):\n super(RestructuredTextEntry, self).__init__(title, summary, rightsinfo, categories)\n self.content = content\n\ngrok.context(RestructuredTextEntry)\n\n\nclass Index(grok.View):\n pass\n\n\nclass Item(grok.View):\n def format_published(self, published_date):\n return published_date.strftime('%Y-%m-%d')\n\n\nclass Add(GrokstarAddForm):\n grok.context(Blog)\n title = u'Add Entry'\n # add the url that the user wants\n form_fields = grok.Fields(\n id=schema.TextLine(title=u\"Post slug\"))\n # don't show them these timestamps\n form_fields += grok.AutoFields(RestructuredTextEntry).omit(\n 'published', 'updated')\n\n @grok.action('Add entry')\n def add(self, id, **data):\n new_entry = RestructuredTextEntry(**data)\n self.context['entries'][id] = new_entry\n IWorkflowInfo(new_entry).fireTransition('create')\n self.redirect(self.url(self.context))\n\n 
@grok.action('Add published entry')\n def add_published(self, id, **data):\n new_entry = RestructuredTextEntry(**data)\n self.context['entries'][id] = new_entry\n IWorkflowInfo(new_entry).fireTransition('create')\n IWorkflowInfo(new_entry).fireTransitionToward(interfaces.PUBLISHED) \n self.redirect(self.url(self.context))\n\n\nclass Edit(GrokstarEditForm):\n grok.context(RestructuredTextEntry)\n title = u'Edit Entry'\n form_fields = grok.AutoFields(RestructuredTextEntry).omit(\n 'published', 'updated')\n\n @grok.action('Save changes')\n def edit(self, **data):\n self.applyData(self.context, **data)\n self.redirect(self.url(self.context))\n\n @grok.action('Publish')\n def publish(self, **data):\n self.applyData(self.context, **data)\n IWorkflowInfo(self.context).fireTransitionToward(interfaces.PUBLISHED)\n self.redirect(self.url(self.context))\n\n\nclass RenderedContent(grok.View):\n def render(self):\n return renderRest(self.context.content)\n\n\nrest_settings = {\n # Disable inclusion of external files, which is a security risk.\n 'file_insertion_enabled': False,\n # Disable the promotion of a lone top-level section title to document title\n # (and disable the promotion of a subsequent section title to document\n # subtitle).\n 'doctitle_xform': False\n }\n\ndef renderRest(source):\n return publish_parts(\n source, writer_name='html', settings_overrides=rest_settings\n )['html_body']\n","sub_path":"Grokstar/branches/old-grokstar/src/grokstar/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"594695456","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\n\n\nclass CrmLead(models.Model):\n _inherit = \"crm.lead\"\n\n survey_input_ids = fields.One2many(\n 'survey.user_input', 'survey_input_id', string='Surveys', readonly=True, index=True)\n survey_id = fields.Many2one(\n 'survey.survey', related='stage_id.survey', string=\"Survey\")\n\n @api.multi\n def action_start_survey(self):\n \"\"\" Open the website page with the survey form \"\"\"\n self.ensure_one()\n if not self.survey_id.id:\n raise ValidationError(_('No survey is linked to this stage.'))\n else:\n response = self.env['survey.user_input'].create(\n {'survey_input_id': self.id, 'survey_id': self.survey_id.id, 'partner_id': self.partner_id.id, 'type': 'manually'})\n return self.survey_id.with_context(survey_token=response.token,\n id=self).action_start_survey()\n\n\nclass CrmStage(models.Model):\n _inherit = 'crm.stage'\n\n survey = fields.Many2one('survey.survey')\n\n\nclass SurveyQue(models.Model):\n _inherit = 'survey.question'\n\n related_lead = fields.Many2one('ir.model.fields', domain=[\n ('model', '=', 'crm.lead')])\n\n\nclass SurveyInput(models.Model):\n _inherit = 'survey.user_input'\n\n survey_input_id = fields.Many2one('crm.lead')\n\n\nclass Survey(models.Model):\n _inherit = 'survey.survey'\n\n @api.multi\n def action_start_survey(self):\n \"\"\" Open the website page with the survey form \"\"\"\n self.ensure_one()\n token = self.env.context.get('survey_token')\n trail = \"/%s\" % token if token else \"\"\n trail += '?lead_id=%s' % (self.env.context.get('id').id)\n return {\n 'type': 'ir.actions.act_url',\n 'name': \"Start Survey\",\n 'target': 'self',\n 'url': self.with_context(relative_url=True).public_url + trail,\n 
}\n        }\n","sub_path":"add_survey_to_crm_drc/models/add_survey_to_crm.py","file_name":"add_survey_to_crm.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"61384393","text":"\n\n\"\"\"\n[E] Given a binary tree, connect each node with its level-order successor via the 'next' pointer. The last\nnode of each level should point to the first node of the next level.\n\ntree:\n        1\n    2      3\n  4   5  6   7 \n\ntraversal using 'next': 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7\n\"\"\"\n\n\nfrom __future__ import print_function\nfrom collections import deque\n\n\n# Time: O(N) Space: O(N)\nclass TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left, self.right, self.next = None, None, None\n\n    # tree traversal using 'next' pointer\n    def print_tree(self):\n        print(\"Traversal using 'next' pointer: \", end='')\n        current = self\n        while current:\n            print(str(current.val) + \" \", end='')\n            current = current.next\n\n\ndef connect_all_siblings(root):\n    if root is None:\n        return\n\n    queue = deque()\n    queue.append(root)\n    currentNode, previousNode = None, None\n    while queue:\n        currentNode = queue.popleft()\n        if previousNode:\n            previousNode.next = currentNode\n        previousNode = currentNode\n\n        # insert the children of current node in the queue\n        if currentNode.left:\n            queue.append(currentNode.left)\n        if currentNode.right:\n            queue.append(currentNode.right)\n\n\ndef main():\n    root = TreeNode(12)\n    root.left = TreeNode(7)\n    root.right = TreeNode(1)\n    root.left.left = TreeNode(9)\n    root.right.left = TreeNode(10)\n    root.right.right = TreeNode(5)\n    connect_all_siblings(root)\n    root.print_tree()\n\n\nmain()\n","sub_path":"coding patterns/tree Breadth First Search/binary_tree_right_view.py","file_name":"binary_tree_right_view.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"154194977","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 06 14:46:00 2015\r\n\r\n@author: jroth\r\n\"\"\"\r\n\r\ndef get_data(in_file):\r\n    #\r\n    dat = np.genfromtxt(in_file, names=('SiteNo', 'StaName','RecNo', 'Date', 'ParmCode', 'Junk', 'Val', 'Color', 'RptLev', 'Units', 'LU'), skip_header=1, delimiter=',', dtype=('S16','S12', 'S12', 'f8', 'S24', 'S6', 'f8', 'S12', 'f8', 'S24', 'S24'), converters={'Date': make_date}, comments=\"_\", autostrip=True) \r\n    return dat \r\n\r\ndef make_date(date_txt):\r\n    new_date = dt.datetime.toordinal(dt.datetime.strptime(date_txt,\"%m/%d/%Y\"))\r\n    return new_date\r\n \r\n \r\ndef make_xtick_labels(min_x, max_x):\r\n    \r\n    x_axis_tix = []\r\n    x_axis_lab = []\r\n    min_date_obj = dt.datetime.fromordinal(int(min_x))\r\n    \r\n    ## Plots will start on an even quarter i.e. 
1/1, 4/1, 7/1, or 10/1\n ## set a string to the first of the month\n if int(min_date_obj.strftime('%m')) < 4:\n beg_month = '01'\n elif int(min_date_obj.strftime('%m')) < 7:\n beg_month = '04'\n elif int(min_date_obj.strftime('%m')) < 10:\n beg_month = '07'\n else:\n beg_month = '10'\n beg_year = str(min_date_obj.strftime('%y'))\n \n x_axis_tix.append(dt.datetime.toordinal(dt.datetime.strptime(beg_month+\"/01/\"+min_date_obj.strftime('%Y'),\"%m/%d/%Y\"))) ## set x-axis tick marks\n \n x_axis_lab.append(beg_month+\" / \"+beg_year) ## set x-axis tick labels\n \n ## create lists of quarterly spaced tick marks and corresponding labels spanning the lookback period\n i = 0\n while x_axis_tix[i] < max_x:\n temp_ord = x_axis_tix[i]+95\n temp_date = dt.datetime.fromordinal(temp_ord)\n m = str(temp_date.strftime('%m'))\n y = str(temp_date.strftime('%y'))\n x_axis_lab.append(str(m)+\" / \"+str(y))\n x_axis_tix.append(dt.datetime.toordinal(dt.datetime.strptime(x_axis_lab[i+1],\"%m / %y\")))\n i+=1\n \n return x_axis_tix, x_axis_lab\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport datetime as dt\nimport pylab as P\nimport numpy as np\nimport math\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\nsave_pdf = True\n\n\nsave_png = True\n\nin_file = 'H:\\\\GitHub\\\\asst_python_code\\\\nwis_utils\\\\piic_qw.csv' ## this is the file that contains\n\ndat = get_data(in_file) ## get all the nwis data for the sites\n\nparms = np.unique(dat['ParmCode']) ## get the unique parameters from input data\n\n\npdf_name = 'qw_plots.pdf' ## this is the output file in the cwd\n\nif save_pdf == True:\n cpdf = PdfPages(pdf_name)\n\ncols = []\n\nfor parm in parms : ## iterate through the set of unique parameters\n \n fig = plt.figure() ## instantiate a figure object\n fig.set_tight_layout(True)\n plt.hold = True\n \n ax = fig.add_subplot(1,1,1) ## add one axis object\n \n sites = np.unique(dat['SiteNo'][np.where(dat['ParmCode']==parm)]) ## get unique SiteNo's that have data for parm\n \n min_x = np.min(dat['Date'][np.where(dat['ParmCode']==parm)]) ## get the earliest date for this parm\n \n max_x = np.max(dat['Date'][np.where(dat['ParmCode']==parm)]) ## get the latest date for this parm\n \n min_y = np.min(dat['Val'][np.where(dat['ParmCode']==parm)]) ## get the min value for this parm\n\n max_y = np.max(dat['Val'][np.where(dat['ParmCode']==parm)]) ## get the max value date for this parm\n \n x_tix, x_lab = make_xtick_labels(min_x, max_x) ## pass min x and max x to mak_xtick_labels\n \n min_x = np.min(x_tix) ## set vars for x-axis limits\n max_x = np.max(x_tix) ## set vars for x-axis limits\n \n frac_y = (max_y-min_y)*0.1\n min_y = max(min_y-frac_y, 0)\n max_y = max_y + frac_y\n if max_y == min_y:\n max_y = 2 * min_y\n \n means = []\n landuse = []\n std = []\n for site in sites:\n \n dates = dat['Date'][np.where(np.logical_and(dat['ParmCode']==parm, dat['SiteNo']==site ))]\n\n vals = dat['Val'][np.where(np.logical_and(dat['ParmCode']==parm, dat['SiteNo']==site ))]\n \n col = dat['Color'][np.where(np.logical_and(dat['ParmCode']==parm, dat['SiteNo']==site ))]\n \n cols.append(col)\n lu = dat['LU'][np.where(np.logical_and(dat['ParmCode']==parm, dat['SiteNo']==site ))]\n \n landuse.append(lu[0])\n \n means.append(np.mean(vals))\n \n ax.plot(dates, vals, 'o', c=col[0])\n \n \n rep = dat['RptLev'][np.where(np.logical_and(dat['ParmCode']==parm, dat['SiteNo']==site ))]\n ax.plot([min_x, max_x], [rep[0],rep[0]], 'k--')\n ax.text(min_x+10, rep[0]+frac_y/10,'RL', fontsize=12, color='red')\n \n unts = 
dat['Units'][np.where(np.logical_and(dat['ParmCode']==parm, dat['SiteNo']==site ))]\n \n unt = unts[0] \n \n \n ax.set_xlim([min_x, max_x])\n ax.set_xticks(x_tix)\n ax.set_xticklabels(x_lab, size='8', rotation='45', family='sans-serif', weight='normal' )\n ax.set_xlabel('Date', weight='semibold')\n \n ax.set_ylim([min_y, max_y])\n #ax.set_yticks(size='xx-small')\n ax.set_yticklabels(ax.get_yticks(), size='8', family='sans-serif', weight='normal')\n ax.set_ylabel(parm.split('\"')[1]+\" [\"+unt+\"]\", weight='semibold') \n \n \n ax.set_title(parm.split('\"')[1], weight='bold')\n leg_txt = []\n for l, m in zip(landuse, means): \n leg_txt.append(l+\": mean = {0:0.2f}\".format(m))\n ax.legend(leg_txt, loc='upper left', numpoints=1, fontsize='small', markerscale=1.5, handletextpad=0, framealpha=0.7)\n if save_pdf == True: \n cpdf.savefig()\n if save_png == True:\n date_lab1 = dt.datetime.strftime(dt.datetime.fromordinal(x_tix[0]),\"%m%d%Y\")\n date_lab2 = dt.datetime.strftime(dt.datetime.fromordinal(x_tix[-1]),\"%m%d%Y\")\n plt.savefig(parm.split('\"')[1]+\"_\"+date_lab1+\"-\"+date_lab2+\".png\", dpi=300, format='png')\n plt.close(fig)\nif save_pdf == True: \n cpdf.close()\n","sub_path":"nwis_utils/wat_qw_plots.py","file_name":"wat_qw_plots.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"199119598","text":"import open3d as o3d\nimport torch\nimport copy\nimport math\nimport torch.optim as optim\nfrom tqdm import tqdm\nimport smplx\nfrom human_body_prior.tools.model_loader import load_vposer\nimport argparse\nimport sys, os, glob\nimport numpy as np\nfrom utils import *\nfrom utils_read_data import *\nimport pdb\n\n\ndef get_rotmat(angle):\n rotmat = np.array([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0,0,1]] )\n return rotmat\n\n\n\ndef update_globalRT_for_smplx_batch(body_params_dict, smplx_model, trans_to_target_origin, delta_T=None):\n '''\n input:\n body_params: array, [b, 72], under camera coordinate\n smplx_model: the model to generate smplx mesh, given body_params\n trans_to_target_origin: coordinate transformation [b,4,4] mat\n Output:\n body_params with new globalR and globalT, which are corresponding to the new coord system\n '''\n\n ### step (1) compute the shift of pelvis from the origin\n batch_size = body_params_dict['transl'].shape[0]\n\n body_param_dict_torch = {}\n for key in body_params_dict.keys():\n body_param_dict_torch[key] = body_params_dict[key].clone()\n body_param_dict[key] = body_param_dict[key].detach().cpu().numpy()\n\n \n\n if delta_T is None:\n body_param_dict_torch['transl'] = torch.zeros([batch_size,3], dtype=torch.float32).to(device)\n body_param_dict_torch['global_orient'] = torch.zeros([batch_size,3], dtype=torch.float32).to(device)\n smplx_out = smplx_model(return_verts=True, **body_param_dict_torch)\n delta_T = smplx_out.joints[:,0,:] # (3,)\n delta_T = delta_T.detach().cpu().numpy()\n\n\n body_transl = body_params_dict['transl']+delta_T\n body_rot_angle = body_params_dict['global_orient']\n body_rotmat = R.from_rotvec(body_rot_angle).as_dcm() # to a [b, 3,3] rotation mat\n body_transf = np.tile(np.eye(4), (batch_size, 1,1))\n body_transf[:,:-1,:-1] = body_rotmat\n body_transf[:,:-1, -1] = body_transl\n body_transf_w = np.matmul(trans_to_target_origin, body_transf)\n body_params_dict['global_orient'] = R.from_dcm(body_transf_w[:,:-1,:-1]).as_rotvec()\n body_params_dict['transl'] = body_transf_w[:,:-1, -1] - delta_T\n\n 
return body_param_dict\n\n\n\n\n\n\n\n\n\n## figure out body model\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nvposer_model, _ = load_vposer('/home/yzhang/body_models/VPoser/vposer_v1_0/', vp_model='snapshot')\nvposer_model = vposer_model.to(device)\n\nsmplx_model = smplx.create('/home/yzhang/body_models/VPoser/',\n model_type='smplx',\n gender='neutral', ext='npz',\n num_pca_comps=12,\n create_global_orient=True,\n create_body_pose=True,\n create_betas=True,\n create_left_hand_pose=True,\n create_right_hand_pose=True,\n create_expression=True,\n create_jaw_pose=True,\n create_leye_pose=True,\n create_reye_pose=True,\n create_transl=True,\n batch_size=50\n ).to(device)\n\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--result_path\", default='optimize_results', type=str)\n parser.add_argument(\"--dataset\", default='replica', type=str)\n parser.add_argument(\"--scene_name\", default=None)\n\n args = parser.parse_args()\n scenename = args.scene_name if args.scene_name is not None else '*'\n result_folder_per_scene = glob.glob(os.path.join(args.result_path,\n args.dataset,\n scenename))\n\n for folder in result_folder_per_scene:\n ## load results of PLACE\n body_params_75 = torch.FloatTensor(np.load(folder+'/body_params_opt_list_s2.npy')).to(device)\n rot_angles = np.load(folder+'/rot_angle_list_1.npy')\n shifts = np.load(folder+'/shift_list.npy')\n \n ## convert smplx parameters\n body_params_72 = convert_to_3D_rot(body_params_75) # tensor, [bs=1, 72]\n body_pose = vposer_model.decode(body_params_72[:, 16:48], output_type='aa').view(-1,63) # tensor, [bs=1, 63]\n body_verts, body_param_dict = gen_body_mesh(body_params_72, body_pose, smplx_model)\n\n # ## visualize the body parameters before body param transform\n # body_mesh_list = []\n # for j in range(10):\n # body_verts_one = body_verts.detach().cpu().numpy()[j]\n # # transfrom the body verts to the PROX world coordinate system\n # ####----------TODO change the transform to transformation matrix, and then update the SMPLX global params in the world coordinate\n # body_verts_opt_prox_s2 = np.zeros(body_verts_one.shape) # [10475, 3]\n # temp = body_verts_one - shifts[j]\n # body_verts_opt_prox_s2[:, 0] = temp[:, 0] * math.cos(-rot_angles[j]) - temp[:, 1] * math.sin(-rot_angles[j])\n # body_verts_opt_prox_s2[:, 1] = temp[:, 0] * math.sin(-rot_angles[j]) + temp[:, 1] * math.cos(-rot_angles[j])\n # body_verts_opt_prox_s2[:, 2] = temp[:, 2]\n\n # body_mesh_opt_s2 = o3d.geometry.TriangleMesh()\n # body_mesh_opt_s2.vertices = o3d.utility.Vector3dVector(body_verts_opt_prox_s2)\n # body_mesh_opt_s2.triangles = o3d.utility.Vector3iVector(smplx_model.faces)\n # body_mesh_opt_s2.compute_vertex_normals()\n # body_mesh_list.append(body_mesh_opt_s2)\n # scene_name = os.path.basename(folder)\n # scene_mesh = o3d.io.read_point_cloud(os.path.join(os.path.join('/mnt/hdd/datasets/PlaceInReplica/replica_v1/', scene_name), 'mesh.ply'))\n # o3d.visualization.draw_geometries([scene_mesh]+body_mesh_list)\n\n ## resolve global transformations\n transf_w2c = np.tile(np.eye(4), (shifts.shape[0],1,1))\n transf_w2c[:,:-1,-1] = shifts #[b,3]\n transf_w2c[:,:-1,:-1] = np.stack(map(get_rotmat, rot_angles))\n transf_c2w = np.stack(tuple(map(np.linalg.inv, transf_w2c)))\n body_param_dict_w = update_globalRT_for_smplx_batch(body_param_dict, smplx_model, transf_c2w, delta_T=None)\n\n ## save result to file\n np.savez(os.path.join(folder, 'body_param_dict_w.npz'), **body_param_dict_w)\n\n # # # 
## visualize the body parameters after body param transform\n        # body_param_dict_w_torch = {}\n        # for key in body_param_dict_w:\n        #     body_param_dict_w_torch[key] = torch.FloatTensor(body_param_dict_w[key]).to(device)\n        # smplx_output = smplx_model(return_verts=True, **body_param_dict_w_torch) # generated human body mesh\n        # body_verts = smplx_output.vertices # [bs, n_body_vert, 3]\n        # body_mesh_list = []\n        # for j in range(10):\n        #     body_verts_one = body_verts.detach().cpu().numpy()[j]\n        #     body_mesh_opt_s2 = o3d.geometry.TriangleMesh()\n        #     body_mesh_opt_s2.vertices = o3d.utility.Vector3dVector(body_verts_one)\n        #     body_mesh_opt_s2.triangles = o3d.utility.Vector3iVector(smplx_model.faces)\n        #     body_mesh_opt_s2.compute_vertex_normals()\n        #     body_mesh_list.append(body_mesh_opt_s2)\n        # scene_name = os.path.basename(folder)\n        # scene_mesh = o3d.io.read_point_cloud(os.path.join(os.path.join('/mnt/hdd/datasets/PlaceInReplica/replica_v1/', scene_name), 'mesh.ply'))\n        # o3d.visualization.draw_geometries([scene_mesh]+body_mesh_list)\n\n\n\n\n\n\n","sub_path":"utils_getPlaceFinalParam.py","file_name":"utils_getPlaceFinalParam.py","file_ext":"py","file_size_in_byte":7756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"45080068","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@author: lijingxin\n@contact: lijingxin666@gmail.com\n@site: https://github.com/lijingxin666\n@time: Created on 2:41 PM 5/21/20\n\nQuestion: \n\n\"\"\"\n\ndef updateMatrix(matrix):\n    q, m, n = [], len(matrix), len(matrix[0])\n    for i in range(m):\n        for j in range(n):\n            if matrix[i][j] != 0:\n                matrix[i][j] = 0x7fffffff # set non-zero cells to the max value, since we are looking for the minimum\n            else:\n                q.append((i, j)) # put all the 0 cells into the queue\n    for i, j in q:\n        for r, c in ((i, 1+j), (i, j-1), (i+1, j), (i-1, j)):\n            z = matrix[i][j] + 1\n            if 0 <= r < m and 0 <= c < n and matrix[r][c] > z: # visit the four neighbors\n                matrix[r][c] = z\n                q.append((r, c))\n    return matrix\n\nmatrix = [\n    [0, 0, 0],\n    [0, 1, 0],\n    [0, 1, 1],\n]\nupdateMatrix(matrix)","sub_path":"Algorithm_PY/ch18/F17074Matrix.py","file_name":"F17074Matrix.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"449615167","text":"from helpers.base_helper import TrainTestHelper, transform_train, transform_test\nfrom utils import get_channels_axis,save_roc_pr_curve_data, show_roc_pr_curve_data, get_class_name_from_index\nfrom models.LSA_mnist import LSA_MNIST, LSA_MNIST_DOUBLE\nfrom models.encoders_decoders import CAE_group_pytorch\nfrom keras2pytorch_dataset import trainset_pytorch, transformer_dataset\nimport torch.utils.data as data\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.optim as optim\nfrom sklearn.ensemble import IsolationForest\nfrom transformations import RA, RA_IA, RA_IA_PR, Rotate4D\nfrom models.wrn_pytorch import WideResNet\nfrom models.resnet_pytorch import ResNet\nfrom models.densenet_pytorch import DenseNet\nfrom models.SN_mnist import SN_MNIST, RSRBoneV2, RSRBoneV2Linear, RSRBoneV3Linear\nimport numpy as np\nfrom misc import AverageMeter\nfrom loss_functions.coteaching_loss import InCoTeachingEstLoss\nimport itertools\nimport os\n\nclass EstRSRAEHelper(TrainTestHelper):\n    def __init__(self, n_channels, noise_rate, group=2, lamb1=1., mode='exchange', *args, **kwargs):\n        super(EstRSRAEHelper, self).__init__(*args, **kwargs)\n        self.method_tag = \"EstRSRAE\"\n\n        self.n_channels = n_channels\n        self.group = group\n        # self.print(\"group: {}\".format(group))\n        
# self.model = LSA_MNIST_DOUBLE(input_shape=(n_channels, h, w),code_length=code_length,\n # cpd_channels=cpd_channels, group=group).cuda()\n # self.model = CAE_group_pytorch(in_channels=self.n_channels, group=self.group).cuda()\n lr = 0.00025\n if self.dataset_name in ['caltech101', 'fashion-mnist-rsrae', 'fashion-mnist']:\n pass\n else:\n self.model = RSRBoneV3Linear(input_shape=n_channels, z_channels=10,\n hidden_layer_sizes=[32, 64, 128], bn=False).cuda()\n # lr = 0.001\n\n self.lamb1 = lamb1\n # self.batch_size = 128\n # self.print(noise_rate)\n\n\n self.criterion = InCoTeachingEstLoss(lamd=self.lamb1, noise_rate=noise_rate, cpd_channels=100,\n mode=mode).cuda()\n cudnn.benchmark = True\n\n self.noise_rate = noise_rate\n self.print(\"lamb1:{} noise_rate: {} mode:{}\".format(self.lamb1, noise_rate, mode))\n # use adam always\n self.optimizer = optim.Adam(self.model.parameters(), lr=lr, weight_decay=1e-7)\n # self.epochs = 250\n\n def train(self):\n self.losses = AverageMeter()\n for epoch in range(self.epochs):\n if epoch % self.test_per_epoch == 0:\n self.test(True)\n self.model.train()\n for batch_idx, (inputs, y) in enumerate(self.trainloader):\n self.train_step(inputs, y)\n # if self.criterion.noise_rate < self.noise_rate:\n # self.criterion.noise_rate += 0.05\n self.print('Epoch: [{} | {}], loss: {}'.format(epoch + 1, self.epochs,\n self.losses.avg))\n\n def train_step(self, x, y=None):\n inputs = torch.autograd.Variable(x.cuda())\n z, x_r, z_dist = self.model(inputs)\n loss, _, _ = self.criterion(x_r, inputs, z, z_dist)\n self.losses.update(loss.item(), 1)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n def compute_scores(self):\n self.model.eval()\n # reloss = np.zeros(shape=len(self.testloader.dataset, ))\n scoreRes = []\n scoreAug = []\n y_test = []\n for batch_idx, (inputs, labels) in enumerate(self.testloader):\n x = inputs.cuda()\n # print(x)\n with torch.no_grad():\n z, x_r, z_dist = self.model(x)\n _, mse, autoreg = self.criterion(x_r, x, z, z_dist)\n mse = mse.pow(2)\n scoreRes.append(-mse.cpu())\n scoreAug.append(-autoreg.cpu())\n\n y_test.append(labels.data.cpu())\n\n\n scoreRes = torch.cat(scoreRes, dim=0)\n scoreAug = torch.cat(scoreAug, dim=0)\n y_test = torch.cat(y_test, dim=0)\n remin, remax = scoreRes.min(), scoreRes.max()\n augmin, augmax = scoreAug.min(), scoreAug.max()\n scoreRes = (scoreRes - remin) / (remax - remin + 1e-12)\n scoreAug = (scoreAug - augmin) / (augmax - augmin + 1e-12)\n return (scoreRes, scoreAug, scoreRes + scoreAug), y_test.numpy()\n\n def test(self, is_show=True):\n scores, y_test = self.compute_scores()\n if is_show:\n tags = ['Reconstruction', 'autoregress', 'Combine']\n for i in range(len(tags)):\n self.print('score {}'.format(tags[i]))\n roc_auc, pr_auc_norm, pr_auc_anom = show_roc_pr_curve_data(scores[i], y_test)\n self.print(\"auroc:{}, pr_auc_norm:{}, pr_auc_anom:{}\".format(roc_auc, pr_auc_norm, pr_auc_anom), False)\n\n else:\n res_file_path = self.get_result_file_path()\n save_roc_pr_curve_data(scores, y_test, res_file_path)","sub_path":"helpers/estrsrae_helper.py","file_name":"estrsrae_helper.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"322735929","text":"#---------------------------------- DataFrame Utils ----------------------------------#\r\n\r\nimport pandas as pd\r\n\r\nclass DataframeUtils(object):\r\n \r\n @classmethod\r\n def get_cuc_from_doc_in_df(cls, df, column_doctype, column_doc, 
column_cuc):\r\n \"\"\"\r\n Returns the variable CUC (Series) based on the type and the number of the documents.\r\n df: Dataframe.\r\n column_doctype: (str) Column name of the document type.\r\n column_doc: (str) Column name of the document number.\r\n column_cuc: (str) Column name for storing the CUC.\r\n \"\"\"\r\n df[column_cuc] = df[column_doctype].map(str) + '-' + df[column_doc].map(str)\r\n return df\r\n \r\n @classmethod\r\n def n_values(cls, df, cols = None, n = 6):\r\n \"\"\"\r\n Returns a DataFrame with the first 'n' values by each columns.\r\n df: Dataframe.\r\n cols: (str, list) Column(s) name for describing.\r\n n: (int) Parameter for showing first 'n' values.\r\n \"\"\"\r\n if cols is None:\r\n cols = df.columns\r\n elif isinstance(cols, str):\r\n cols = [cols]\r\n# N_cols = ['N_' + col for col in cols]\r\n# cols2 = np.asarray([cols, N_cols]).ravel(order = 'F')\r\n df_tmp = pd.DataFrame(columns = cols)\r\n for col in cols:\r\n firstn = df[col].value_counts().iloc[:n]\r\n firstn_index = firstn.index.tolist()\r\n firstn_N = firstn.values.tolist()\r\n for i in range(n - len(firstn)):\r\n firstn_index.append('')\r\n firstn_N.append(0)\r\n df_tmp[col] = ['{} ({:,})'.format(x, y) for x, y in zip(firstn_index, firstn_N)]\r\n df_tmp = df_tmp.applymap(str)\r\n df_tmp = df_tmp.applymap(lambda x: x.replace('(0)', ''))\r\n df_tmp.index = ['V' + str(x) for x in range(1, n + 1)]\r\n return df_tmp.T\r\n \r\n @classmethod\r\n def descrip_colmuns(cls, df, cols = None, n_vals = 6, summary = True):\r\n \"\"\"\r\n Returns a DataFrame with descriptives like, dtype, N° Nulls,\r\n most frequency values, mean, etc by each column.\r\n df: Dataframe.\r\n cols: (str, list) Column(s) name for describing.\r\n n: (int) Parameter for showing first 'n' values.\r\n summary: (bool) Show Descriptive statistics ?\r\n \"\"\"\r\n if cols is None:\r\n cols = df.columns\r\n df2 = df[cols].copy()\r\n dc = df2.dtypes.to_frame()\r\n dc.columns = ['dtype']\r\n dc['dtype'] = dc['dtype'].astype(str)\r\n dc['Tipo'] = ''\r\n dc['Tipo'][dc['dtype'] == 'object'] = 'Categoria'\r\n dc['Tipo'][[x.startswith(('int', 'float')) for x in dc['dtype']]] = 'Numero'\r\n n_nulls = df2.isnull().sum()#.to_frame('N_Nulls')\r\n n_nulls_porc = n_nulls / float(len(df2))\r\n a = pd.Series(['{} ({:.1%})'.format(x, y) for x, y in zip(n_nulls, n_nulls_porc)],\r\n index = df2.columns, name = 'N_Nulls')\r\n dc = dc.join(a)\r\n dc = dc.join(df2.nunique().to_frame('N_Unicos'))\r\n if n_vals > 0:\r\n dc = dc.join(cls.n_values(df2, cols, n_vals))\r\n if summary:\r\n dc = dc.join(df2.describe().T)\r\n dc.fillna('', inplace = True)\r\n return dc\r\n \r\n @classmethod\r\n def print_basic_info(cls, df):\r\n \"\"\"\r\n Returns a basic infor of a dataframe like the nrows, ncols, and the columns.\r\n df: Dataframe.\r\n \"\"\"\r\n try:\r\n namespace = globals()\r\n df_name = [name for name in namespace if namespace[name] is df][0]\r\n except:\r\n df_name = ''\r\n shape = '{:,}'.format(df.shape[0]) + ' | ' + '{:,}'.format(df.shape[1])\r\n print('{} ({}):\\n'.format(df_name, shape),list(df.columns))\r\n\r\n @classmethod\r\n def rolling_mean(cls, df, columns, column_id, n = 0, min_periods = 3):\r\n \"\"\"\r\n Returns a DataFrame with the rolling mean of some columns.\r\n df: Dataframe.\r\n columns: (str, list) Column names to apply the rolling mean.\r\n column_id: (str, list) Column names to use as a grouper.\r\n n: Window for the rolling. 
0 is equivalent to a rolling historical mean.\r\n min_periods: Minimum number of periods to show the rolling.\r\n \"\"\"\r\n# group_by_data = df.set_index(column_id, append=True).groupby(level=1)\r\n# resulting_series_with_mean = group_by_data[columns].apply(\r\n# pd.rolling_mean, n, 1).reset_index(column_id)\r\n if isinstance(columns, str):\r\n columns = [columns]\r\n if isinstance(column_id, str):\r\n column_id = [column_id]\r\n ent = columns + column_id\r\n if n > 0:\r\n return df[ent].groupby(column_id).rolling(min_periods=min_periods,window=n).\\\r\n mean().reset_index(drop=True)[columns]\r\n else:\r\n return df[ent].groupby(column_id).expanding(min_periods=min_periods).\\\r\n mean().reset_index(drop=True)[columns]\r\n #resulting_series_with_mean[columns]\r\n\r\n @classmethod\r\n def rolling_sum(cls, df, columns, column_id, n = 0, min_periods = 3):\r\n \"\"\"\r\n Returns a DataFrame with the rolling sum of some columns.\r\n df: Dataframe.\r\n columns: (str, list) Column names to apply the rolling sum.\r\n column_id: (str, list) Column names to use as a grouper.\r\n n: Window for the rolling. 0 is equivalent to a rolling historical sum.\r\n min_periods: Minimum number of periods to show the rolling.\r\n \"\"\"\r\n# group_by_data = df.set_index(column_id, append=True).groupby(level=1)\r\n# resulting_series_with_mean = group_by_data[columns].apply(\r\n# pd.rolling_mean, n, 1).reset_index(column_id)\r\n# return resulting_series_with_mean[columns]\r\n if isinstance(columns, str):\r\n columns = [columns]\r\n if isinstance(column_id, str):\r\n column_id = [column_id]\r\n ent = columns + column_id\r\n if n > 0:\r\n return df[ent].groupby(column_id).rolling(min_periods=min_periods,window=n).\\\r\n sum().reset_index(drop=True)[columns]\r\n else:\r\n return df[ent].groupby(column_id).expanding(min_periods=min_periods).\\\r\n sum().reset_index(drop=True)[columns]\r\n\r\n @classmethod\r\n def get_categorical_columns(cls, df, columns_to_exclude = []):\r\n \"\"\"\r\n Returns a list with the categorical columns.\r\n df: Dataframe.\r\n columns_to_exclude: (str, list) Column names to exclude (like IDs, Target, text, descriptions, etc).\r\n \"\"\"\r\n if isinstance(columns_to_exclude, str):\r\n columns_to_exclude = [columns_to_exclude]\r\n return \\\r\n [x for x in list(df.select_dtypes(include=['object', 'category']))\r\n if x not in columns_to_exclude]\r\n\r\n @classmethod\r\n def get_numerical_columns(cls, df, columns_to_exclude = []):\r\n \"\"\"\r\n Returns a list with the numerical columns.\r\n df: Dataframe.\r\n columns_to_exclude: (str, list) Column names to exclude (like IDs, Target, text, descriptions, etc).\r\n \"\"\"\r\n if isinstance(columns_to_exclude, str):\r\n columns_to_exclude = [columns_to_exclude]\r\n return \\\r\n [x for x in list(df.select_dtypes(include=['integer', 'floating']))\r\n if x not in columns_to_exclude]\r\n\r\n @classmethod\r\n def get_columns_by_types(cls, df, columns_to_exclude = [], ordinal_columns = [],\r\n print_flag=False):\r\n \"\"\"\r\n Returns a tuple with the valid, categorical, numerical and nomial columns.\r\n Example: cols, cols_cat, cols_num, cols_nom = ra_df.get_columns_by_types(df, columns_to_exclude, ordinal_columns, print_flag = True)\r\n df: Dataframe.\r\n columns_to_exclude: (str, list) Column names to exclude (like IDs, Target, text, descriptions, etc).\r\n ordinal_columns: (str, list) Column names of the ordinal variables.\r\n \"\"\"\r\n if isinstance(columns_to_exclude, str):\r\n columns_to_exclude = [columns_to_exclude]\r\n if 
isinstance(ordinal_columns, str):\r\n            ordinal_columns = [ordinal_columns]\r\n        valid_columns = [x for x in df.columns if x not in columns_to_exclude]\r\n        categorical_columns = \\\r\n            cls.get_categorical_columns(df, columns_to_exclude)\r\n        numerical_columns = \\\r\n            cls.get_numerical_columns(df, columns_to_exclude)\r\n        other_columns = \\\r\n            [x for x in valid_columns if x not in categorical_columns\r\n             and x not in numerical_columns]\r\n        nominal_columns = \\\r\n            [x for x in categorical_columns if x not in ordinal_columns]\r\n\r\n        if print_flag:\r\n            print('Categorical:\\n', categorical_columns)\r\n            print('\\nCategorical - Ordinal:\\n', ordinal_columns)\r\n            print('\\nCategorical - Nominal:\\n', nominal_columns)\r\n            print('\\nNumerical:\\n', numerical_columns)\r\n            print('\\nOther:\\n', other_columns)\r\n            print('\\nExclude:\\n', columns_to_exclude)\r\n\r\n        return valid_columns, categorical_columns, \\\r\n            numerical_columns, nominal_columns\r\n\r\n#---------------------------------- Format Utils ----------------------------------#\r\nclass FormatUtils(object):\r\n\r\n    __str_date_format = '%d/%m/%Y'\r\n\r\n    __dict_datetimes = {}\r\n    __dict_dateints = {}\r\n\r\n    @classmethod\r\n    def get_year(cls, period):\r\n        \"\"\"\r\n        Returns the year of a period.\r\n        period: (int) Period.\r\n        \"\"\"\r\n        return period // 100\r\n\r\n    @classmethod\r\n    def get_month(cls, period):\r\n        \"\"\"\r\n        Returns the month of a period.\r\n        period: (int) Period.\r\n        \"\"\"\r\n        return period % 100\r\n\r\n    @classmethod\r\n    def gap_in_months_for_periods(cls, period_1, period_2):\r\n        \"\"\"\r\n        Returns the number of months missed between two periods.\r\n        period_1: (int) First period.\r\n        period_2: (int) Second period.\r\n        \"\"\"\r\n        year_1 = cls.get_year(period_1)\r\n        year_2 = cls.get_year(period_2)\r\n        month_1 = cls.get_month(period_1)\r\n        month_2 = cls.get_month(period_2)\r\n\r\n        if year_1 == year_2:\r\n            basic_difference = abs(month_1 - month_2) - 1\r\n            if basic_difference < 0:\r\n                basic_difference = 0\r\n            return basic_difference\r\n        elif year_1 > year_2:\r\n            greater_year_dif = month_1\r\n            smaller_year_dif = 12 - month_2\r\n            basic_difference = greater_year_dif + smaller_year_dif\r\n            additional_months_difference = (year_1 - year_2 - 1) * 12\r\n            return basic_difference + additional_months_difference - 1\r\n        elif year_1 < year_2:\r\n            greater_year_dif = month_2\r\n            smaller_year_dif = 12 - month_1\r\n            basic_difference = greater_year_dif + smaller_year_dif\r\n            additional_months_difference = (year_2 - year_1 - 1) * 12\r\n            return basic_difference + additional_months_difference - 1\r\n\r\n    @classmethod\r\n    def get_difference_in_months(cls, datetime1, datetime2, dif_base_on_days = False):\r\n        \"\"\"\r\n        Returns the difference in months (int) between two datetimes:\r\n        datetime1: (datetime) First datetime.\r\n        datetime2: (datetime) Second datetime.\r\n        dif_base_on_days: Should the difference be based on the exact dates or \\\r\n        based on the months and years of the dates.\r\n        \"\"\"\r\n        if dif_base_on_days:\r\n            difference = datetime1 - datetime2\r\n            difference = abs(difference)\r\n            return difference.days//30\r\n        else:\r\n            dif_years = datetime1.year - datetime2.year\r\n            dif_months = datetime1.month - datetime2.month\r\n            return abs(dif_years*12 + dif_months)\r\n\r\n    @classmethod\r\n    def date_to_integer(cls, raw_date, format_date = None, format_integer = '%Y%m'):\r\n        \"\"\"\r\n        Return an integer in format (YYYYMM by default) of a date.\r\n        raw_date: Date to convert.\r\n        format_date: (str pattern) Format of the raw_date. 
Default -> automatic detection.\r\n format_integer: (str pattern) Format required to of the conversion. (YYYYMM by default) \r\n \"\"\"\r\n# if raw_date in cls.__dict_dateints:\r\n# return cls.__dict_dateints[raw_date]\r\n# else:\r\n# datetime_obj = datetime.strptime(raw_date, cls.__str_date_format)\r\n# if datetime_obj.month > 9:\r\n# current_month = datetime_obj.month\r\n# else:\r\n# current_month = '0'+str(datetime_obj.month)\r\n#\r\n# formatted_date = '{year}{month}'.format(year=datetime_obj.year,\r\n# month=current_month)\r\n# integer_date = int(formatted_date)\r\n# cls.__dict_dateints[raw_date] = integer_date\r\n# return integer_date\r\n return pd.to_datetime(raw_date, format=format_date).strftime(format_integer)\r\n \r\n @classmethod\r\n def make_datetime(cls, string_series, format=None, dayfirst=True):\r\n \"\"\"\r\n Return a datetime series or string formated by a pattern.\r\n string_series: (str,series) Object to convert.\r\n format: (str pattern) Format of the object.\r\n dayfirst: Should the string_series have the day first ?\r\n \"\"\"\r\n if isinstance(string_series, str):\r\n if len(string_series) == 6:\r\n string_series = str(string_series) + '01'\r\n return pd.to_datetime(string_series, format=format, dayfirst=dayfirst)\r\n else:\r\n if len(string_series[0]) == 6:\r\n string_series = string_series.map(lambda x: str(x) + '01')\r\n dates = {date:pd.to_datetime(date, format=format, dayfirst=dayfirst) for date in string_series.unique()}\r\n return string_series.map(dates)\r\n\r\n# @classmethod\r\n# def get_datetime(cls, raw_date):\r\n# if raw_date in cls.__dict_datetimes:\r\n# return cls.__dict_datetimes[raw_date]\r\n# else:\r\n# new_datetime = datetime.strptime(raw_date, cls.__str_date_format)\r\n# cls.__dict_datetimes[raw_date] = new_datetime\r\n# return new_datetime\r\n","sub_path":"rq_test1/ra_utils.py","file_name":"ra_utils.py","file_ext":"py","file_size_in_byte":14252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"365219404","text":"\"\"\"\n// Time Complexity :o(n)\n// Space Complexity : constant\n// Did this code successfully run on Leetcode : yes\n// Any problem you faced while coding this : no\n\n\n// Your code here along with comments explaining your approach\n\"\"\"\nclass Solution:\n def jump(self, nums: List[int]) -> int:\n if not nums or len(nums) < 2: \n return 0\n \n jumps = 1 #if length is more than 1, then at least 1 jump is required\n cur_int = next_int = nums[0] #initial values\n \n for i in range(1,len(nums)):\n if nums[i] + i > next_int: #setting next interval to the next max value\n next_int = nums[i] + i \n \n if i < len(nums)-1 and i == cur_int:#if we have visited all the values in current interval, and current interval isnt till the last index\n cur_int = next_int #update cur_int and increase jump \n jumps += 1\n \n return jumps","sub_path":"Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"554575327","text":"#A\na = int(input())\nb = int(input())\nr = b - a%b\n\nif r == b: print(0)\nelse: print(r)\n\n#b\nn,x =map(int,input().split())\na = list(map(int,input().split()))\nallsum=0\nfor i,v in enumerate(a):\n if x & (1< h2:\n hospitalized = str(h1)\n else:\n hospitalized = str(h2)\n except Exception as e:\n print(\"hospitalized errors...: \"+str(e))\n hospitalized = data['Number Hospitalised'][1]\n\n try:\n if data['Active Cases'][0] != '':\n a1 = int(data['Active 
Cases'][0].replace(\",\", \"\"))\n\n if data['Active Cases'][1] != '':\n a2 = int(data['Active Cases'][1].replace(\",\", \"\"))\n\n if a1 > a2:\n activeCases = str(a1)\n else:\n activeCases = str(a2)\n except Exception as e:\n print(\"active cases errors...: \"+str(e))\n activeCases = data['Active Cases'][1]\n\n print(\"chadddd \"+activeCases)\n print(\"chadddd \"+str(a1) + \" \" + str(a2))\n\n print(\"hospitalized:::: \" + hospitalized)\n print(\"cases:::: \" + activeCases)\n\n if not doingWeeklyReport:\n Tweet(data['Confirmed Cases'][0], data['Deaths'][0], data['Recovered'][0], data['Confirmed Cases']\n [1], date, activeCases, hospitalized, data['Deaths'][1], testsDone, positivityRate, data['Recovered'][1])\n\n lastPull = today\n setDate(lastPull)\n else:\n print(\"doing weekly Report\")\n with open(weeklyReportName+'.csv', 'a', newline='') as csvfile:\n spamwriter = csv.writer(\n csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_NONE\n )\n spamwriter.writerow([data['Confirmed Cases'][0], data['Deaths'][0][0:3].strip(), data['Recovered'][0], data['Confirmed Cases']\n [1], date, data['Active Cases'][1], hospitalized, data['Deaths'][1], testsDone, positivityRate])\n else:\n print(\"Scrape already done today... I'm gonna sleep!\")\n print(\"---------------------------------------------\")\n except Exception as e:\n print(str(e))\n finally:\n driver.close()\n\n\ndef WeeklyReport():\n global doingWeeklyReport\n global weeklyReportName\n\n doingWeeklyReport = True\n\n x = datetime.datetime.now()\n y = x - datetime.timedelta(days=7)\n weeklyReportName = \"Week of \"+str(y).split(\" \")[0]\n\n # make headers\n with open(weeklyReportName+'.csv', 'a', newline='') as csvfile:\n spamwriter = csv.writer(\n csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_NONE\n )\n spamwriter.writerow(['Confirmed Cases', 'Deaths', 'Recovered', 'Confirmed Cases', \"Date\",\n 'Active Cases', \"Hospitalized\", 'Overall Deaths', \"Tests Done\", \"Positivity Rate\"])\n\n for x in range(7):\n Scrape(x+1)\n\n print(\"Report Created: \" + weeklyReportName+\".csv\")\n doingWeeklyReport = False\n\nScrape()\nschedule.every(1).minutes.do(Scrape)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":11324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"153338537","text":"\"\"\"Shaffak URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom sserver.sitemap import CategorySitemap, NewsSitemap\nfrom sserver.views import index, pakistan, news_details, get_page_news_json, post_on_fb, video, video_details\nfrom django.contrib.sitemaps.views import sitemap\n\nsitemaps = {\n \"categories\": CategorySitemap,\n \"news\": NewsSitemap,\n}\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', index, name='index'),\n url(r'^news/(?P[\\w-]+)/$', pakistan, name='news_page'),\n url(r'^json/news/(?P[\\w-]+)/$', get_page_news_json, name='news_json'),\n url(r'^news/details/(?P[\\w-]+)/$', news_details, name='news_details'),\n url(r'^videos/$', video, name='videos'),\n url(r'^video/(?P\\d+)/details/$', video_details, name='video-details'),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n url(r'^ckeditor/', include('ckeditor_uploader.urls')),\n url(r'^news/post_on_fb/(?P[\\w-]+)/$', post_on_fb, name='post_on_fb'),\n]\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"Shaffak/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"29801087","text":"import sys\nimport os\nimport re\n\nimport argparse\n\nos.environ['KERNEL_VERSION'] = '5.1.0'\nos.environ['DEFAULT_PLAYGROUND_PATH'] = '/projects/icp_kernel/playground/'\nos.environ['DEFAULT_METADATA_DIR'] = os.environ['DEFAULT_PLAYGROUND_PATH']+ 'metadata/nonpreemptive/'\nos.environ['DEFAULT_QUEMU_DIR'] = os.environ['DEFAULT_PLAYGROUND_PATH']+ 'metadata/nonpreemptive/'\nos.environ['DEFAULT_SYSTEM_MAP'] = os.environ['DEFAULT_QUEMU_DIR'] +'System.map-'+ os.environ['KERNEL_VERSION']\nos.environ['DEFAULT_KERNEL_DUMP'] = os.environ['DEFAULT_QUEMU_DIR'] + 'vmlinux_dump'\nos.environ['DEFAULT_COVERAGE_MAP'] = os.environ['DEFAULT_METADATA_DIR'] + 'Coverage.Map'\nos.environ['DEFAULT_PROFILE_MAP'] = os.environ['DEFAULT_METADATA_DIR'] + 'Profile.Map'\nos.environ['DEFAULT_OUTPUT_FILE'] = 'Decoded.Profiles'\n\n\nProfileMap = []\nCoverageMap = []\nSymbolTable = []\nObjectMap = []\n\ncoverageSize = 0\nsymbolSize = 0\nobjectSize = 0\nprofileSize = 0\n\nOUTPUT_DIRECT = 0x2\nOUTPUT_INDIRECT = 0x1\n\noutputType = 0x3\n\ndef generateNewPathDefaults(metadata_dir):\n os.environ['DEFAULT_METADATA_DIR'] = os.environ['DEFAULT_PLAYGROUND_PATH']+ 'metadata/'+ metadata_dir +\"/\"\n os.environ['DEFAULT_QUEMU_DIR'] = os.environ['DEFAULT_PLAYGROUND_PATH']+ 'metadata/'+ metadata_dir +\"/\"\n os.environ['DEFAULT_SYSTEM_MAP'] = os.environ['DEFAULT_QUEMU_DIR'] +'System.map-'+ os.environ['KERNEL_VERSION']\n os.environ['DEFAULT_KERNEL_DUMP'] = os.environ['DEFAULT_QUEMU_DIR'] + 'vmlinux_dump'\n os.environ['DEFAULT_COVERAGE_MAP'] = os.environ['DEFAULT_METADATA_DIR'] + 'Coverage.Map'\n os.environ['DEFAULT_PROFILE_MAP'] = os.environ['DEFAULT_METADATA_DIR'] + 'Profile.Map'\n os.environ['DEFAULT_OUTPUT_FILE'] = 'Decoded.Profiles'\n\ndef testPrintMap(_map):\n for elem in _map:\n print(elem)\n\ndef loadCoverageMap(filename):\n global coverageSize\n inputFile = open(filename)\n for line in inputFile:\n elements=line.strip().split(\"||||\")\n index_1 = 
elements[1].index(\"@\")\n index_2 = elements[1].index(\")\")\n index_p1 = elements[0].index(\"@\")\n index_p2 = elements[0].index(\"(\")\n # Entry has format (CallSiteTag, CallSiteParent, LLVMStringRepresentation, CoverageIndex) \n touple = (elements[1][index_1+19:index_2].strip() , elements[0][index_p1+1:index_p2], elements[2].strip(), int(elements[3]))\n CoverageMap.append(touple) \n coverageSize = len(CoverageMap)\n inputFile.close()\n\ndef loadSymbolTable(filename):\n global symbolSize\n inputFile = open(filename)\n for line in inputFile:\n elements = line.strip().split(\" \")\n # Entry has format (SymbolAddress, SymbolType, SymbolName) \n touple = (elements[0], elements[1], elements[2])\n SymbolTable.append(touple)\n symbolSize = len(SymbolTable)\n inputFile.close()\n\ndef getType(assemblyFormat):\n index= assemblyFormat.find('callq 0x')\n if (index >= 0):\n return 1\n else:\n return 0\n\ndef loadKernelObject(filename, instFilter):\n global objectSize\n inputFile = open(filename)\n for line in inputFile:\n elements = line.replace(\"\\t\",\" \").strip().split(\":\")\n if len(elements) >= 2:\n split_index = elements[1].find(instFilter)\n if split_index >= 0:\n call_signature_flat = elements[1][split_index:len(elements[1])]\n call_signature = re.sub(\"\\s\\s+\" , \" \", call_signature_flat)\n # Entry has format CSAddress, direct/indirect(0/1), AssemblyFormatOfCall(not used)\n touple = (elements[0], getType(call_signature), call_signature)\n ObjectMap.append(touple)\n objectSize = len(ObjectMap)\n inputFile.close()\n\ndef loadProfileData(filename):\n global profileSize\n inputFile = open(filename)\n for line in inputFile:\n elements=line.strip().split(\" \")\n touple = (elements[0], int(elements[1]) , elements[2], int(elements[3]), elements[4])\n ProfileMap.append(touple)\n profileSize = len(ProfileMap)\n inputFile.close()\n\ndef getFunctionName(functionAddress):\n funcNames = list(filter(lambda x: x[0] == functionAddress, SymbolTable))\n return list(map(lambda x: x[2], funcNames))\n\ndef getParentRecursively(callsite, inferior, superior):\n if inferior == superior:\n return SymbolTable[superior][0]\n middle = int((inferior+superior)/2)\n touple_1 = SymbolTable[middle]\n touple_2 = SymbolTable[min(middle+1, symbolSize-1)]\n hexnumber_1 = int(touple_1[0], 16)\n hexnumber_2 = int(touple_2[0], 16)\n address = int(callsite, 16)\n if hexnumber_1 <= address and address < hexnumber_2:\n return touple_1[0]\n elif hexnumber_2 <= address:\n return getParentRecursively(callsite, min(middle+1, symbolSize-1), superior)\n elif address < hexnumber_1:\n return getParentRecursively(callsite, inferior, middle)\n\ndef getParentName(callsite):\n parentAddress = getParentRecursively(callsite, 0, symbolSize - 1 )\n parentList = list(filter(lambda x: x[0] == parentAddress, SymbolTable))\n return list(map(lambda x: x[2], parentList))\n\ndef getObjectSignatureRecursively(callsite, inferior, superior):\n if inferior == superior:\n return ObjectMap[superior]\n middle = int((inferior+superior)/2)\n hexnumber = int(ObjectMap[middle][0], 16)\n address = int(callsite, 16)\n if hexnumber == address:\n return ObjectMap[middle]\n elif hexnumber < address:\n return getObjectSignatureRecursively(callsite, min(middle+1, objectSize-1), superior)\n else:\n return getObjectSignatureRecursively(callsite, inferior, middle)\n\ndef getObjectSignature(callsite):\n return getObjectSignatureRecursively(callsite, 0, objectSize-1)\n\ndef getCoverageRecursive(tag, inferior, superior):\n if inferior == superior:\n return 
CoverageMap[superior]\n middle = int((inferior+superior)/2)\n intEntry = int(CoverageMap[middle][0])\n intTag = int(tag)\n if intEntry == intTag:\n return CoverageMap[middle]\n if intEntry < intTag:\n return getCoverageRecursive(tag, min(middle+1, coverageSize-1), superior)\n else:\n return getCoverageRecursive(tag, inferior, middle)\n\ndef getCoverageData(tag):\n return getCoverageRecursive(tag, 0, coverageSize-1)\n\ndef decodeProfile(profile):\n child = getFunctionName(profile[0])\n parent = getParentName(profile[2])\n callType = getObjectSignature(profile[2])\n coverage = getCoverageData(profile[4])\n\n if parent[0] == '__brk_limit':\n return (None, 'invalid parent', child , parent, callType[1], profile[3], coverage[3], coverage[2], coverage[1] )\n\n if coverage[1] not in parent:\n return (None, 'unknown parent', child , parent, callType[1], profile[3], coverage[3], coverage[2], coverage[1] )\n # Element format childAliases parentAliases callType executionCount, coverageId, llvmStringRepresentation\n return (child, parent, callType[1], profile[3], coverage[3], coverage[2] )\n\ndef decodeProfileMap(profileMap):\n return list(map(decodeProfile, profileMap))\n\ndef globalWeightAnalysis(profiles):\n profileWeight = 0\n callSiteMap = {}\n for profile in profiles:\n profileWeight += profile[3]\n callSiteMap[profile[0]] = profile[1]\n totalWeight = 0\n for elem in callSiteMap.values():\n totalWeight += elem\n print(\"Number of collected profiles: \"+str(len(profiles)))\n print(\"Overall execution count(all arcs): \"+ str(profileWeight))\n print(\"Brute execution count(all callsites): \"+ str(totalWeight))\n\ndef typeFiltering(profiles, outputTy):\n iProfiles = []\n dProfiles = []\n if (outputTy & OUTPUT_INDIRECT):\n iProfiles = list(filter(lambda x: x[2] == 0, profiles))\n if (outputTy & OUTPUT_DIRECT):\n dProfiles = list(filter(lambda x: x[2] == 1, profiles))\n return iProfiles+dProfiles\n\n#Parser\n\ndef writeString(out, string):\n out.write(string)\n \ndef writeInt(out, integer):\n out.write(str(integer))\n\ndef writeStruct(out, elements):\n writeElement(out, elements[0])\n for element in elements[1:]:\n out.write(\"?\")\n writeElement(out, element)\n\ndef writeTuple(out, elements):\n writeElement(out, elements[0])\n for element in elements[1:]:\n out.write(\"|\")\n writeElement(out, element)\n\ndef writeElement(out, element):\n if type(element) == str:\n writeString(out, element)\n return\n if type(element) == int:\n writeInt(out, element)\n return\n if type(element) == list:\n writeStruct(out, element)\n return\n if type(element) == tuple:\n writeTuple(out, element)\n return \n\ndef writeOutput(filename, profiles):\n out = open(filename, 'w')\n for profile in profiles:\n writeElement(out, profile)\n out.write(\"\\n\")\n out.close()\n\ndef parseArguments():\n parser = argparse.ArgumentParser(description='VUProfiler offline analysis.')\n parser.add_argument('-c','--coverage_map', nargs='?', \n const=os.environ['DEFAULT_COVERAGE_MAP'], \n default=os.environ['DEFAULT_COVERAGE_MAP'],\n help='Path to the file used to load the coverage map')\n \n parser.add_argument('-s','--system_map', nargs='?', \n const=os.environ['DEFAULT_SYSTEM_MAP'], \n default=os.environ['DEFAULT_SYSTEM_MAP'],\n help='Path to the file used to load the kernel System.map')\n\n parser.add_argument('-k','--kernel_dump', nargs='?', \n const=os.environ['DEFAULT_KERNEL_DUMP'], \n default=os.environ['DEFAULT_KERNEL_DUMP'],\n help='Path to the file used to load the kernel object dump')\n\n parser.add_argument('-p','--profile_map', 
nargs='?', \n const=os.environ['DEFAULT_PROFILE_MAP'], \n default=os.environ['DEFAULT_PROFILE_MAP'],\n help='Path to the file used to load the benchmarked profiles')\n\n parser.add_argument('--prune', nargs='?', \n const=None, \n default=None,\n help='Prune until you have PROFILE_MAP% most expensive profiles')\n\n parser.add_argument('-o','--out', nargs='?', \n const=os.environ['DEFAULT_OUTPUT_FILE'] , \n default=os.environ['DEFAULT_OUTPUT_FILE'] ,\n help='Prune until you have PROFILE_MAP% most expensive profiles')\n\n parser.add_argument('-f','--folder', nargs='?', \n const=\"\" , \n default=\"\" ,\n help='Dummy argument.')\n\n #parser.add_argument()\n parser.add_argument('-i','--indirect', action='store_true', help=\"Output indirect call profiles\")\n parser.add_argument('-d','--direct', action='store_true', help=\"Output direct call profiles\")\n parser.add_argument('-t','--test', nargs='*', choices=['coverage-map', 'system-map', 'kernel-dump', 'profile-map'], help=\"Test profiler components\")\n\n \n\n return parser.parse_args()\n\n\ndef main():\n global outputType\n print(\"Initializing profiler data structures...\")\n if len(sys.argv) >= 3 and (sys.argv[1] == \"-f\" or sys.argv[1] == \"--folder\") :\n generateNewPathDefaults(sys.argv[2])\n\n \n\n paramList = parseArguments()\n \n if paramList.test != None and len(paramList.test) != 0:\n print('Executing test framework')\n for test in paramList.test:\n if (test == 'coverage-map'):\n loadCoverageMap(paramList.coverage_map)\n testPrintMap(CoverageMap)\n print(\"Size of CoverageMap is \"+str(coverageSize))\n if (test == 'system-map'):\n loadSymbolTable(paramList.system_map)\n testPrintMap(SymbolTable)\n if (test == 'kernel-dump'):\n loadKernelObject(paramList.kernel_dump,'call')\n testPrintMap(ObjectMap)\n if (test == 'profile-map'):\n loadProfileData(paramList.profile_map)\n testPrintMap(ProfileMap)\n print(\"Size of ProfileMap is \"+str(profileSize))\n return \n\n print(paramList)\n print(\"Loading Coverage Map...\")\n loadCoverageMap(paramList.coverage_map)\n\n print(\"Loading Symbol Table...\") \n loadSymbolTable(paramList.system_map)\n \n print(\"Loading Object Dump(direct/indirect callsite information)...\")\n loadKernelObject(paramList.kernel_dump,'call')\n\n print(\"Loading profiling candidates...\")\n loadProfileData(paramList.profile_map) \n\n print(\"Order profile map based on execution count(descending)...\")\n ProfileMap.sort(key= lambda tup: -tup[3])\n\n print(\"Global Weight analysis...\")\n globalWeightAnalysis(ProfileMap)\n\n DecodedMap = []\n DecodedMap = decodeProfileMap(ProfileMap)\n \n falsePositives = list(filter(lambda x: x[0] == None, DecodedMap))\n print(\"Eliminated \"+str(len(falsePositives))+\" false positives...\")\n\n #for elem in falsePositives:\n # print(elem)\n \n DecodedMap = list(filter(lambda x: x[0] != None, DecodedMap))\n\n if (paramList.indirect == True):\n if (outputType == 0x3):\n outputType = OUTPUT_INDIRECT\n if (paramList.direct == True):\n if (outputType == 0x3):\n outputType = OUTPUT_DIRECT\n else:\n outputType += OUTPUT_DIRECT\n\n DecodedMap = typeFiltering(DecodedMap, outputType)\n DecodedMap.sort(key= lambda tup: -tup[3])\n\n pruningInterval = len(DecodedMap)\n if (paramList.prune != None):\n print(\"TODO must implement pruning(modifies pruningInterval)\")\n \n #testPrintMap(DecodedMap)\n print('Writing output to file '+paramList.out+'...')\n writeOutput(os.environ['DEFAULT_METADATA_DIR']+paramList.out, DecodedMap[0: pruningInterval])\n \n\n 
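# --- Editor's aside (hedged sketch, not part of the original profiler script) ---
# The record above hand-rolls binary searches (getParentRecursively,
# getObjectSignatureRecursively, getCoverageRecursive) over sorted address
# tables. The standard-library bisect module expresses the same
# "find the enclosing symbol for a callsite address" idea more compactly.
# This is a self-contained illustration only: parent_symbol and the
# three-entry table below are invented for the example, not taken from the
# original code.
import bisect

def parent_symbol(callsite_hex, symbol_table):
    """symbol_table: list of (hex_address, type, name) tuples sorted by address."""
    addresses = [int(addr, 16) for addr, _, _ in symbol_table]
    # bisect_right returns the insertion point; the symbol starting at or
    # before the callsite is the entry just left of that point.
    i = bisect.bisect_right(addresses, int(callsite_hex, 16)) - 1
    return symbol_table[max(i, 0)][2]

# Usage with a fabricated table (addresses and names are made up):
table = [("ffffffff81000000", "T", "_text"),
         ("ffffffff81001000", "T", "do_one_initcall"),
         ("ffffffff81002000", "T", "run_init_process")]
print(parent_symbol("ffffffff81001a2c", table))  # -> do_one_initcall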
#print(paramList)\n\nmain()\n","sub_path":"include/profiler/vuprofiler_offline_analysis.py","file_name":"vuprofiler_offline_analysis.py","file_ext":"py","file_size_in_byte":13321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"588916704","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# Author: Lucas\n# Date: 2019-08-20 23:57:26\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def convertBST(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: TreeNode\n        \"\"\"\n        self.s = 0\n        self.last = None\n        self.count = 1\n        self.reverse_order(root)\n        return root\n\n    def reverse_order(self, node):\n        if node is None:\n            return\n        self.reverse_order(node.right)\n        if self.last is None:\n            self.last = node.val\n        elif self.last == node.val:\n            self.count += 1\n        else:\n            self.s += self.last * self.count\n            self.last = node.val\n            self.count = 1\n        node.val += self.s\n        self.reverse_order(node.left)\n","sub_path":"501-600/538_ConvertBSTToGreaterTree/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"649522101","text":"# A worked example of how self is passed\nclass A():\n    name = \"liuyang\"\n    age = 18\n\n    def __init__(self):\n        self.name = \"aaa\"\n        self.age = 2222\n    def say(self):\n        print(self.name)\n        print(self.age)\nclass B():\n    name = \"bbb\"\n    age = 90\n\na = A()\n# Here the system implicitly passes a as the first argument\na.say()\n\n# Here self is replaced by a\nA.say(a)\n# The class A itself can likewise be passed in as the argument\nA.say(A)\n\n# Here the class B is passed in; since B has name and age attributes, no error is raised\nA.say(B)\n\n# The code above relies on duck typing","sub_path":"05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"225290364","text":"# -*- coding: utf-8 -*-\nimport keras\n# model definition for the neural network\nfrom keras.models import Sequential\n# convolution and pooling layers\nfrom keras.layers import Conv2D, MaxPooling2D\n# activation, dropout, flattening and fully connected layers\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.utils import np_utils\nimport numpy as np\n\nclasses = [\"monkey\", \"boar\", \"crow\"]\nnum_classes = len(classes)\nimage_size = 50\n\n# define the main function\ndef main():\n    X_train, X_test, y_train, y_test = np.load(\"./animal_aug.npy\",allow_pickle=True)\n    # scale pixel values into the 0-1 range\n    X_train = X_train.astype(\"float\") / 256\n    X_test = X_test.astype(\"float\") / 256\n    # one-hot vector: 1 for the correct class, 0 elsewhere (e.g. [0,0,1])\n    y_train = np_utils.to_categorical(y_train, num_classes)\n    y_test = np_utils.to_categorical(y_test, num_classes)\n\n    model = model_train(X_train, y_train)\n    # evaluation\n    model_eval(model, X_test, y_test)\n\n# https://keras.io/\n# for reference\n\ndef model_train(X, y):\n    # define the model\n    model = Sequential()\n    # 32 filters of 3x3; 'same' padding pads pixels so the output keeps the input size; input_shape is the image shape (2nd dimension onward)\n    # it can be checked in gen_data.py => print(X_train.shape)\n    model.add(Conv2D(32, (3,3), padding='same', input_shape=X.shape[1:]))\n    # pass positives through, zero out negatives\n    model.add(Activation('relu'))\n    # 2nd layer, 3x3\n    model.add(Conv2D(32,(3,3)))\n    model.add(Activation('relu'))\n    # layer3\n    model.add(MaxPooling2D(pool_size=(2,2)))\n    # 25% cut\n    model.add(Dropout(0.25))\n\n    model.add(Conv2D(64,(3,3), padding='same'))\n    model.add(Activation('relu'))\n    model.add(Conv2D(64,(3,3)))\n    model.add(Activation('relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.25))\n\n    # fully connected layers\n    model.add(Flatten())\n    model.add(Dense(512))\n    model.add(Activation('relu'))\n    model.add(Dropout(0.25))\n    model.add(Dense(3))\n    model.add(Activation('softmax'))\n    # 'decay' gradually lowers the learning rate\n    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)\n    # loss\n    model.compile(loss='categorical_crossentropy'\n        ,optimizer=opt\n        ,metrics=['accuracy'])\n\n    model.fit(X, y, batch_size=32, epochs=100)\n\n    model.save('./animal_cnn_aug.h5')\n\n    return model\n\ndef model_eval(model, X, y):\n    scores = model.evaluate(X, y, verbose=1)\n    print('Test Loss: ', scores[0])\n    print('Test Accuracy: ', scores[1])\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"animal_classifier/角度・反転/animal_cnn_aug.py","file_name":"animal_cnn_aug.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"517326074","text":"# 9.\tFind the maximum element among the minimum elements of the matrix columns.\n\nfrom random import randint\n\nN = 5\nM = 10\nEXT_LST = []\nfor i in range(N):\n    b = []\n    for j in range(M):\n        b.append(randint(0, 10))\n        print(f\"{b[j]:4d}\", end='')\n    EXT_LST.append(b)\n    print()\n\nfor i in range(M):\n    print(f\" ---\", end='')\nprint()\n\nMIN_LST = []\nfor i in range(M):\n    mi = 0\n    for j in range(N):\n        if EXT_LST[j][i] < EXT_LST[mi][i]:\n            mi = j\n    MIN_LST.append(EXT_LST[mi][i])\n\nfor i in MIN_LST:\n    print(f\"{i:4d}\", end='')\nprint(' |', max(MIN_LST))\n","sub_path":"Lesson_3/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"544202199","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('stocks', '0005_auto_20150417_1441'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='stock',\n            name='state',\n            field=models.IntegerField(default=0, choices=[(0, b'\\xe6\\xad\\xa3\\xe5\\xb8\\xb8'), (1, b'\\xe7\\xa6\\x81\\xe6\\xad\\xa21'), (2, b'\\xe5\\x81\\x9c\\xe7\\x89\\x8c')]),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"stocks/migrations/0006_auto_20150518_2030.py","file_name":"0006_auto_20150518_2030.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"240223427","text":"import numpy as np \nfrom pytplot import get_data, store_data\n\ndef mms_fgm_remove_flags(probe, data_rate, level, instrument, suffix=''):\n    \"\"\"\n    This function removes data flagged by the FGM 'flag' variable (flags > 0), \n    in order to only show science quality data by default.\n    \n    Parameters:\n        probe : str or list of str\n            probe or list of probes, valid values for MMS probes are ['1','2','3','4']. \n\n        data_rate : str or list of str\n            instrument data rates for FGM include 'brst' 'fast' 'slow' 'srvy'. The\n            default is 'srvy'.\n\n        level : str\n            indicates level of data processing. the default if no level is specified is 'l2'\n\n        suffix: str\n            The tplot variable names will be given this suffix. 
By default, \n no suffix is added.\n\n \"\"\"\n if not isinstance(probe, list): probe = [probe]\n if not isinstance(data_rate, list): data_rate = [data_rate]\n if not isinstance(level, list): level = [level]\n\n for this_probe in probe:\n for this_dr in data_rate:\n for this_lvl in level:\n\n times, flags = get_data('mms'+this_probe+'_'+instrument+'_flag_'+this_dr+'_'+this_lvl+suffix)\n times, gse_data = get_data('mms'+this_probe+'_'+instrument+'_b_gse_'+this_dr+'_'+this_lvl+suffix)\n times, gsm_data = get_data('mms'+this_probe+'_'+instrument+'_b_gsm_'+this_dr+'_'+this_lvl+suffix)\n times, dmpa_data = get_data('mms'+this_probe+'_'+instrument+'_b_dmpa_'+this_dr+'_'+this_lvl+suffix)\n times, bcs_data = get_data('mms'+this_probe+'_'+instrument+'_b_bcs_'+this_dr+'_'+this_lvl+suffix)\n\n flagged_data = np.where(flags != 0.0)[0]\n gse_data[flagged_data] = np.nan\n gsm_data[flagged_data] = np.nan\n dmpa_data[flagged_data] = np.nan\n bcs_data[flagged_data] = np.nan\n\n store_data('mms'+this_probe+'_'+instrument+'_b_gse_'+this_dr+'_'+this_lvl+suffix, data={'x': times, 'y': gse_data})\n store_data('mms'+this_probe+'_'+instrument+'_b_gsm_'+this_dr+'_'+this_lvl+suffix, data={'x': times, 'y': gsm_data})\n store_data('mms'+this_probe+'_'+instrument+'_b_dmpa_'+this_dr+'_'+this_lvl+suffix, data={'x': times, 'y': dmpa_data})\n store_data('mms'+this_probe+'_'+instrument+'_b_bcs_'+this_dr+'_'+this_lvl+suffix, data={'x': times, 'y': bcs_data})","sub_path":"pyspedas/mms/fgm/mms_fgm_remove_flags.py","file_name":"mms_fgm_remove_flags.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"455299631","text":"# /usr/bin/env python\n# coding=utf-8\nimport time\nimport datetime\nfrom tqdm import trange\nimport utils\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nimport os\nimport logging\nfrom utils import set_logger, Params\nfrom ZEN import WEIGHTS_NAME, CONFIG_NAME\nimport torch\nimport json\nimport numpy as np\nimport random\nfrom ZEN import BertTokenizer, ZenForPreTraining, ZenConfig\nfrom ZEN import WarmupLinearSchedule, BertAdam\nfrom torch.utils.data import DataLoader, DistributedSampler, RandomSampler\nfrom dataloader import PregeneratedDataset\n\nparser = ArgumentParser()\nparser.add_argument(\"--learning_rate\", default=3e-5, type=float, help=\"The initial learning rate for Adam.\")\nparser.add_argument('--multi_gpu', action='store_true', help='是否使用多GPU')\nparser.add_argument(\"--epochs\", type=int, default=10, help=\"Number of epochs to train for\")\nparser.add_argument('--seed', type=int, default=2020, help=\"random seed for initialization\")\nparser.add_argument('--scratch', action='store_true', help=\"Whether to train from scratch\")\nparser.add_argument('--save_name', type=str, default=\"zen\",\n help=\"The prefix used for saving the remote model\")\nparser.add_argument('--gradient_accumulation_steps', type=int, default=2,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\nparser.add_argument(\"--train_batch_size\", default=256, type=int,\n help=\"Total batch size for training.\")\nparser.add_argument(\"--warmup_proportion\", default=0.1, type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10%% of training.\")\n\nparser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\nparser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"local_rank for distributed training on gpus\")\nparser.add_argument('--loss_scale', type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\"\n \"0 (default value): dynamic loss scaling.\"\n \"Positive power of 2: static loss scaling value.\")\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3,4,5,6,7\"\n\n\ndef train(model, train_dataloader, epoch, optimizer, params, global_step, warmup_schedule=None):\n \"\"\"\n :param epoch: current epoch\n :param global_step: current total step\n :param warmup_schedule: for fp16 warmup\n :return:\n \"\"\"\n model.train()\n # 记录平均损失\n loss_avg = utils.RunningAverage()\n\n t = trange(len(train_dataloader), desc=f\"Epoch {epoch}\")\n for step, _ in enumerate(t):\n batch = next(iter(train_dataloader))\n batch = tuple(t.to(params.device) for t in batch)\n input_ids, input_mask, segment_ids, lm_label_ids, is_next, ngram_ids, ngram_masks, ngram_positions, \\\n ngram_starts, ngram_lengths, ngram_segment_ids = batch\n\n loss = model(input_ids,\n ngram_ids,\n ngram_positions,\n segment_ids,\n ngram_segment_ids,\n input_mask,\n ngram_masks,\n lm_label_ids,\n is_next)\n\n if params.n_gpu > 1 and args.multi_gpu:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n loss_avg.update(loss.item() * args.gradient_accumulation_steps)\n t.set_postfix_str(f\"Loss: {loss_avg():.5f}\")\n\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n # modify learning rate with special warm up BERT uses\n # if args.fp16 is False, BertAdam is used that handles this automatically\n lr_this_step = args.learning_rate * warmup_schedule.get_lr(step=global_step)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n # Save a trained model\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%m%d%H%M%S')\n # dir to save\n saving_path = Path(os.path.join(params.pretrain_model_dir, args.save_name + st + \"_epoch_\" + str(epoch)))\n if saving_path.is_dir() and list(saving_path.iterdir()):\n logging.warning(f\"Output directory ({saving_path}) already exists and is not empty!\")\n saving_path.mkdir(parents=True, exist_ok=True)\n\n logging.info(\"***** Saving fine-tuned model ***** \")\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\n output_model_file = os.path.join(saving_path, WEIGHTS_NAME)\n output_config_file = os.path.join(saving_path, CONFIG_NAME)\n\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n\n\nif __name__ == '__main__':\n params = Params()\n set_logger(save=True, log_path=params.pretrain_model_dir)\n # get data epochs\n samples_per_epoch = []\n for i in range(args.epochs):\n epoch_file = params.pretrain_data_dir / f\"epoch_{i}.json\"\n metrics_file = params.pretrain_data_dir / f\"epoch_{i}_metrics.json\"\n if epoch_file.is_file() and metrics_file.is_file():\n metrics = json.loads(metrics_file.read_text())\n 
samples_per_epoch.append(metrics['num_training_examples'])\n else:\n if i == 0:\n exit(\"No training data was found!\")\n print(f\"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).\")\n print(\"This script will loop over the available data, but training diversity may be negatively impacted.\")\n num_data_epochs = i\n break\n else:\n num_data_epochs = args.epochs\n\n if args.local_rank == -1 and args.multi_gpu:\n params.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n params.device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n\n logging.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n params.device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n # 实际的batch_size,用了梯度累加的结果\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n # set random seed\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path=params.corpus_dir,\n do_lower_case=False)\n\n # get total steps\n total_train_examples = 0\n for i in range(args.epochs):\n # The modulo takes into account the fact that we may loop over limited epochs of data\n total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]\n num_train_optimization_steps = int(\n total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)\n if args.local_rank != -1:\n num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()\n\n if args.scratch:\n config = ZenConfig(vocab_size_or_config_json_file=1446,\n word_vocab_size=1979,\n hidden_size=768,\n num_hidden_layers=6,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n num_hidden_word_layers=3)\n model = ZenForPreTraining(config)\n else:\n model = ZenForPreTraining.from_pretrained(params.bert_model_dir)\n\n if args.fp16:\n model.half()\n model.to(params.device)\n\n if args.local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n model = DDP(model)\n elif n_gpu > 1 and args.multi_gpu:\n model = torch.nn.DataParallel(model)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n 
bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n global_step = 0\n logging.info(\"***** Running training *****\")\n logging.info(\" Num examples = %d\", total_train_examples)\n logging.info(\" Batch size = %d\", args.train_batch_size)\n logging.info(\" Num steps = %d\", num_train_optimization_steps)\n for epoch in range(args.epochs):\n # get dataloader\n epoch_dataset = PregeneratedDataset(epoch=epoch,\n training_path=params.pretrain_data_dir,\n tokenizer=tokenizer,\n num_data_epochs=num_data_epochs,\n do_ngram=True,\n fp16=args.fp16)\n if args.local_rank == -1:\n train_sampler = RandomSampler(epoch_dataset)\n else:\n train_sampler = DistributedSampler(epoch_dataset)\n train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n train(model, train_dataloader, epoch, optimizer, params, global_step, warmup_schedule=None)\n","sub_path":"pretrain_zen/run_pretrain.py","file_name":"run_pretrain.py","file_ext":"py","file_size_in_byte":11867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"3341114","text":"from AttributeMapper import AttributeMapper\nfrom QueryBuilder import QueryBuilder\n\nclass ObjectMetaMapper(AttributeMapper):\n def __init__(self,fieldName,options={}):\n AttributeMapper.__init__(self,fieldName,options)\n self.queryBuilder=QueryBuilder()\n return\n\n def processElement(self, sqlQueries, data,msgKey):\n mapTable = self.getOption('mapTable')\n mappingTemplate={}\n srcID=self.getOption('srcID')\n destID=self.getOption('destID')\n mappingTemplate.update({'srcID': AttributeMapper(srcID, {}),\n 'destID': AttributeMapper(destID, {}),\n })\n jsonDataList=[]\n if type(data) is not list:\n jsonDataList.append(data)\n else:\n jsonDataList=data\n sqlQueries = self.deleteElement(sqlQueries, msgKey)\n for jsonData in jsonDataList:\n sqlData={}\n lookupQuery=self.queryBuilder.buildSelect(self.getOption('lookupTable'),['msgKey'],self.getOption('lookupColumn')+\"='\"+str(jsonData['objectId'])+\"'\")\n sqlData.update({\n 'destID' : jsonData['objectId'],\n 'srcID': msgKey\n })\n sqlQueries = self.applyTransformation(mapTable, mappingTemplate, sqlData, sqlQueries)\n return sqlQueries\n\n def applyTransformation(self, tableName, mappingTemplate, data, sqlQueries):\n insertDict = {}\n for k, v in mappingTemplate.items():\n fieldName = v.getFieldName()\n if k in data:\n jsonValue = data[k]\n if (v.isComplexElement()):\n sqlQueries = v.processElement(sqlQueries, jsonValue, data['msgKey'])\n else:\n value = v.getTransformedValue(jsonValue)\n insertDict[fieldName] = value\n else:\n insertDict[fieldName] = None\n qry = self.queryBuilder.buildInsert(tableName, insertDict)\n sqlQueries.append(qry)\n return sqlQueries\n\n def deleteElement(self, sqlQueries, msgKey):\n table = self.getOption('mapTable')\n srcID=self.getOption('srcID')\n qry = self.queryBuilder.buildDelete(table, srcID +\" = '\" + msgKey + \"'\")\n sqlQueries.append(qry)\n return sqlQueries\n\n def getTransformedValue(self,value):\n return 
value.get('objectId')\n","sub_path":"code/Service/ObjectMetaMapper.py","file_name":"ObjectMetaMapper.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"626305113","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\nCreated on Sep 11, 2018\n\n@author: yytang\n\n1. get the first page url\n2. get the pictures of the current page\n3. get the next page url\n\"\"\"\nimport abc\nimport os\n\nimport scrapy\n\nfrom comics_crawler.items import ComicsCrawlerItem\n# from comics_crawler.utils.misc import get_spider_name_from_domain\n\n\nclass ComicSpider(scrapy.Spider):\n \"\"\"\n classdocs\n\n example: https://www.wnacg.org/photos-index-aid-57940.html\n \"\"\"\n dom = 'www.comics.com'\n allowed_domains = [dom]\n name = allowed_domains[0]\n comic_name = ''\n\n @abc.abstractmethod\n def parse_title(self, response):\n return ''\n\n @abc.abstractmethod\n def parse_page(self, response):\n return [], None\n\n def polish_url(self, url):\n url = url.strip('\\n').strip()\n # pattern = 'http://www.177pic.info/html/[\\d|\\/]+.html'\n # url = re.search(pattern, url).group(0)\n return url\n\n def parse_next_page(self, response):\n return None\n\n def parse_image(self, response):\n yield None\n\n def __init__(self, *args, **kwargs):\n super(ComicSpider, self).__init__(*args, **kwargs)\n self.images_root = kwargs['images_root']\n urls = kwargs['urls']\n self.start_urls = [self.polish_url(url) for url in urls]\n print(self.start_urls)\n\n def parse(self, response):\n start_image_index_key = 'start_index'\n if start_image_index_key in response.meta:\n start_image_index = response.meta[start_image_index_key]\n else:\n start_image_index = 1\n\n title_key = 'comics_title'\n if title_key in response.meta:\n title = response.meta[title_key]\n else:\n title = self.parse_title(response)\n\n page_urls, is_page = self.parse_page(response)\n\n for picture_index, page_url in enumerate(page_urls):\n page_url = response.urljoin(page_url)\n image_name = f\"{title}/{start_image_index + picture_index:03d}.jpg\"\n image_name = os.path.join(self.comic_name, image_name)\n image_path = os.path.join(self.images_root, image_name)\n if os.path.isfile(image_path):\n continue\n item = ComicsCrawlerItem()\n item['Referer'] = response.url\n item['image_name'] = image_name\n\n if is_page:\n request = scrapy.Request(page_url, callback=self.parse_image)\n request.meta['item'] = item\n yield request\n else:\n item['image_urls'] = [page_url]\n yield item\n\n next_page_url = self.parse_next_page(response)\n if next_page_url:\n request = scrapy.Request(next_page_url, callback=self.parse)\n request.meta[start_image_index_key] = len(page_urls) + start_image_index\n request.meta[title_key] = title\n yield request\n","sub_path":"comics_crawler/spiders/comic_spider.py","file_name":"comic_spider.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"203834518","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nimport numpy as np\n\ndef run_model(model,running_mode='train', train_set=None, valid_set=None, test_set=None,\n batch_size=1, learning_rate=0.01, n_epochs=1, stop_thr=1e-4, shuffle=True, device=torch.device('cuda')):\n \"\"\"\n This function either trains or evaluates a model.\n\n training mode: the model is trained 
and evaluated on a validation set, if provided.\n    If no validation set is provided, the training is performed for a fixed\n    number of epochs.\n    Otherwise, the model should be evaluated on the validation set\n    at the end of each epoch and the training should be stopped based on one\n    of these two conditions (whichever happens first):\n    1. The validation loss stops improving.\n    2. The maximum number of epochs is reached.\n\n    testing mode: the trained model is evaluated on the testing set\n\n    Inputs:\n\n    model: the neural network to be trained or evaluated\n    running_mode: string, 'train' or 'test'\n    train_set: the training dataset object generated using the class MyDataset\n    valid_set: the validation dataset object generated using the class MyDataset\n    test_set: the testing dataset object generated using the class MyDataset\n    batch_size: number of training samples fed to the model at each training step\n    learning_rate: determines the step size in moving towards a local minimum\n    n_epochs: maximum number of epochs for training the model\n    stop_thr: if the validation loss from one epoch to the next is less than this\n        value, stop training\n    shuffle: determines if the shuffle property of the DataLoader is on/off\n\n    Outputs when running_mode == 'train':\n\n    model: the trained model\n    loss: dictionary with keys 'train' and 'valid'\n        The value of each key is a list of loss values. Each loss value is the average\n        of training/validation loss over one epoch.\n        If the validation set is not provided just return an empty list.\n    acc: dictionary with keys 'train' and 'valid'\n        The value of each key is a list of accuracies (percentage of correctly classified\n        samples in the dataset). Each accuracy value is the average of training/validation\n        accuracies over one epoch.\n        If the validation set is not provided just return an empty list.\n\n    Outputs when running_mode == 'test':\n\n    loss: the average loss value over the testing set.\n    accuracy: percentage of correctly classified samples in the testing set.\n\n    Summary of the operations this function should perform:\n    1. Use the DataLoader class to generate training, validation, or test data loaders\n    2. In the training mode:\n        - define an optimizer (we use SGD in this homework)\n        - call the train function (see below) for a number of epochs until a stopping\n          criterion is met\n        - call the test function (see below) with the validation data loader at each epoch\n          if the validation set is provided\n\n    3. 
In the testing mode:\n - call the test function (see below) with the test data loader and return the results\n\n \"\"\"\n\n if running_mode == 'train':\n\n # Variables to return\n loss = {}\n loss['train'] = []\n loss['valid'] = []\n\n accuracy = {}\n accuracy['train'] = []\n accuracy['valid'] = []\n\n train_dataloader = torch.utils.data.DataLoader(train_set\n , batch_size=batch_size\n , shuffle=shuffle # I think for the test dataset I don't need shuffle=True\n , num_workers=2) \n\n if valid_set:\n valid_dataloader = torch.utils.data.DataLoader(valid_set\n , batch_size=batch_size\n , shuffle=shuffle # I think for the test dataset I don't need shuffle=True\n , num_workers=2) \n\n # Solver necessary for training\n solver = optim.SGD(params=model.parameters() ,lr=learning_rate)\n\n # Training the given number of epochs\n for epoch in range(n_epochs):\n print(f\"STARTING EPOCH #{epoch}\")\n model, epoch_train_loss, epoch_train_acc = _train(model, train_dataloader, solver, device)\n\n loss['train'].append(epoch_train_loss)\n accuracy['train'].append(epoch_train_acc)\n\n # VALIDATION\n if valid_set:\n\n epoch_valid_loss, epoch_valid_acc = _test(model, valid_dataloader, device)\n\n loss['valid'].append(epoch_valid_loss)\n accuracy['valid'].append(epoch_valid_acc)\n\n # BREAK CONDITION\n if len(accuracy['valid']) > 1:\n new_valid_loss = loss['valid'][-1]\n old_valid_loss = loss['valid'][-2] # Taking -2, because the new_valid_loss has already been added\n\n if old_valid_loss - new_valid_loss < stop_thr:\n print(\"REACHED STOP CONDITION\")\n print(\"OLD_VALID_LOSS \", old_valid_loss)\n print(\"NEW_VALID_LOSS \", new_valid_loss)\n break \n\n return model, loss, accuracy\n\n ###################\n #### TEST MODE ####\n ###################\n\n if running_mode == 'test':\n\n loss = {}\n accuracy = {}\n\n test_dataloader = torch.utils.data.DataLoader(test_set\n , batch_size=batch_size\n , shuffle=shuffle # I think for the test dataset I don't need shuffle=True\n , num_workers=2) \n\n loss, accuracy = _test(model, test_dataloader, device)\n\n return loss, accuracy\n\n\ndef _train(model,data_loader,optimizer,device=torch.device('cuda')):\n\n \"\"\"\n This function implements ONE EPOCH of training a neural network on a given dataset.\n Example: training the Digit_Classifier on the MNIST dataset\n Use nn.CrossEntropyLoss() for the loss function\n\n\n Inputs:\n model: the neural network to be trained\n data_loader: for loading the netowrk input and targets from the training dataset\n optimizer: the optimiztion method, e.g., SGD\n device: we run everything on CPU in this homework\n\n Outputs:\n model: the trained model\n train_loss: average loss value on the entire training dataset\n train_accuracy: average accuracy on the entire training dataset\n \"\"\"\n minibatches = len(data_loader)\n\n epoch_train_loss = 0.0\n correct = 0\n total = 0 \n model.to(device) \n\n for batch, labels in data_loader: # Per this doc, (batch,labels) = trainloader.__getitem__(); https://pytorch.org/docs/stable/torchvision/datasets.html#cifar\n\n # moving batch/labels onto the gpu/cpu\n batch, labels = batch.to(device), labels.to(device)\n \n # zeroing the parameters of the model \n # because we want to optimize them\n optimizer.zero_grad()\n \n # forward pass\n # getting the predictions from our model by passing in a mini-batch\n # the ouput will have shape (mini-batch-size, number-of-classes)\n # where each element of output is the probabliity of that example being\n # the classification correspoding to the index of the value\n 
output = model(batch.float()) # <---- Added .float() as a modification to avoid Runtime error per this post: https://stackoverflow.com/questions/56741087/how-to-fix-runtimeerror-expected-object-of-scalar-type-float-but-got-scalar-typ\n \n # Calculating loss\n loss_function = nn.CrossEntropyLoss()\n loss = loss_function(output, labels.long()) # <---- Added .long() per a CampusWire discussion in post #2505\n \n # Calculating accuracy\n predicted = torch.max(output.data, 1)[1]\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n # backward pass\n loss.backward()\n \n # optimize the parameters\n optimizer.step()\n \n # add the loss of a mini-batch to the list of epoch loss\n epoch_train_loss += loss.item()\n\n # TRAINING METRICS\n train_loss = epoch_train_loss/minibatches\n train_accuracy = 100 * correct / total\n\n return model, train_loss, train_accuracy\n\n\ndef _test(model, data_loader, device=torch.device('cuda')):\n \"\"\"\n This function evaluates a trained neural network on a validation set\n or a testing set.\n Use nn.CrossEntropyLoss() for the loss function\n\n Inputs:\n model: trained neural network\n data_loader: for loading the netowrk input and targets from the validation or testing dataset\n device: we run everything on CPU in this homework\n\n Output:\n test_loss: average loss value on the entire validation or testing dataset\n test_accuracy: percentage of correctly classified samples in the validation or testing dataset\n \"\"\"\n correct = 0\n total = 0\n model = model.to(device)\n\n loss_function = nn.CrossEntropyLoss()\n minibatches = len(data_loader)\n epoch_loss = 0.0\n # since we're testing the model we don't need to perform backprop\n with torch.no_grad(): # Disables gradient calculation to save memory; we're sure we won't be using Tensor.backward()\n for batch, labels in data_loader:\n \n batch, labels = batch, labels # So we don't modify original dataset?\n # moving batch/labels onto the gpu/cpu\n batch, labels = batch.to(device), labels.to(device)\n\n output = model(batch.float()) # <---- Added .float() to solve for RuntimeError\n # this gives us the index with the highest value outputed from the last layer\n # which coressponds to the most probable label/classification for an image\n\n loss = loss_function(output, labels.long())\n epoch_loss += loss.item()\n\n predicted = torch.max(output.data, 1)[1]\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n test_accuracy = 100 * correct / total\n test_loss = epoch_loss/minibatches\n \n\n return test_loss, test_accuracy\n\n","sub_path":"ML - Convolutional Networks and Text Classification with GloVe Embeddings/colab_upload/run_torch_model_CUDA.py","file_name":"run_torch_model_CUDA.py","file_ext":"py","file_size_in_byte":10605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"129835317","text":"import os\nimport sys\n\nmodule_path = os.path.abspath(os.path.join('../../../..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nimport unittest\nimport pandas as pd\nfrom graph.completion.deepPath.deep_path import DeepPath\nfrom graph.completion.deepPath.main import main\nclass TestGraphCompletionMethods(unittest.TestCase):\n def setUp(self):\n self.deepPath = DeepPath()\n self.input_file = \"./sample_input.txt\"\n self.output_file = main(self.input_file)\n \n def test_read_dataset(self) :\n return_dict = self.deepPath.read_dataset(self.input_file)\n self.assertTrue(return_dict, dict)\n 
self.assertTrue(return_dict.values(), list)\n self.assertTrue(type(list(return_dict.values())[0]), str)\n\n def test_evaluate(self):\n return_dict = self.deepPath.read_dataset(self.input_file)\n self.deepPath.train(return_dict)\n self.deepPath.predict(return_dict)\n evaluation = self.deepPath.evaluate(return_dict)\n self.assertIn(\"MAP\", evaluation)\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"graph/completion/deepPath/test/test_deepPath.py","file_name":"test_deepPath.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"31771784","text":"import numpy as np\n\nf = open(\"/home/dmitron/code/adventofcode/aoc6.txt\",'r')\ns = f.read()\nG = np.zeros((1000,1000))\nlines = s.split(\"\\n\")\nlines.pop()\n\nfor line in lines:\n if \"toggle\" in line:\n word = line.split(\" \")\n coords = word[1].split(\",\")\n x1,y1 = coords[0],coords[1]\n coords = word[3].split(\",\")\n x2,y2 = coords[0], coords[1]\n for i in range(int(x1),int(x2)+1):\n for j in range(int(y1),int(y2)+1):\n if G[i,j] == 1.0:\n G[i,j] = 0.0\n else:\n G[i,j] = 1.0\n elif \"turn off\" in line:\n word = line.split(\" \")\n coords = word[2].split(\",\")\n x1,y1 = coords[0], coords[1]\n coords = word[4].split(\",\")\n x2,y2 = coords[0], coords[1]\n for i in range(int(x1),int(x2)+1):\n for j in range(int(y1),int(y2)+1):\n G[i,j] = 0.0\n else:\n word = line.split(\" \")\n coords = word[2].split(\",\")\n x1,y1 = coords[0], coords[1]\n coords = word[4].split(\",\")\n x2,y2 = coords[0], coords[1]\n for i in range(int(x1),int(x2)+1):\n for j in range(int(y1),int(y2)+1):\n G[i,j] = 1.0\n\ncount = 0\nfor i in G:\n for j in i:\n if j == 1.0:\n count += 1\n\nprint(count)\n","sub_path":"aoc6.py","file_name":"aoc6.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"485206680","text":"import itertools\n\ndef number_from_perm(perm):\n return sum([ val*10**(len(perm)-1-ind) for ind, val, in enumerate(perm) ])\n\n\ndef str_from_perm(perm):\n return str(perm).replace(\", \", \"\")[1:-1]\n\n\ndef concat_product(num):\n i = 2\n res = str(num)\n while len(res) < 9:\n res += str(num * i)\n i += 1\n return res\n\n\ndef check(perm):\n str_perm = str_from_perm(perm)\n for i in range(1, len(perm)//2+1):\n if concat_product(number_from_perm(perm[:i])) == str_from_perm(perm):\n return True\n return False\n\n\ndef main():\n \n for i in itertools.permutations([9,8,7,6,5,4,3,2,1], 9):\n if check(i):\n print(number_from_perm(i))\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"p1-50/p38.py","file_name":"p38.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"347135600","text":"import os\nimport filecmp\n\n\ndef insert_between(dest, text, position):\n return (dest[:position + 1] + text + dest[position + 1:])\n\n\ndef dir_cmp(dir1, dir2):\n '''Compares two directories. 
Returns True in case there are equal.'''\n dirs_cmp = filecmp.dircmp(dir1, dir2)\n # if any([dirs_cmp.left_only,\n # dirs_cmp.right_only,\n # dirs_cmp.funny_files]):\n # return False\n if len(dirs_cmp.left_only) > 0 or \\\n len(dirs_cmp.right_only) > 0 or \\\n len(dirs_cmp.funny_files) > 0:\n return False\n (_, mismatch, errors) = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files,\n shallow=False)\n if len(mismatch) > 0 or len(errors) > 0:\n return False\n for common_dir in dirs_cmp.common_dirs:\n new_dir1 = os.path.join(dir1, common_dir)\n new_dir2 = os.path.join(dir2, common_dir)\n if not dir_cmp(new_dir1, new_dir2):\n return False\n return True\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"639142168","text":"# -- coding: utf-8 --\nfrom getroute import *\nimport numpy as np\nimport serial\nimport cv2\nimport glob\nimport time\nstickin = time.time()\n# 摄像头畸变矫正函数,传入图像,出来矫正系数\ndef camerajiaozheng1(s='pic150\\\\*.png'):\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n objp = np.zeros((6*7,3), np.float32)\n objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)\n objpoints = []\n imgpoints = []\n images = glob.glob(s)\n for fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (7,6),None)\n if ret == True:\n objpoints.append(objp)\n corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n imgpoints.append(corners2)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\n return ret, mtx, dist, rvecs, tvecs\nret,mtx,dist,rvecs,tvecs=camerajiaozheng1()\n# ret,mtx,dist,rvecs,tvecs = 0.164165913373,np.array([[684.02167337,0.0,361.31274026],[0.0,683.56601082,235.27341269],[0.0 ,0.0,1.0]]),np.array([[-0.48922554, -1.48290388 ,-0.01576108, -0.02200881 ,7.25511877]]),[[ 0.37417391],[-0.03346785],[-1.54973317]],[[-4.01649789],[ 2.1647331 ],[15.21416375]]\ndef camerajiaozheng2():\n h, w = img_gray.shape[:2]\n newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))\n mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)\n dst = cv2.remap(img_gray, mapx, mapy, cv2.INTER_LINEAR)\n x, y, w, h = roi\n dst = dst[y:y + h, x:x + w]\n return dst\nser = serial.Serial()\nser.baudrate = 115200\nser.port = 'COM5'\nser.open()\ncap = cv2.VideoCapture(0)\ncap.set(3,640)\ncap.set(4,480)\ncap.set(5,120)\nprint(cap.get(3),cap.get(4)) #640 480\n\n# qizi = [[0, 1, 0, 0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0, 0, 0, 0],\n# [0, 1, 0, 1, 0, 0, 0, 0],\n# [0, 1, 0, 0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0, 0, 0, 0],\n# [1, 1, 1, 0, 0, 0, 0, 0],\n# [0, 1, 0, 0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0, 0, 0, 0]]\n\nwhile(1):\n try:\n ret, frame = cap.read()\n cv2.imshow('frame', frame)\n img_gray = frame\n # 摄像头畸变矫正\n dst = camerajiaozheng2()\n # dst为畸变矫正后的灰度图像\n cv2.imshow('dst1',dst)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except:\n pass\n\n\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"tof/correct.py","file_name":"correct.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"549595138","text":"from itertools import islice\r\n\r\nimport numpy as np\r\nfrom sklearn.utils.validation import check_X_y, check_random_state\r\n\r\n\r\nclass SimpleBatchFeeder(object):\r\n \"\"\"A simple 
batch \"feeder\" that outputs collections of rows of `X` and `y`.\r\n\r\n The number of rows is specified by `batch_size` (default all\r\n rows). Importantly, the sequence of batches is *completely* determined by\r\n `batch_size`, `start`, and `seed`.\"\"\"\r\n def __init__(self, X, y, dtype=np.float32,\r\n batch_size=None, start=0, seed=None):\r\n self.X, self.y = check_X_y(X, y, dtype=dtype, multi_output=True)\r\n num_rows, _ = self.X.shape\r\n\r\n if batch_size is None:\r\n batch_size = num_rows\r\n if batch_size <= 0:\r\n raise ValueError('batch_size <= 0 (= {})'.format(batch_size))\r\n self.batch_size = batch_size\r\n\r\n # Initialize the indices iterator (skipping the first `start` items).\r\n random_state = check_random_state(seed)\r\n i = enumerate(_infinite_shuffled_indices(num_rows, random_state))\r\n if start < 0:\r\n raise ValueError('start <= 0 (= {})'.format(start))\r\n self._i = islice(i, start, None)\r\n\r\n def next_batch(self):\r\n # Take the next `batch_size` row indices from the (infinite) iterator\r\n # `_i`.\r\n batch = list(islice(self._i, self.batch_size))\r\n\r\n # `i` (below) is the index of the last row index in the `batch` ...\r\n assert batch\r\n i, _ = batch[-1]\r\n # ... and `next_start` is the value such that if another instance of\r\n # `BatchFeeder` was constructed with identical `X`, `y`, `batch_size`,\r\n # and `seed`, using `start=next_start` would guarantee that the new\r\n # `BatchFeeder` and *this* instance would yield identical batches from\r\n # this point on.\r\n next_start = i + 1\r\n\r\n row_indices = [r for _, r in batch]\r\n\r\n return next_start, (self.X[row_indices], self.y[row_indices])\r\n\r\n\r\ndef _infinite_shuffled_indices(n, random_state):\r\n \"\"\"A generator that repeatedly yields integers in the half-open interval\r\n `[0, n)`, where integers are repeated in a sequence of \"epochs\". 
The\r\n (infinite) sequence generated by this generator is entirely determined by\r\n the initial state of `random_state`.\"\"\"\r\n indices = np.arange(n)\r\n while True:\r\n random_state.shuffle(indices)\r\n for i in indices:\r\n yield i\r\n","sub_path":"batch_feeder.py","file_name":"batch_feeder.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"463804981","text":"# Using request to load in parsing option on GET command\nfrom flask import Flask, request\nfrom flask_cors import CORS, cross_origin\n\nimport F8_master as fMaster\n# import datetime\nimport json\n\napp = Flask(__name__)\nCORS(app)\n\n# Defined endpoint at /getq\n@app.route('/getq')\n\ndef get_question():\n \n apiRun = False\n\n # subject, sou, and difficulty are search string arguments set-up for API\n if apiRun:\n subject = request.args.get('subject')\n sou = request.args.get('sou')\n diffString = request.args.get('difficulty')\n \n else:\n subject = 'mechanics'\n sou = 'SI'\n diffString = 'Challenging'\n\n echoback = fMaster.problemGen(subject, sou, diffString)\n returnString = json.dumps(echoback)\n \n return returnString\n\nif __name__ == '__main__':\n app.run(debug = True, use_reloader=True)\n","sub_path":"Backup08/qgen.py","file_name":"qgen.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"387884422","text":"import sys, urllib2, urllib\n\nfrom base_object import BaseObject\n\n\nclass KObject(BaseObject):\n\n\n def __init__(self, accept, username=\"\", password=\"\", uid=\"\", process_id=\"\", audioFilename=\"\", metadataFilename=\"\",\n transcriptFilename=\"\", service=None, item_id=None, count=None):\n BaseObject.__init__(\n self,\n accept,\n username=username,\n password=password,\n uid=uid,\n process_id=process_id,\n audioFilename=audioFilename,\n metadataFilename=metadataFilename,\n transcriptFilename=transcriptFilename,\n service=service,\n item_id=item_id,\n count=count\n )\n self.path = 'kobjects/'\n\n @BaseObject._reset_headers\n def get(self):\n print >> sys.stderr, 'making get request to: %s%s' % (self.dest, self.path + self.uid)\n request = urllib2.Request(self.dest + self.path + self.uid, headers=self.headers)\n BaseObject._execute(self, request)\n\n\n @BaseObject._reset_headers\n def delete(self):\n print >> sys.stderr, 'making delete request to: %s%s' % (self.dest, self.path + self.uid)\n request = urllib2.Request(self.dest + self.path + self.uid, headers=self.headers)\n request.get_method = lambda: 'DELETE'\n BaseObject._execute(self, request)\n\n\n @BaseObject._reset_headers\n def get_list(self):\n print >> sys.stderr, 'making get request to: %s%s' % (self.dest, self.path)\n\n data = {}\n\n if self.count:\n data.update({'count': self.count})\n\n if self.status:\n data.update({'status_filter': '-'.join(map(lambda x: str(x), self.status))})\n\n data = urllib.urlencode(data)\n url = \"%s/%s?%s\" % (self.dest, self.path, data)\n\n request = urllib2.Request(url, headers=self.headers)\n\n BaseObject._execute(self, request)\n\n # create a new K-Object\n @BaseObject._reset_headers\n def create(self):\n print >> sys.stderr, 'making post request to: %s%s' % (self.dest, self.path)\n self.datagen = {}\n request = urllib2.Request(self.dest + self.path, data=\"\", headers=self.headers)\n BaseObject._execute(self, 
request)","sub_path":"koemei/model/kobject.py","file_name":"kobject.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"314226172","text":"from django import forms\nfrom city_game import models\n\nclass Double_city_form(forms.ModelForm):\n class Meta:\n model = models.Double_city\n fields = ['name']\n labels = {'name': 'Город '}\n widgets = {'name': forms.TextInput(attrs={'placeholder': 'введите название города', 'size': '20'})}\n\n\n#еще раз пройтись по формам\n\nclass FormWord(forms.ModelForm):\n class Meta:\n model = models.WordForm # если вместо model указать models вываливается ошибка ModelForm has no model class specified.\n fields = ['word']\n\nclass FormGameCity(forms.ModelForm):\n class Meta:\n model = models.GameCity\n fields = ['name']\n","sub_path":"city_game/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"293573553","text":"import bpy,math,sys\nimport os\n\ndef DeleteObject(name_obj):\n bpy.ops.object.select_all(action='DESELECT')\n if name_obj in bpy.data.objects :\n bpy.data.objects[name_obj].select=True\n bpy.ops.object.delete()\n\n#essentially parameters to find the right file\nindex=int(sys.argv[-1])\nframenum=int(sys.argv[-2])\nqval=float(sys.argv[-3]) #e-3, + units\ndirname=sys.argv[-4]\n\n#scenes to render\nscenes=['scene1-nodata.blend','scene2-nodata.blend','scene3-nodata.blend','scene4-nodata.blend']\nscenes=['scene1-nodata.blend']\n\n#x3d file to import\nf='%s/contourQ-%07d-q=%.1f.x3d'%(dirname,framenum,qval)\n\nprint('processing file '+ f)\nprint('frame %03d'%framenum)\n\n#default name of the imported object in blender\n\nname_obj=\"ShapeIndexedFaceSet\"\n\nfor s in scenes:\n print(s)\n out_dir='%s/%s-%.1f'%(dirname,s.split('.')[0],qval)\n if not os.path.exists(out_dir): os.makedirs(out_dir)\n #open the scene\n bpy.ops.wm.open_mainfile(filepath=s)\n #import the x3d\n bpy.ops.import_scene.x3d(filepath=f)\n #remove lights coming from paraview\n for obj in bpy.data.objects:\n if obj.name[:4]==\"TODO\":\n print(\"deactivating light:, \",obj.name)\n bpy.data.objects[obj.name].hide_render=True\n \n contour = bpy.data.objects[name_obj]\n\n #put the object at the right place\n contour.location=(0.,-3.5,1.03)\n\n scn = bpy.context.scene\n\n for face in contour.data.polygons:\n face.use_smooth = True\n\n scn.frame_current=index\n scn.render.resolution_percentage=50\n\n #here 'MatQ' has been saved in the scene file: trick -> assign it to whatever objects that won't be rendered\n # in order to make sure it will stay in the file .blend\n contour.data.materials.append(bpy.data.materials['MatQ'])\n contour.data.materials['MatQ'].use_vertex_color_paint=True\n\n\n #exemple of possible change in the scripts\n #contour1.data.materials['MatQ'].diffuse_intensity=1.0 #0.7\n #contour1.data.materials['MatQ'].alpha=0.8\n #contour1.data.materials['MatQ'].diffuse_color=(0,0,0)\n #contour1.data.materials['MatQ'].specular_color=(1,1,1)\n #contour1.data.materials['MatQ'].use_transparency=False\n #contour1.data.materials['MatQ'].raytrace_mirror.use=True\n #contour1.data.materials['MatQ'].raytrace_mirror.fresnel=0.5\n\n\n #lamp=bpy.data.objects['Lamp']\n #lamp.data.shadow_color=(0.7,0.7,0.7) \n #lamp.data.shadow_method='NOSHADOW'\n\n #set path and image name\n scn.render.filepath=out_dir+'/%07d.png'%framenum\n \n #render\n 
bpy.ops.render.render(write_still=True)\n","sub_path":"examples/movies/render_blender.py","file_name":"render_blender.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"330149147","text":"with open('report.csv', 'r') as f:\n\tlines = f.readlines()\n\tf.close()\nheader = lines.pop(0)\nlines = [x.strip().split(',') for x in lines]\n\n\nactual_start = []\nend = []\nactual_end = []\n\nfor x in lines:\n\tactual_start.append(x[0:2])\n\tend.append(x[2:4])\n\tactual_end.append(x[4:6])\n\nwith open('report_mode.csv','w') as f:\n\tf.write('real\\n')\n\tfor i in range(len(actual_end)):\n\t\tf.write('{},{}\\n'.format(*actual_start[i]))\n\t\tf.write('{},{}\\n'.format(*actual_end[i]))\n\tf.write('ideal\\n')\n\tend.insert(0, [5,5])\n\tfor x in end:\n\t\tf.write('{},{}\\n'.format(*x))\n\tf.close()\n\n\n\n\n\n\n","sub_path":"scripts_misc/process_report.py","file_name":"process_report.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"259900199","text":"import time\nfrom urllib.parse import urlencode\nimport requests\nimport json\n\nurl = \"https://club.jd.com/comment/productPageComments.action?\"\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}\nparam_dict = {\n 'callback': \"fetchJSON_comment98\",\n \"productId\": 3598302,\n \"score\": 0,\n \"sortType\": 5,\n \"page\": 1,\n \"pageSize\": 10,\n \"isShadowSku\": 0,\n \"rid\": 0,\n \"fold\": 1\n}\na = int(input(\"请输入您获取的页码总数:\"))\nx = 0\nwhile x < a:\n param_dict['page'] = x\n response = requests.get(url + urlencode(param_dict), headers=headers)\n time.sleep(3)\n #新增\n response=response.text\n #截取{ 与 }之间的json数据\n begin=response.find(\"{\")\n end=response.rfind(\"}\")\n jsondata=response[begin:end+1]\n #新增结束\n data_dict = json.loads(jsondata)\n #在comments数组中进行遍历\n for item in data_dict[\"comments\"]:\n print(item[\"content\"])\n x = x + 1\n\n#例子a是4,现在这样抓到了第一页到第四页数据 x=0对应第一页 x=3对应第四页\n","sub_path":"Week-05/0504/京东评论抓取 韩昊鹏.py","file_name":"京东评论抓取 韩昊鹏.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"593029537","text":"# coding: utf-8\n\nimport socket\n\n# 1\n# 补全函数\ndef protocol_of_url(url):\n '''\n url 是字符串, 可能的值如下\n 'g.cn'\n 'g.cn/'\n 'g.cn:3000'\n 'g.cn:3000/search'\n 'http://g.cn'\n 'https://g.cn'\n 'http://g.cn/'\n\n 返回代表协议的字符串, 'http' 或者 'https'\n '''\n str = url.find('://')\n if str > 0:\n return url.split('://')[0]\n else:\n return None\n\n\n# 2\n# 补全函数\ndef host_of_url(url):\n '''\n url 是字符串, 可能的值如下\n 'g.cn'\n 'g.cn/'\n 'g.cn:3000'\n 'g.cn:3000/search'\n 'http://g.cn'\n 'https://g.cn'\n 'http://g.cn/'\n\n 返回代表主机的字符串, 比如 'g.cn'\n '''\n str = url.find('://')\n if str > 0:\n url2 = url.split('://')[1]\n else:\n url2 = url\n\n str2 = url2.find('/')\n if str2 == -1:\n host = url2\n else:\n host = url2[:str2]\n\n return host\n\n\n# 3\n# 补全函数\ndef port_of_url(url):\n '''\n url 是字符串, 可能的值如下\n 'g.cn'\n 'g.cn/'\n 'g.cn:3000'\n 'g.cn:3000/search'\n 'http://g.cn'\n 'https://g.cn'\n 'http://g.cn/'\n\n 返回代表端口的字符串, 比如 '80' 或者 '3000'\n 注意, 如上课资料所述, 80 是默认端口\n '''\n host = host_of_url(url)\n str3 = host.find(':')\n if str3 > 0:\n port = host[str3 + 1:]\n else:\n port = 80\n\n return port\n\n\n# 4\n# 补全函数\n\n\ndef path_of_url(url):\n '''\n url 是字符串, 可能的值如下\n 'g.cn'\n 'g.cn/'\n 
 +{"seq_id":"593029537","text":"# coding: utf-8\n\nimport socket\n\n# 1\n# complete this function\ndef protocol_of_url(url):\n    '''\n    url is a string; possible values include:\n    'g.cn'\n    'g.cn/'\n    'g.cn:3000'\n    'g.cn:3000/search'\n    'http://g.cn'\n    'https://g.cn'\n    'http://g.cn/'\n\n    Return the protocol as a string, 'http' or 'https'\n    '''\n    pos = url.find('://')\n    if pos > 0:\n        return url.split('://')[0]\n    else:\n        return None\n\n\n# 2\n# complete this function\ndef host_of_url(url):\n    '''\n    url is a string; possible values include the same forms as above.\n\n    Return the host as a string, e.g. 'g.cn'\n    (note: any ':port' suffix is kept here and handled by port_of_url)\n    '''\n    pos = url.find('://')\n    if pos > 0:\n        url2 = url.split('://')[1]\n    else:\n        url2 = url\n\n    pos2 = url2.find('/')\n    if pos2 == -1:\n        host = url2\n    else:\n        host = url2[:pos2]\n\n    return host\n\n\n# 3\n# complete this function\ndef port_of_url(url):\n    '''\n    url is a string; possible values include the same forms as above.\n\n    Return the port as a string, e.g. '80' or '3000'\n    Note: as covered in class, 80 is the default port\n    '''\n    host = host_of_url(url)\n    pos3 = host.find(':')\n    if pos3 > 0:\n        port = host[pos3 + 1:]\n    else:\n        port = '80'\n\n    return port\n\n\n# 4\n# complete this function\ndef path_of_url(url):\n    '''\n    url is a string; possible values include the same forms as above.\n\n    Return the path as a string, e.g. '/' or '/search'\n    Note: as covered in class, when no path is given the default path is '/'\n    '''\n    pos = url.find('://')\n    if pos > 0:\n        url2 = url.split('://')[1]\n    else:\n        url2 = url\n\n    pos2 = url2.find('/')\n    if pos2 == -1:\n        path = '/'\n    else:\n        path = url2[pos2:]\n\n    return path\n\n\n# 5\n# complete this function\ndef parsed_url(url):\n    '''\n    url is a string; possible values include the same forms as above.\n    Return a tuple of the form (protocol, host, port, path)\n    '''\n    # proto = protocol_of_url(url)\n    # host = host_of_url(url)\n    # port = port_of_url(url)\n    # path = path_of_url(url)\n    # tuple_url = (proto, host, port, path)\n    # return tuple_url\n\n    # determine the protocol\n    pos = url.find('://')\n    if pos > 0:\n        proto = url.split('://')[0]\n        url2 = url.split('://')[1]\n    else:\n        proto = None\n        url2 = url\n\n    # determine host and path\n    pos2 = url2.find('/')\n    if pos2 == -1:\n        host = url2\n        path = '/'\n    else:\n        host = url2[:pos2]\n        path = url2[pos2:]\n\n    # determine the port (and strip it from the host)\n    pos3 = host.find(':')\n    if pos3 > 0:\n        port = host[pos3 + 1:]\n        host = host[:pos3]\n    else:\n        port = '80'\n\n    tuple_url = (proto, host, port, path)\n    return tuple_url\n\n\n# 6\n# wrap sending the HTTP request and reading the reply in one function\ndef get(url):\n    '''\n    Connect to the server with a socket, the same way as the client.py\n    example from class, read the data the server returns and return it.\n    Note: the returned data is of type bytes.\n    '''\n    s = socket.socket()\n\n    # get host, port and path\n    host = host_of_url(url)\n    port = port_of_url(url)\n    path = path_of_url(url)\n    # host_of_url keeps any ':port' suffix, so strip it before connecting\n    if ':' in host:\n        host = host.split(':')[0]\n\n    # connect to the host (the port is a string, so convert it)\n    s.connect((host, int(port)))\n\n    ip, port = s.getsockname()\n    print('local ip and port: {} {}'.format(ip, port))\n\n    http_request = 'GET {} HTTP/1.1\\r\\nhost:{}\\r\\n\\r\\n'.format(path, host)\n\n    request = http_request.encode('utf-8')\n    print('request ', request)\n    s.send(request)\n\n    # note: a single recv may return only part of the response\n    response = s.recv(1000)\n\n    print('response', response)\n\n    print('response as str', response.decode('utf-8'))\n\n    return response\n\n\n# usage\ndef main():\n    url = 'http://movie.douban.com/top250'\n    # print(parsed_url(url))\n    r = get(url)\n    print(r)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"web1/作业1.py","file_name":"作业1.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
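The exercise above hand-rolls URL splitting; the standard library offers a quick cross-check (illustrative sketch, not part of the dataset; note urlsplit only recognizes host and port when a scheme or a leading '//' is present):

from urllib.parse import urlsplit

parts = urlsplit('http://g.cn:3000/search')
print(parts.scheme, parts.hostname, parts.port, parts.path)
# -> http g.cn 3000 /search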
 +{"seq_id":"436747713","text":"import pygame,sys\r\n\r\npygame.init()\r\n\r\n\r\n#screen\r\nscreen = pygame.display.set_mode((800,600))\r\n\r\n#rules (SysFont takes a font name as its first argument; None selects the default font)\r\nfont = pygame.font.SysFont(None, 36)\r\ni1 = font.render('MENU', True, (0,255,0))\r\n\r\nrules_font = pygame.font.SysFont(None, 28)\r\ni2 = rules_font.render('Change in the direction of movement ends the game.', True, (250,0,0))\r\n\r\ni3 = rules_font.render('Touching the edge results in movement from opp side.', True, (250,0,0))\r\n\r\ni4 = rules_font.render('Blue food makes the snake invulnerable and fast for 5 sec.', True, (250,0,0))\r\n\r\ni5 = rules_font.render('Blue and white foods disappear after consumption of 2 reds.', True, (250,0,0))\r\n\r\ni6 = rules_font.render('White food results in slower speed and deduction of 3 points', True, (250,0,0))\r\n\r\ni7 = rules_font.render('A score of 10 unlocks a new level', True, (250,0,0))\r\n\r\n\r\n#name and logo\r\npygame.display.set_caption(\"SNEK\")\r\nlogo = pygame.image.load(\"snake.png\")\r\npygame.display.set_icon(logo)\r\n\r\n\r\n\r\n#to quit\r\n\r\nwhile True:\r\n    for i in pygame.event.get():\r\n        if i.type == pygame.QUIT:\r\n            pygame.quit()\r\n            sys.exit()\r\n\r\n\r\n\r\n    #bg\r\n    screen.fill((0,0,150))\r\n    screen.blit(i1,(250,50))\r\n    screen.blit(i2,(0,80))\r\n    screen.blit(i3,(0,100))\r\n    screen.blit(i4,(0,120))\r\n    screen.blit(i5,(0,140))\r\n    screen.blit(i6,(0,160))\r\n    screen.blit(i7,(0,180))\r\n\r\n\r\n    pygame.display.update()\r\n\r\n","sub_path":"smek.py","file_name":"smek.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"475565599","text":"import numpy as np\nfrom string import *\n\n#-------------------------------------------------------\n#This function returns the score for a match or a mismatch\ndef Diagonal(n1,n2,pt):\n    if(n1 == n2):\n        return pt['MATCH']\n    else:\n        return pt['MISMATCH']\n\n#------------------------------------------------------------ \n#This function takes the candidate values for a cell of the alignment matrix and returns the entry for the pointer matrix.\ndef Pointers(di,ho,ve):\n\n    pointer = max(di,ho,ve) #based on python's default max (returns the first element).\n\n    if(di == pointer):\n        return 'D'\n    elif(ho == pointer):\n        return 'H'\n    else:\n        return 'V' \n\ndef NW(s1,s2,match = 1,mismatch = 0, gap = -5):\n    penalty = {'MATCH': match, 'MISMATCH': mismatch, 'GAP': gap} #A dictionary for all the penalty values.\n    n = len(s1) + 1 #The dimension of the matrix columns.\n    m = len(s2) + 1 #The dimension of the matrix rows.\n    al_mat = np.zeros((m,n),dtype = int) #Initializes the alignment matrix with zeros.\n    p_mat = np.zeros((m,n),dtype = str) #Initializes the pointer matrix with zeros.\n    #Fill the first column (one entry per row) with multiples of the \"gap penalty\"\n    for i in range(m):\n        al_mat[i][0] = penalty['GAP'] * i\n        p_mat[i][0] = 'V'\n    #Fill the first row with multiples of the \"gap penalty\"\n    for j in range (n):\n        al_mat[0][j] = penalty['GAP'] * j\n        p_mat [0][j] = 'H'\n    #Fill the matrix with the correct values.\n\n    p_mat [0][0] = 0 #Return the first element of the pointer matrix back to 0.\n    for i in range(1,m):\n        for j in range(1,n):\n            di = al_mat[i-1][j-1] + Diagonal(s1[j-1],s2[i-1],penalty) #The value for match/mismatch - diagonal.\n            ho = al_mat[i][j-1] + penalty['GAP'] #The value for gap - horizontal (from the left cell).\n            ve = al_mat[i-1][j] + penalty['GAP'] #The value for gap - vertical (from the upper cell).\n            al_mat[i][j] = max(di,ho,ve) #Fill the matrix with the maximal value (based on python's default max).\n            p_mat[i][j] = Pointers(di,ho,ve)\n    print (np.matrix(al_mat))\n    print (np.matrix(p_mat))\ns1 = input(\"enter string 1: \")\ns2 = input(\"enter string 2: \")\nNW(s1,s2)\n","sub_path":"global.py","file_name":"global.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"579922214","text":"from django import template\nfrom django.utils.html import format_html\n\nregister = template.Library()\nfrom porzotokApp import models\n@register.filter(name='hotel_image')\n\ndef hotel_image(value):\n\tem = models.Image.objects.filter(image_galary_details_id__image_galary_details_id = int(value))\n\tif len(em) != 0:\n\t\timg = em[0].Image\n\telse:\n\t\timg = \"\"\n\treturn img\n\n@register.filter(name='hotel_name_slug')\ndef 
hotel_name_slug(value):\n\tname = str(value).replace(' ', '-')\n\treturn name\n\n@register.filter(name='space_remove')\ndef space_remove(value):\n\tname = str(value).replace(' ', '-').replace(\"'\", \"\")\n\treturn name\n\n\n@register.filter(name='hotel_all_image')\ndef hotel_all_image(value):\n\tem = models.Image.objects.filter(image_galary_details_id__image_galary_details_id = int(value))\n\treturn em\n\n@register.filter(name='hotel_facilities')\ndef hotel_facilities(value):\n\tfacilities = models.HotelFacilites.objects.filter(hotel_id__hotel_id = int(value))\n\treturn facilities\n\n@register.filter(name='hotel_min_price')\ndef hotel_min_price(value):\n\ttry:\n\t\troom_price = min([each.price_id.offer_price for each in models.Room.objects.filter(hotel_id=value)])\n\texcept:\n\t\troom_price = 0\n\t\n\treturn room_price\n\n@register.filter(name='GetRooms')\ndef GetRooms(cart_id):\n\troom_info = []\n\ttry:\n\t\troom = models.RoomCartDetails.objects.get(cart_id__cart_id=int(cart_id))\n\n\texcept:\n\t\troom = []\n\n\treturn room\n@register.filter(name='GetRoomNumber')\ndef GetRoomNumber(cart_id):\n\troom_info = []\n\ttry:\n\t\troom = models.RoomCartDetails.objects.get(cart_id__cart_id=int(cart_id))\n\t\troom_no = room.room_id.room_no\n\texcept:\n\t\troom_no = \"\"\n\n\treturn room_no\n\n# total review count\n@register.filter(name='total_review_count')\ndef total_review_count(value):\n\ttry:\n\t\treview_count = len([each.review for each in models.Review.objects.filter(hotel_id=value)])\n\texcept:\n\t\treview_count = 0\n\treturn review_count\n\n# average rating count\n@register.filter(name='average_rating_count')\ndef average_rating_count(value):\n\ttry:\n\t\trating = [each.rating for each in models.Review.objects.filter(hotel_id=value)]\n\t\ttotal_rating = 0\n\t\tfor rate in rating:\n\t\t\ttotal_rating += rate\n\t\tavg_rating = round(total_rating / len(rating))\n\texcept:\n\t\tavg_rating = 0\n\treturn avg_rating\n\n# start rating range display\n@register.filter(name='range')\ndef filter_range(start, end): \n\treturn range(start, end)","sub_path":"porzotokProject/porzotokApp/templatetags/hotel_info.py","file_name":"hotel_info.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"492110783","text":"#! 
/usr/bin/python\n# coding:utf-8\nimport socket\nimport sys\n\nif __name__ == '__main__':\n    try:\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    except socket.error as msg:\n        print('Failed to create socket')\n        print('Error code: ' + str(msg[0]) + ',Error message: ' + str(msg[1]))\n        sys.exit()\n    print('Socket created')\n    HOST = ''\n    PORT = 8080\n    try:\n        s.bind((HOST, PORT))\n    except socket.error as msg:\n        print('Bind failed.')\n        print('Error code: ' + str(msg[0]) + ',Error Message: ' + str(msg[1]))\n        sys.exit()\n    print('Bind Complete')\n    s.listen(2)\n    print('Port is now listening')\n    conn, addr = s.accept()\n    print('Connection with ' + addr[0] + ':' + str(addr[1]))\n\n    data = conn.recv(1024)\n    # note: both branches currently send an empty reply; the length check is a placeholder\n    if (len(data) >= 7):\n        reply = ''\n    else:\n        reply = ''\n\n    conn.sendall( reply.encode('utf-8'))\n    conn.close()\n    s.close()\n","sub_path":"s6.py","file_name":"s6.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"54457898","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nfrom CesarCipher import encrypt\nimport unittest\n\n\n''' simple test '''\n\nclass TestCesar(unittest.TestCase):\n\n\n    def test(self):\n        self.assertEqual(encrypt(4, \"A\"), [\"Ć\"])\n\n\nif __name__ == '__main__':\n    unittest.main(exit=False)","sub_path":"CesarCipher_test.py","file_name":"CesarCipher_test.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"552214688","text":"\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass Scraper:\n    \"\"\" Class for extracting the required information, in this case from the newspaper El Comercio \"\"\"\n\n    def __init__(self, url):\n        self.url = url\n\n    def main_scraper(self):\n        page = requests.get(self.url)\n        datos = BeautifulSoup(page.content, 'lxml')\n        research = datos.find_all('div', class_ = 'story-item__information-box w-full')\n        return research\n\n    def header_scraper(self):\n        titulares = []\n        for dato in self.main_scraper():\n            titulares.append(dato.h2.a.text)\n        return titulares\n\n    def description_scraper(self):\n        descripcion = []\n        for dato in self.main_scraper():\n            descripcion.append(dato.p.text)\n        return descripcion\n\n    def link_scraper(self):\n        links = []\n        for dato in self.main_scraper():\n            links.append('https://elcomercio.pe' + dato.h2.a['href'])\n        return links","sub_path":"comercio_scraper.py","file_name":"comercio_scraper.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"562690008","text":"from typing import DefaultDict\nfrom PyQt5 import QtWidgets, QtCore, uic, QtGui, QtPrintSupport\nfrom pyqtgraph import PlotWidget, plot\nfrom PyQt5.uic import loadUiType\nfrom PyQt5.QtWidgets import * \nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom os import path\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport matplotlib.pyplot as plot\nimport librosa \nfrom pydub import AudioSegment\nfrom tempfile import mktemp\nimport librosa.display\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport imagehash\nfrom PyQt5.QtWidgets import QTableWidget \nimport pylab\nfrom comparee import compare\n\nMAIN_WINDOW,_=loadUiType(path.join(path.dirname(__file__),\"main.ui\"))\n\nclass MainApp(QMainWindow,MAIN_WINDOW):\n\n    def __init__(self,parent=None):\n        super(MainApp,self).__init__(parent)\n        QMainWindow.__init__(self)\n        
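# (added note) setupUi below loads the widgets declared in main.ui; the rest of __init__\n        # wires Browse1/Browse2 to readSong, disables Identify until a song is loaded, and\n        # keeps the ten result labels (song_1..song_10) in self.Table\n        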
self.setupUi(self)\n self.similarties={}\n self.Buttons= [self.Browse1 , self.Browse2 , self.Identify]\n self.Buttons[2].setDisabled(True) \n self.Table= [self.song_1,self.song_2,self.song_3,self.song_4,self.song_5,self.song_6,self.song_7,self.song_8,self.song_9,self.song_10]\n self.songs= [None,None]\n self.outMix= None\n self.Buttons[0].clicked.connect(lambda : self.readSong(1) )\n self.Buttons[1].clicked.connect(lambda : self.readSong(2) )\n self.Buttons[2].clicked.connect(self.songMixer)\n\n\n def readSong(self,songNumber):\n fileName= QFileDialog.getOpenFileName( self, 'choose the signal', os.getenv('HOME') ,\"mp3(*.mp3)\" ) \n self.path = fileName[0] \n \n if self.path ==\"\" :\n return\n modifiedAudio = AudioSegment.from_file( self.path , format=\"mp3\") # read mp3\n wname = mktemp('.wav') # use temporary file\n modifiedAudio.export(wname, format=\"wav\") # convert to wav\n\n if songNumber == 1:\n self.firstSongName.setText(os.path.splitext(os.path.basename(self.path))[0])\n self.song1Data,self.samplingFrequency1 = librosa.load(wname,sr=22050 ,mono=True ,offset=0.0 ,duration=60)\n \n self.songs[0]= self.song1Data\n self.Buttons[2].setDisabled(False) \n print(\"Song1 read \")\n #print(self.song1Data)\n\n elif songNumber == 2 :\n self.secondSongName.setText(os.path.splitext(os.path.basename(self.path))[0])\n self.song2Data,self.samplingFrequency2 =audioData,samplingFreq = librosa.load(wname,sr=22050 ,mono=True ,offset=0.0 ,duration=60)\n self.songs[1]= self.song2Data\n self.Buttons[2].setDisabled(False) \n print(\"song2 read\")\n\n for i in range (10):\n self.Table[i].clear()\n \n def songMixer(self) :\n sliderRatio = self.mixerSlider.value()/100\n\n if (self.songs[0] is not None) and (self.songs[1] is not None):\n self.outMix = self.songs[0] * sliderRatio + self.songs[1] * (1-sliderRatio)\n \n else:\n if self.songs[0] is not None : self.outMix = self.songs[0]\n if self.songs[1] is not None: self.outMix = self.songs[1]\n \n self.spectrogram()\n\n def spectrogram (self):\n \n \n Spectro_Path = 'mixSpectrogram.png'\n pylab.axis('off') # no axis\n pylab.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[]) # Remove the white edge\n D = librosa.amplitude_to_db(np.abs(librosa.stft(self.outMix)), ref=np.max)\n librosa.display.specshow(D, y_axis='linear')\n pylab.savefig(Spectro_Path, bbox_inches=None, pad_inches=0)\n pylab.close()\n self.features()\n\n def features (self): \n \n #mfcc \n pylab.axis('off') \n pylab.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[])\n SavePath = 'mfcc.png'\n feature1= librosa.feature.mfcc(y=self.outMix, sr=self.samplingFrequency1)\n #print(feature1)\n Image1=Image.fromarray(feature1)\n Hash1=imagehash.phash(Image1,hash_size=16)\n Hash1=str(Hash1)\n #print(Hash1)\n librosa.display.specshow(feature1.T,sr=self.samplingFrequency1 )\n pylab.savefig(SavePath, bbox_inches=None, pad_inches=0)\n pylab.close()\n\n #melspectrogram\n pylab.axis('off') \n pylab.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[]) \n SavePath ='melspectrogram.png'\n feature2= librosa.feature.melspectrogram(y=self.outMix, sr=self.samplingFrequency1)\n #print (feature2)\n Image2=Image.fromarray(feature2)\n Hash2=imagehash.phash(Image2,hash_size=16)\n Hash2=str(Hash2)\n #print(Hash2)\n librosa.display.specshow(feature2.T,sr=self.samplingFrequency1 )\n pylab.savefig(SavePath, bbox_inches=None, pad_inches=0)\n pylab.close()\n self.generateFingerprintUser(Hash1,Hash2)\n def generateFingerprintUser(self,featureHash1,featureHash2):\n\n SongHashesUser = 
{\"mfccHash\":'',\"melSpectrogramHash\" : \"\"}\n SongHashesUser[\"mfccHash\"] = str(featureHash1)\n SongHashesUser[\"melSpectrogramHash\"] = str(featureHash2)\n print(SongHashesUser[\"mfccHash\"])\n print(SongHashesUser[\"melSpectrogramHash\"])\n self.similarties=compare(SongHashesUser)\n print(self.similarties)\n self.Tableconstruct()\n\n def Tableconstruct(self):\n for i in range(len(self.Table)):\n self.Table[i].setText(str(self.similarties[i][0])+'--->Similarty index='+str(self.similarties[i][1])+'%')\n #print(self.similarties[i][i])\n \n \ndef main():\n app = QApplication(sys.argv)\n window = MainApp()\n window.show()\n sys.exit(app.exec_())\n\n\nif __name__=='__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"532057565","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom tinymce.models import HTMLField\n\n\nclass Mentor(models.Model):\n user = models.OneToOneField(\n User,\n on_delete=models.PROTECT,\n verbose_name=\"Пользователь\",\n )\n\n status = models.CharField(\n max_length=200,\n default=\"\",\n blank=True,\n verbose_name=\"Статус\",\n )\n\n bio = models.TextField(\n default=\"\",\n blank=True,\n verbose_name=\"Биография\",\n )\n\n class Meta:\n verbose_name = \"Преподаватель\"\n verbose_name_plural = \"Преподаватели\"\n\n def __str__(self):\n return f\"{self.__class__.__name__} <{self.user}>\"\n\n\nclass Student(models.Model):\n user = models.OneToOneField(\n User,\n on_delete=models.PROTECT,\n verbose_name=\"Пользователь\",\n )\n\n status = models.CharField(\n max_length=200,\n default=\"\",\n blank=True,\n verbose_name=\"Статус\",\n )\n\n bio = models.TextField(\n default=\"\",\n blank=True,\n verbose_name=\"Биография\",\n )\n\n class Meta:\n verbose_name = \"Студент\"\n verbose_name_plural = \"Студенты\"\n\n def __str__(self):\n return f\"{self.__class__.__name__} <{self.user}>\"\n\n\nclass Category(models.Model):\n title = models.CharField(\n max_length=150,\n default=\"\",\n verbose_name=\"Название\",\n )\n\n class Meta:\n verbose_name = \"Категория\"\n verbose_name_plural = \"Категории\"\n\n def __str__(self):\n return f\"{self.__class__.__name__} <{self.title}>\"\n\n\nclass Course(models.Model):\n responsible = models.ForeignKey(\n Mentor,\n on_delete=models.PROTECT,\n verbose_name=\"Отвественный\",\n related_name=\"courses\",\n )\n category = models.ForeignKey(\n Category,\n on_delete=models.SET_NULL,\n null=True,\n verbose_name=\"Категория\",\n related_name=\"courses\",\n )\n\n title = models.CharField(\n max_length=150,\n default=\"\",\n verbose_name=\"Название\",\n )\n description = HTMLField(\n default=\"\",\n blank=True,\n verbose_name=\"Описание курса\",\n )\n\n class Meta:\n verbose_name = \"Курс\"\n verbose_name_plural = \"Курс\"\n\n def __str__(self):\n return f\"{self.__class__.__name__} <{self.title}>\"\n\n\nclass Lesson(models.Model):\n mentor = models.ForeignKey(\n Mentor,\n on_delete=models.PROTECT,\n verbose_name=\"Преподаватель\",\n related_name=\"lessons\",\n )\n course = models.ForeignKey(\n Course,\n on_delete=models.SET_NULL,\n null=True,\n verbose_name=\"Курс\",\n related_name=\"lessons\",\n )\n tags = models.ManyToManyField(\n \"Tag\",\n verbose_name=\"Тэг\",\n related_name=\"lessons\",\n )\n\n title = models.CharField(\n max_length=150,\n default=\"\",\n verbose_name=\"Название\",\n )\n description = HTMLField(\n default=\"\",\n blank=True,\n 
verbose_name=\"Описание урока\",\n )\n text = HTMLField(\n default=\"\",\n blank=True,\n verbose_name=\"Текст занятия\",\n )\n\n class Meta:\n verbose_name = \"Занятие\"\n verbose_name_plural = \"Занятия\"\n\n def __str__(self):\n return f\"{self.__class__.__name__} <{self.title}>\"\n\n\nclass TagQuerySet(models.QuerySet):\n def popular(self):\n return self.annotate(\n lessons_count=models.Count('lessons')\n ).order_by('-lessons_count')\n\n\nclass Tag(models.Model):\n title = models.CharField(\n max_length=150,\n default=\"\",\n verbose_name=\"Название\",\n )\n\n objects = TagQuerySet.as_manager()\n\n class Meta:\n verbose_name = \"Тег\"\n verbose_name_plural = \"Теги\"\n\n def __str__(self):\n return f\"{self.__class__.__name__} <{self.title}>\"\n\n\nclass Schedule(models.Model):\n lesson = models.ForeignKey(\n Lesson,\n on_delete=models.CASCADE,\n verbose_name=\"Занятие\",\n related_name=\"schedules\",\n )\n\n student = models.ForeignKey(\n Student,\n on_delete=models.CASCADE,\n verbose_name=\"Студент\",\n related_name=\"schedules\",\n )\n\n lesson_time = models.DateTimeField(\n verbose_name=\"Дата занятия\",\n )\n\n class Meta:\n verbose_name = \"Расписание\"\n verbose_name_plural = \"Расписание\"\n\n def __str__(self):\n return (f\"{self.__class__.__name__} <{self.lesson}> <{self.student}> \"\n f\"at {self.lesson_time}\")\n\n","sub_path":"education/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"97859086","text":"# Pytest\nimport pytest\n\n# Werkzeug\nfrom werkzeug.test import Client\n\n# Retic\nfrom retic import App as app, Router\n\nPATHS = [\n (\"/\"),\n]\nPATHS_ROUTES = [\n (\"/endpoint\")\n]\nPATHS_SLASH = [\n (\"/examples/\")\n]\n\nCONTROLLERS = [\n # Check if the value is like as bytes, return string\n lambda req, res: res.ok({u'type': req.body.type, u'value': req.body.value if not isinstance(\n req.body.value, bytes) else req.body.value.decode(\"utf-8\")})\n]\n\n\n@pytest.fixture\ndef app_without_client():\n \"\"\"Clear the app\"\"\"\n app.clear()\n \"\"\"Returns an app client with routes\"\"\"\n _router = Router()\n for _path in PATHS+PATHS_SLASH+PATHS_ROUTES:\n \"\"\"define a new path using the response from a path definition\"\"\"\n _router \\\n .get(_path, *CONTROLLERS) \\\n .post(_path, *CONTROLLERS)\n app.use(_router)\n return app\n\n\n@pytest.mark.lib\ndef test_config_attribute():\n \"\"\"Clear the configuration\"\"\"\n app.config.clear()\n \"\"\"Check that the variables aren't exists\"\"\"\n assert app.config.get(\"APP_LANG\") == None\n assert app.config.get(\"APP_HOSTNAME\") == None\n \"\"\"we include a basic setting to app\"\"\"\n app.config.set(\"APP_LANG\", \"en_US\")\n app.config.set(\"APP_HOSTNAME\", \"localhost\")\n \"\"\"Check if this value was saved in the object\"\"\"\n assert app.config.get(\"APP_LANG\") == \"en_US\", \\\n \"The value from the configuration item is different to value saved\"\n assert app.config.get(\"APP_HOSTNAME\") == \"localhost\", \\\n \"The value from the configuration item is different to value saved\"\n\n\n@pytest.mark.lib\ndef test_config_attribute():\n \"\"\"Clear the configuration\"\"\"\n app.config.clear()\n \"\"\"We don't include a basic setting to app and check if this value wasn't saved in the object\"\"\"\n assert app.config.get(\"APP_LANG\") == None\n\n\n@pytest.mark.lib\ndef test_config_from_object():\n \"\"\"Clear the configuration\"\"\"\n app.config.clear()\n \"\"\"Check that the variables 
don't exist\"\"\"\n    assert app.config.get(\"APP_LANG\") == None\n    \"\"\"Set the settings from an object\"\"\"\n    app.config.from_object({u'APP_LANG': \"en_US\"})\n    \"\"\"check if this value exists\"\"\"\n    assert app.config.get(\"APP_LANG\") == \"en_US\", \\\n        \"The value from the configuration item is different from the saved value\"\n\n@pytest.mark.lib\ndef test_config_clear():\n    \"\"\"Set the settings from an object\"\"\"\n    app.config.from_object({u'APP_LANG': \"en_US\"})\n    \"\"\"check if this value exists\"\"\"\n    assert app.config.get(\"APP_LANG\") == \"en_US\", \\\n        \"The value from the configuration item is different from the saved value\"\n    \"\"\"Clear the configuration\"\"\"\n    app.config.clear()\n    \"\"\"Check that the variables don't exist\"\"\"\n    assert app.config.get(\"APP_LANG\") == None\n\n\n\"\"\"Tests for the main App\"\"\"\n\n\n@pytest.mark.lib_api\n@pytest.mark.parametrize(\"path\", PATHS_ROUTES)\ndef test_request_clear_app(app_without_client, path):\n    _app = Client(app_without_client.application)\n    \"\"\"get a request when the app has routes\"\"\"\n    app_iter, status, headers = _app.get(path)\n    assert status.upper() == \"200 OK\"\n    \"\"\"clear the App's state\"\"\"\n    app_without_client.clear()\n    \"\"\"get a request when the app has no routes\"\"\"\n    app_iter, status, headers = _app.get(path)\n    assert status.upper() == \"404 NOT FOUND\"\n","sub_path":"test/test_lib_retic.py","file_name":"test_lib_retic.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"234754750","text":"from tkinter import *\r\nimport smtplib\r\nimport keys\r\nen_keys = keys.encypt_keys\r\n\r\n\r\ndef encrypt(text,key):\r\n    code = list(text)\r\n    for i in range(len(code)):\r\n        code[i] = en_keys[key][code[i]]\r\n    return(''.join(code))\r\n\r\n\r\ndef action():\r\n    s = smtplib.SMTP('smtp.gmail.com' , 587)\r\n    s.starttls()\r\n    s.login('aj56.virus@gmail.com','bmdztsphkbixzyii')\r\n    #apppwd for gmail: bmdztsphkbixzyii\r\n    key = key_type.get()\r\n    to = to_type.get()\r\n    subject = encrypt(subject_type.get(),key)\r\n    body = encrypt(body_type.get(),key)\r\n    s.sendmail('aj56.virus@gmail.com',to ,'Subject:' + subject + '\\n\\n' + body)\r\n\r\ntop = Tk() \r\ntop.title('Send Mail')\r\ntop.geometry(\"400x300\") \r\n\r\nTo_label = Label(top, text = \"Recipient Email:\").place(x = 30,y = 50) \r\nSubject_label = Label(top, text = \"Subject:\").place(x = 30, y = 90) \r\nBody_label = Label(top, text = \"Body:\").place(x = 30, y = 130) \r\nKey_label = Label(top, text = \"Key(1-6):\").place(x = 30, y = 170) \r\n\r\n\r\nto_type = StringVar()\r\nsubject_type = StringVar()\r\nbody_type = StringVar()\r\nkey_type = IntVar()\r\n\r\nto_entry = Entry(textvariable = to_type , width = \"25\") \r\nsub_entry = Entry(textvariable = subject_type, width = \"25\") \r\nbody_entry = Entry(textvariable = body_type, width = \"25\") \r\nkey_entry = Entry(textvariable = key_type, width = \"25\").place(x=160,y=170)\r\n\r\n\r\nto_entry.place(x=160,y=50)\r\nsub_entry.place(x=160,y=90)\r\nbody_entry.place(x=160,y=130)\r\n\r\n\r\nsbmitbtn = Button(top, text = \"Submit\",background = \"pink\", foreground = \"blue\", command= action).place(x = 170, y = 210) \r\n\r\ntop.mainloop() \r\n","sub_path":"gcrypt main.py","file_name":"gcrypt main.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"197030451","text":"# Import only those methods \n# which are mentioned below, this way 
of \n# importing methods is efficient \nfrom tkinter import Tk, mainloop, LEFT, TOP \nfrom tkinter.ttk import *\n\n# Creating tkinter window with fixed geometry \nroot = Tk() \nroot.geometry('250x150') \n\n# This will create a LabelFrame \nlabel_frame = LabelFrame(root, text = 'This is Label Frame') \nlabel_frame.pack(expand = 'True', fill = 'both') \n\n# Buttons \nbtn1 = Button(label_frame, text = 'Button 1') \nbtn1.place(x = 30, y = 10) \nbtn2 = Button(label_frame, text = 'Button 2') \nbtn2.place(x = 130, y = 10) \n\n# Checkbuttons \nchkbtn1 = Checkbutton(label_frame, text = 'Checkbutton 1') \nchkbtn1.place(x = 30, y = 50) \nchkbtn2 = Checkbutton(label_frame, text = 'Checkbutton 2') \nchkbtn2.place(x = 30, y = 80) \n\n# This creates infinite loop which generally \n# waits for any interrupt (like keyboard or \n# mouse) to terminate \nmainloop() \n","sub_path":"mysql.connector.py","file_name":"mysql.connector.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"63044195","text":"from datetime import datetime, timedelta\n\nfrom lme.prices import get_latest_values_date, last_weeks\n\n\ndef test_prices_last_week_content(capsys):\n fim = get_latest_values_date()\n\n dia_semana = fim.isoweekday()\n semana_numero = fim.strftime(\"%U\")\n\n if semana_numero == \"00\":\n fim = datetime.now() - timedelta(days=fim.isoweekday())\n\n if dia_semana == 1:\n fim = datetime.now() - timedelta(days=3)\n\n last_weeks(1)\n captured = capsys.readouterr()\n assert f'Semana do ano: {semana_numero}' in captured.out\n assert f'Fim: {fim.strftime(\"%d-%m-%Y\")}' in captured.out\n","sub_path":"tests/test_prices.py","file_name":"test_prices.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"334565201","text":"# Bool\nimport aobjects as obj\nfrom . import types\n\ndef bool_init(self :obj.AILObject, v :obj.AILObject):\n if not isinstance(v, obj.AILObject):\n vv = False if not v else True\n else:\n if v['__class__'].otype in (types.I_INT_TYPE, types.I_FLOAT_TYPE):\n vv = False if not v['__value__'] else True\n elif v['__class__'].otype == types.I_STR_TYPE:\n vv = False if not v['__value__'] else True\n elif v == obj.null:\n vv = False\n else:\n vv = True\n\n self['__value__'] = vv\n\n\ndef bool_eq(self :obj.AILObject, o :obj.AILObject) -> obj.AILObject:\n return obj.ObjectCreater.new_object(BOOL_TYPE, o)\n\n\ndef bool_str(self :obj.AILObject):\n return '%s' % ('false' if not self['__value__'] else 'true')\n\n\nBOOL_TYPE = obj.AILObjectType('', types.I_TYPE_TYPE,\n __init__=bool_init,\n __eq__=bool_eq,\n __str__=bool_str)\n","sub_path":"core/objects/bool.py","file_name":"bool.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"105386154","text":"# model.py\n\nfrom torch import nn\nimport models\nimport losses\n\n\n# custom weights initialization called on netG and netD\n# def weights_init(m):\n# classname = m.__class__.__name__\n# if classname.find('Conv') != -1:\n# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n# m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n# elif classname.find('BatchNorm') != -1:\n# m.weight.data.fill_(1)\n# m.bias.data.zero_()\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\nclass Model:\n def __init__(self, args):\n self.ndf = args.ndf\n self.nef = args.nef\n self.cuda = args.cuda\n self.wkld = args.wkld\n self.gbweight = args.gbweight\n self.nlatent = args.nlatent\n self.nechannels = args.nechannels\n self.ngchannels = args.ngchannels\n \n def setup(self, checkpoints):\n model = {}\n criterion = {}\n #model[\"netE\"] = models.dcgan._netE(self.nechannels, self.nlatent, self.nef) #i have commented this line\n # self.nlatent is length of z vector. Adding 1 to it for gbtt\n model[\"netG\"] = models.dcgan._netG(self.ngchannels, self.ndf, self.nlatent)\n #criterion[\"netG\"] = losses.ge #i have commented this line\n #criterion[\"netE\"] = losses.vae_loss(self.wkld, self.gbweight) #I have commented this line\n\n criterion[\"netG\"] = losses.vae_loss(self.wkld, self.gbweight) #i have added this line\n\n if self.cuda:\n #model['netE'] = nn.DataParallel(model['netE']).cuda() #i have commented this line\n model['netG'] = nn.DataParallel(model['netG']).cuda() #I have commented this line\n #criterion[\"netE\"] = criterion[\"netE\"].cuda() #I have commented this line\n criterion[\"netG\"] = criterion[\"netG\"].cuda() #I have changed netE to netG in the RHS\n #criterion[\"netG\"] = losses.vae_loss(self.wkld, self.gbweight)\n\n models_to_resume = checkpoints.latest('resume')\n for name, net in model.items():\n if name in models_to_resume:\n tmp = checkpoints.load(name)\n net.load_state_dict(tmp)\n else:\n net.apply(weights_init)\n\n return model, criterion\n","sub_path":"Final_Project/scripts/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"322254897","text":"#\nfrom __future__ import (absolute_import, division,\n print_function) #, unicode_literals)\nimport decimal\nfrom collections import namedtuple\n\ntry:\n import urllib.parse as urlparselib\nexcept ImportError: #above is python 3\n import urllib as urlparselib\ntry:\n unicode\nexcept NameError: #no word 'unicode' in python 3\n unicode = str\nclass DummyClass:\n pass\ntry:\n from bson.objectid import ObjectId\nexcept ImportError:\n ObjectId = DummyClass\n\nimport inspect\nimport json\n#from jsonweb import encode #,decode\n\nimport os\nimport datetime\nfrom collections import OrderedDict\n\nimport sys\nimport types\nPY3k = sys.version_info[0] == 3\n\nif PY3k:\n basestring = (str, bytes)\n _iteritems = \"items\"\nelse:\n basestring = basestring\n _iteritems = \"iteritems\"\n\ndef items(d): #this is to become deprecated!\n return getattr(d, _iteritems)()\n\n\nclass ObjDictError(Exception):\n def __init__(self, message, **extras):\n super(Exception, self).__init__(self, message)\n self.extras = extras\n\nclass JsonDecodeError(ObjDictError):\n \"\"\"\n Raised when python containers (dicts and lists) cannot be decoded into\n complex types. 
These exceptions are raised from within an ObjectHook\n instance.\n \"\"\"\n def __init__(self, message, **extras):\n ObjDictError.__init__(self, message, **extras)\n\nclass ObjectNotFoundError(JsonDecodeError):\n def __init__(self, obj_type):\n JsonDecodeError.__init__(\n self,\n \"Cannot decode object {0}. No such object.\".format(obj_type),\n obj_type=obj_type,\n )\n\n#-----------------------------------------------------------\n# class registry\n#-----------------------------------------------------------\n\n\nclass FakeFloat(float):\n\n def __init__(self, value):\n self._value = value\n\n def __repr__(self):\n return str(self._value)\n\nfrom collections import namedtuple #can we used ObjDict instead? circular!\nClsEnc = namedtuple('ClsEnc', 'cls enc')\n\ndef type_str(obj):\n return '_'+obj.__class__.__name__ +':' +str(obj)\n\nclass ClassRegistry:\n \"\"\" The Encoder (for to json) table is in two parts.\n exact matches, and 'isinstance' matches.\n classes can have an encoder for the exact class,\n or for any class which matches an isinstance test,\n or separate encoders for both cases\n\n inst_table is a list of ClsEnc tuples\n exact_table is a dictionary of inst_tables where all classes in each \n inst_table have the same name\n \"\"\"\n\n def __init__(self):\n self.exact_table = {}\n self.inst_table = []\n self.add_to(decimal.Decimal, FakeFloat)\n self.add_to(datetime.datetime, str)\n self.add_to(datetime.date, str)\n self.add_to(datetime.timedelta, str)\n self.add_to(ObjectId, type_str)\n\n def add_to(self, cls, encoder, subclasses=False):\n\n if subclasses:\n lst = inst_table\n else:\n name = cls.__name__\n if not name in self.exact_table:\n self.exact_table[name] = []\n lst = self.exact_table[name]\n\n for entry in lst:\n if entry.cls == cls:\n entry.enc = encoder\n break\n else:\n lst.append(ClsEnc(cls=cls, enc=encoder))\n\n\n def find(self, cls):\n \"\"\" search exact match or isinstance match\"\"\"\n lst = self.exact_table.get(cls.__name__, [])\n for entry in lst:\n if entry.cls == cls:\n return entry\n\n for entry in self.inst_table:\n if isinstance(cls, entry.cls):\n return entry\n\n return None\n\n def do_method(self, entry, obj):\n return entry.enc(obj)\n\n\njson_registry = ClassRegistry()\nClassEntry = namedtuple(\"ClassEntry\", \"cls handler\")\n\nclass _ClassRegister(object):\n def __init__(self):\n self._classes = {}\n self._deferred_updates = {}\n\n def add_class(self, cls, handler, type_name=None):\n name = type_name or cls.__name__\n self._classes[name] = ClassEntry(cls, handler)\n\n def get(self, name):\n \"\"\"\n Get a handler tuple. Return None if no such handler.\n \"\"\"\n return self._classes.get(name)\n\n def set(self, name, handler_tuple):\n \"\"\"\n Add a handler tuple (handler, cls, schema)\n \"\"\"\n self._classes[name] = handler_tuple\n\n def clear(self):\n self._classes = {}\n self._deferred_updates = {}\n\n def update_handler(self, name, cls=None, handler=None, schema=None):\n \"\"\"\n Modify cls, handler and schema for a decorated class.\n \"\"\"\n handler_tuple = self._classes[name]\n self.set(name, self.__merge_tuples((handler, cls, schema),\n handler_tuple))\n\n def xupdate_handler_deferred(self, name, cls=None,\n handler=None, schema=None):\n \"\"\"\n If an entry does not exist in _classes an entry will be added to\n __deferred_updates instead. Then when add_handler is finally called\n values will be updated accordingly. 
Items in __deferred_updates will\n        take precedence over those passed into add_handler.\n        \"\"\"\n        if name in self._classes:\n            self.update_handler(name, cls, handler, schema)\n            return\n        d = self._deferred_updates.get(name, (None,)*3)\n        self._deferred_updates[name] = self.__merge_tuples(\n            (handler, cls, schema), d)\n\n    def copy(self):\n        handler_copy = _ClassRegister()\n        [handler_copy.set(n, t) for n, t in self]\n        return handler_copy\n\n    def __merge_tuples(self, a_tuple, b_tuple):\n        \"\"\"\n        \"Merge\" two tuples of the same length. a takes precedence over b.\n        \"\"\"\n        if len(a_tuple) != len(b_tuple):\n            raise ValueError(\"Iterators differ in length.\")\n        return tuple([(a or b) for a, b in zip(a_tuple, b_tuple)])\n\n    def __contains__(self, handler_name):\n        return handler_name in self._classes\n\n    def __getitem__(self, handler):\n        return self._classes[handler]\n\n    def __iter__(self):\n        for name, handler_tuple in self._classes.items():\n            yield name, handler_tuple\n\n_default_class_register = _ClassRegister()\n\n\n#------------------------------------------------------------------------\n# from json: loading\n#------------------------------------------------------------------------\n\nclass ObjPairHook(object):\n    \"\"\"\n    This class encapsulates the object decode mechanism used to create or\n    recreate classes from json text files.\n\n    An instance of an ObjPairHook provides a decode_pairs() method.\n    This method checks for a __type__ specified from the json data.\n    If there is no __type__ then the 'BaseHook' object is used to instance\n    an object from the Object Pairs. The BaseHook can be a class or any other callable.\n    If the BaseHook has a 'from_json' property then this 'from_json' method will\n    be called, otherwise the BaseHook will be called directly.\n\n    The 'DefaultType' is actually a default base class for the case where the\n    __type__ is specified, but does not correspond to a class in the list of\n    classes.\n    The hook keeps a register of decodable classes and does most of the work of\n    managing the handlers that decode the json into python class instances.\n    You should not need to use this class\n    directly. :func:`make_pairs_hook` is responsible for instantiating and using it.\n    \"\"\"\n\n    def __init__(self, classes_list=[], BaseHook=None,\n                 BaseType=None):\n        #self.classes = classes_list\n        self.BaseHook = BaseHook\n        self.DefaultType = BaseType\n        handle_key = '__from_json__'\n\n        if classes_list:\n            _class_register = _default_class_register.copy()\n            for cls in classes_list:\n                name = cls.__name__\n                # add_class overwrites any existing entry for the name,\n                # so it covers both the register and the update case\n                _class_register.add_class(\n                    cls,\n                    getattr(cls, handle_key, cls), name\n                )\n        else:\n            _class_register = _default_class_register\n\n        self.classes = _class_register\n\n\n    def from_json(self, obj):\n        \"\"\"\n        This method is called for every dict decoded from a json string. The\n        presence of the key ``__type__`` in ``obj`` will trigger a lookup in\n        ``self.handlers``. If a handler is not found for ``__type__`` then an\n        :exc:`ObjectNotFoundError` is raised. If a handler is found it will\n        be called with ``obj`` as its only argument. If an :class:`ObjectSchema`\n        was supplied for the class, ``obj`` will first be validated then passed\n        to handler. 
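(Note, from the code below: the lookup is a plain dict keyed by the ``__type__`` string, so the match is exact; when several classes register under one name, the most recent registration wins.)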
The handler should return a new python instance of type ``__type__``.\n        \"\"\"\n        if isinstance(obj, list):\n            dobj = OrderedDict(obj)\n            if \"__type__\" not in dobj:\n                if self.BaseHook:\n                    return self.BaseHook(obj)\n                else:\n                    #consider override if no baseHook to ObjDict\n                    return dobj\n            obj = dobj\n        elif \"__type__\" not in obj:\n            if self.BaseHook:\n                # do factory with BaseHook entry?\n                return self.BaseHook(obj)\n            return obj\n        #else:\n        #    dobj=obj #should not apply with pairs hook\n        obj_type = obj.pop(\"__type__\") # does not do much!\n        try:\n            cls, factory = self.classes[obj_type]\n        except KeyError:\n            if self.DefaultType:\n                #print(\"doing this type\")\n                #could do: factory,cls,schema ...?\n                ThisType = type(str(obj_type), (self.DefaultType, ),\n                                {'_set_type':True})\n                return ThisType(obj)\n            raise ObjectNotFoundError(obj_type)\n\n        try:\n            #print(\"fact ret\",cls)\n            return factory(obj)\n        except KeyError as e:\n            # note: ObjectAttributeError does not appear to be defined in this module\n            raise ObjectAttributeError(obj_type, e.args[0])\n\n\n\n#_default_object_handlers = _ObjectHandlers()\n\ndef make_kwargs(cls, args):\n    return cls(**ObjDict(args))\n\ndef from_json(type_name=None, use=None):\n    \"\"\"\n    Decorating a class with :func:`from_json` allows :func:`json.loads`\n    to return instances of that class, embedded within the object returned.\n\n    The class can contain a 'from_json' class method that receives\n    a list of object pairs as a parameter, and returns an instance of the class.\n\n    This class method will normally perform any validation on the 'pairs' data\n    (key, value list) that is decoded from json and provided as a parameter,\n    extract the relevant data and pass that to the class init method.\n\n    If no '__from_json__' class method is present, then handling depends\n    on the 'use' parameter. If use is the default (None) the standard\n    'make_kwargs' routine is used to init the class, or if use is\n    True, the 'pairs' key/value\n    list decoded from json will be provided as a parameter to the class init method.\n    The third alternative is to provide a custom method to decode from json::\n\n        use=<custom decode function>\n\n    Here is an\n    example using __from_json__ ::\n\n        >>> @from_json()\n        ... class Person(object):\n        ...     def __init__(self, first_name, last_name):\n        ...         self.first_name = first_name\n        ...         self.last_name = last_name\n        ...     @classmethod\n        ...     def __from_json__(cls, pairlist):\n        ...         obj = ObjDict(pairlist)\n        ...         return cls(obj.first_name, obj.last_name)\n        ...\n        >>> person_json = '{\"__type__\": \"Person\", \"first_name\": \"Shawn\", \"last_name\": \"Adams\"}'\n        >>> person = loads(person_json)\n        >>> person.first_name\n        'Shawn'\n\n    The ``__type__`` key in the json text is very important.\n    Without it the 'object_pairs_hook'\n    will simply treat the data as a generic ObjDict object/dictionary.\n\n    By default\n    :func:`ObjPairHook.from_json` assumes ``__type__`` will be the class's ``__name__``\n    attribute. 
You can specify your own value by setting the ``type_name``\n keyword argument ::\n\n @from_object( type_name=\"PersonObject\")\n\n Which means the json string would need to be modified to look like this::\n\n '{\"__type__\": \"PersonObject\", \"first_name\": \"Shawn\", \"last_name\": \"Adams\"}'\n \"\"\"\n handle_key='__from_json__'\n\n def wrapper(cls):\n wrap_use = getattr(cls, handle_key, use)\n if wrap_use == True:\n wrap_use = cls\n elif wrap_use == None:\n def wrapkwargs(cls):\n def callkwargs(args):\n return make_kwargs(cls, args)\n return callkwargs\n wrap_use = wrapkwargs(cls)\n\n _default_class_register.add_class(\n cls, wrap_use, type_name\n )\n return cls\n return wrapper\n\n\ndef make_pairs_hook(classes=None, BaseHook=None, DefaultBase=None):\n \"\"\"\n Wrapper to generate :class:`ObjectHook`. Calling this function will configure\n an instance of :class:`ObjectHook` and return a callable suitable for\n passing to :func:`json.loads` as ``object_pairs_hook``.\n\n Dictionaries/objects without a ``__type__``\n key are encoded as ObjDict objects ::\n\n >>> json_str = '{\"obj\":{\"inside\": \"value\"}}'\n >>> loader(json_str)\n {\"obj\":obnj dict\n >>> # lists work too\n >>> json_str = '''[\n ... {\"first_name\": \"bob\", \"last_name\": \"smith\"},\n ... {\"first_name\": \"jane\", \"last_name\": \"smith\"}\n ... ]'''\n >>> loader(json_str, as_type=\"Person\")\n [, ]\n\n .. note::\n\n Assumes every object a ``__type__`` kw is ObjDict\n\n ``handlers`` is an ObjDict with this format::\n\n {\"Person\": {\"cls\": Person, \"handler\": person_decoder, \"schema\": PersonSchema)}\n\n If you do not wish to decorate your classes with :func:`from_json` you\n can specify the same parameters via the ``classes`` keyword argument.\n Here is an example::\n\n >>> class Person(object):\n ... def __init__(self, first_name, last_name):\n ... self.first_name = first_name\n ... self.last_name = last_name\n ...\n >>> def person_decoder(cls, obj):\n ... return cls(obj[\"first_name\"], obj[\"last_name\"])\n\n >>> handlers = {\"Person\": {\"cls\": Person, \"handler\": person_decoder}}\n >>> person = loader(json_str, handlers=handlers)\n >>> # Or invoking the object_hook interface ourselves\n >>> person = json.loads(json_str, object_pairs_hook=make_pairs_hook(handlers))\n\n .. note::\n\n If you decorate a class with :func:`from_json` you can specify\n a list of classes to use\n \"\"\"\n\n return ObjPairHook(classes, BaseHook, DefaultBase).from_json\n\n\ndef loads(json_str, **kw):\n \"\"\"\n Call this function as you would call :func:`json.loads`. It wraps the\n :ref:`make_pairs_hook` interface and returns python class instances from JSON\n strings.\n\n :param ensure_type: Check that the resulting object is of type\n ``ensure_type``. Raise a ValidationError otherwise.\n :param handlers: is a dict of handlers. see :func:`make_pairs_hook`.\n :param as_type: explicitly specify the type of object the JSON\n represents. see :func:`make_pairs_hook`\n :param validate: Set to False to turn off validation (ie dont run the\n schemas) during this load operation. 
Defaults to True.\n :param kw: the rest of the kw args will be passed to the underlying\n :func:`json.loads` calls.\n\n\n \"\"\"\n\n #object_pairs_hook=ObjDict,\n # DefaultType=DefaultType, parse_float=decimal.Decimal\n\n baseword = \"object_pairs_hook\"\n\n #hookword= baseword if baseword in kw else \"object_hook\"\n kw[baseword] = make_pairs_hook(\n kw.pop(\"handlers\", None),\n kw.pop(baseword, kw.pop(\"object_hook\", ObjDict)),\n kw.pop(\"DefaultType\", ObjDict)\n )\n\n #ensure_type = kw.pop(\"ensure_type\", _as_type_context.top)\n\n #print(\"kw dict\",kw)\n\n try:\n obj = json.loads(json_str, **kw)\n except ValueError as error:\n #import pdb; pdb.set_trace()\n print(\"error txt\",error, error.args[0])\n raise JsonDecodeError(error.args[0])\n\n # if ensure_type:\n # return EnsureType(ensure_type).validate(obj)\n return obj\n\n##--------------------------------------------------------------------\n##--------------------------------------- to json\n##--------------------------------------------------------------------\n\n\nclass EncodeArgs:\n __type__ = None\n serialize_as = None\n handler = None\n suppress = None\n\n\ndef std__json__(self, enc=None, internal=False, **kw):\n \"\"\"\n ---note self reference not updated obj is self!\n\n Handles encoding instance objects of classes decorated by\n :func:`to_json`. Returns a dict containing all the key/value pairs\n in ``obj.__dict__``. Excluding attributes that\n\n * start with an underscore.\n * were specified with the ``suppress`` keyword argument.\n\n The returned dict will be encoded into JSON.\n\n .. note::\n\n Override this method if you wish to change how ALL objects are\n encoded into JSON objects.\n\n \"\"\"\n if not internal:\n return dumps(self, cls=ObjectEncoderAll, **kw)\n encode = self.__json__encode\n suppress = encode.suppress\n\n if enc and enc._exclude_nulls is not None:\n exclude_nulls = enc._exclude_nulls\n else:\n exclude_nulls = encode.exclude_nulls\n\n json_obj = {}\n\n def suppressed(key):\n return key in suppress or (enc and key in enc._hard_suppress)\n \n l1 = dir(self)\n if isinstance(self, dict):\n l2 = l1\n l1 = [i for i in self.keys()]\n for attr in l1:\n if attr == '__type__' or (\n not attr.startswith(\"_\") and not suppressed(attr)):\n value = getattr(self, attr)\n if value is None and exclude_nulls:\n continue\n if not isinstance(value, types.MethodType):\n json_obj[attr] = value\n if not suppressed(\"__type__\"):\n tname = self.__class__.__name__ # encode.__type__\n if tname not in ('ObjDict',):\n json_obj[\"__type__\"] = tname\n return json_obj\n\ndef to_json(cls_type=None, suppress=None, handler=None, exclude_nulls=False):\n \"\"\"\n Decorateor. To make your class instances JSON encodable decorate them with\n :func:`to_object`. The python built-in :py:func:`dir` is called on the\n class instance to retrieve key/value pairs that will make up the JSON\n object (*Minus any attributes that start with an underscore or any\n attributes that were specified via the* ``suppress`` *keyword argument*).\n\n Here is an example::\n\n >>> from jsonweb import to_object\n >>> @to_object()\n ... class Person(object):\n ... def __init__(self, first_name, last_name):\n ... self.first_name = first_name\n ... self.last_name = last_name\n\n >>> person = Person(\"Shawn\", \"Adams\")\n >>> dumper(person)\n '{\"__type__\": \"Person\", \"first_name\": \"Shawn\", \"last_name\": \"Adams\"}'\n\n A ``__type__`` key is automatically added to the JSON object. Its value\n should represent the object type being encoded. 
By default it is set to\n the value of the decorated class's ``__name__`` attribute. You can\n specify your own value with ``cls_type``::\n\n >>> from jsonweb import to_object\n >>> @to_object(cls_type=\"PersonObject\")\n ... class Person(object):\n ... def __init__(self, first_name, last_name):\n ... self.first_name = first_name\n ... self.last_name = last_name\n\n >>> person = Person(\"Shawn\", \"Adams\")\n >>> dumper(person)\n '{\"__type__\": \"PersonObject\", \"first_name\": \"Shawn\", \"last_name\": \"Adams\"}'\n\n If you would like to leave some attributes out of the resulting JSON\n simply use the ``suppress`` kw argument to pass a list of attribute\n names::\n\n >>> from jsonweb import to_object\n >>> @to_object(suppress=[\"last_name\"])\n ... class Person(object):\n ... def __init__(self, first_name, last_name):\n ... self.first_name = first_name\n ... self.last_name = last_name\n\n >>> person = Person(\"Shawn\", \"Adams\")\n >>> dumper(person)\n '{\"__type__\": \"Person\", \"first_name\": \"Shawn\"}'\n\n You can even suppress the ``__type__`` attribute ::\n\n @to_object(suppress=[\"last_name\", \"__type__\"])\n ...\n\n Sometimes it's useful to suppress ``None`` values from your JSON output.\n Setting ``exclude_nulls`` to ``True`` will accomplish this ::\n\n >>> from jsonweb import to_object\n >>> @to_object(exclude_nulls=True)\n ... class Person(object):\n ... def __init__(self, first_name, last_name):\n ... self.first_name = first_name\n ... self.last_name = last_name\n\n >>> person = Person(\"Shawn\", None)\n >>> dumper(person)\n '{\"__type__\": \"Person\", \"first_name\": \"Shawn\"}'\n\n .. note::\n\n You can also pass most of these arguments to :func:`dumper`. They\n will take precedence over what you passed to :func:`to_object` and\n only effects that one call.\n\n If you need greater control over how your object is encoded you can\n specify a ``handler`` callable. It should accept one argument, which is\n the object to encode, and it should return a dict. This would override the\n default object handler :func:`JsonWebEncoder.object_handler`.\n\n Here is an example::\n\n >>> from jsonweb import to_object\n >>> def person_encoder(person):\n ... return {\"FirstName\": person.first_name,\n ... \"LastName\": person.last_name}\n ...\n >>> @to_object(handler=person_encoder)\n ... class Person(object):\n ... def __init__(self, first_name, last_name):\n ... self.guid = 12334\n ... self.first_name = first_name\n ... self.last_name = last_name\n\n >>> person = Person(\"Shawn\", \"Adams\")\n >>> dumper(person)\n '{\"FirstName\": \"Shawn\", \"LastName\": \"Adams\"}'\n\n\n You can also use the alternate decorator syntax to accomplish this. See\n :func:`jsonweb.encode.handler`.\n\n \"\"\"\n def wrapper(cls):\n if not hasattr(cls, '__json__'):\n cls.__json__ = std__json__\n encode = EncodeArgs()\n encode.serialize_as = \"json_object\"\n #cls._encode.handler = handler\n encode.suppress = suppress or []\n encode.exclude_nulls = exclude_nulls\n encode.__type__ = cls_type or cls.__name__\n cls.__json__encode = encode\n return cls #__inspect_for_handler(cls)\n return wrapper\n\n#--------------------------------------------\n# Encode + dumps\nstd_e_args = object()\n#std_e_args.\n\nclass JsonEncoder(json.JSONEncoder):\n \"\"\"\n This :class:`json.JSONEncoder` subclass is responsible for encoding\n instances of classes that have been decorated with :func:`to_json` or\n :func:`to_list`. 
Pass :class:`JsonWebEncoder` as the value for the\n ``cls`` keyword argument to :func:`json.dump` or :func:`json.dumps`.\n\n Example::\n\n json.dumps(obj_instance, cls=JsonWebEncoder)\n\n Using :func:`dumper` is a shortcut for the above call to\n :func:`json.dumps` ::\n\n dumper(obj_instance) #much nicer!\n\n\n object to be encoded should have:\n __json__encode property, which has encode arguments\n\n \"\"\"\n\n _DT_FORMAT = \"%Y-%m-%dT%H:%M:%S\"\n _D_FORMAT = \"%Y-%m-%d\"\n\n def __init__(self, **kw):\n self._hard_suppress = kw.pop(\"suppress\", [])\n self._exclude_nulls = kw.pop(\"exclude_nulls\", None)\n self._handlers = kw.pop(\"handlers\", {})\n if not isinstance(self._hard_suppress, list):\n self._hard_suppress = [self._hard_suppress]\n json.JSONEncoder.__init__(self, **kw)\n\n def default(self, o):\n # if o.__class__.__name__ not in ('DataModel','Decimal','date',\n # 'datetime','ObjectId'): #== 'RawClass':\n # pass # placeholder for debug\n if hasattr(o, '__json__'):\n e_args = getattr(o, \"__json__encode\", std_e_args)\n\n # Passed in handlers take precedence.\n if o.__class__ in self._handlers: # was e_args.__type__ ...why (29/11/2016)\n assert False, \"never gets here in test\"\n return self._handlers[e_args.__type__](o)\n elif getattr(e_args, 'handler', None):\n assert False, \"doesn't get here either\"\n if e_args.handler_is_instance_method:\n return getattr(o, e_args.handler)()\n return e_args.handler(o)\n elif getattr(e_args, 'serialize_as', 'json_object') == \"json_object\":\n #return self.object_handler(o)\n return o.__json__(internal=True)\n elif e_args.serialize_as == \"json_list\":\n return self.list_handler(o)\n\n # if isinstance(o, datetime.datetime):\n # return o.strftime(self._DT_FORMAT)\n # if isinstance(o, datetime.date):\n # return o.strftime(self._D_FORMAT)\n\n encoder = json_registry.find(o.__class__)\n if encoder:\n return json_registry.do_method(encoder, o)\n\n return json.JSONEncoder.default(self, o)\n\n def encode(self, o):\n #import pdb; pdb.set_trace()\n return super(JsonEncoder, self).encode(o)\n\n\ndef list_handler(self, obj):\n \"\"\"\n Handles encoding instance objects of classes decorated by\n :func:`to_list`. Simply calls :class:`list` on ``obj``.\n\n .. 
note::\n\n Override this method if you wish to change how ALL objects are\n encoded into JSON lists.\n\n \"\"\"\n return list(obj)\n\nclass ObjectEncoderStd(JsonEncoder):\n def encode(self, o):\n if isinstance(o, ObjDict):\n if o._set_type:\n o['__type__'] = o.__class__.__name__\n res = super(ObjectEncoderStd, self).encode(o)\n pass\n return res\n return super(ObjectEncoderStd, self).encode(o)\n def default(self, o):\n return super(ObjectEncoderStd, self).default(o)\n\n#class ObjectEncoderAll(json.JSONEncoder):\nclass ObjectEncoderAll(ObjectEncoderStd): #JsonEncoder):\n def default(self, o):\n try:\n return super(ObjectEncoderAll, self).default(o)\n except TypeError as e:\n return str(o)\n\n\n\n#----------------------------------------------\n#------- ObjDict class\n#----------------------------------------------\n\n@to_json()\nclass Holder:\n def __init__(self,value,method):\n self.method = method\n self.value = value\n\n def __call__(self, *args, **kwargs):\n return self.method(*args, **kwargs)\n\n def __get__(self, obj, ibjtype):\n import pdb; pdb.set_trace()\n return self.value\n\n def __set__(self, obj, value):\n self.value = value\n\n def __json__(self, *args, **kwargs):\n return str(self.value)\n\n@to_json() #note handled by dict encoder\n@from_json()\nclass ObjDict(OrderedDict):\n _DICT_RESERVED =set(('items','keys','get','copy', 'dumps', 'from_json', 'fromkeys', 'get', 'items', 'keys',\n 'loads', 'make_pairs_hook', 'move_to_end', 'pop', 'popitem', 'setdefault',\n 'to_json', 'update', 'values','_DICT_RESERVED'))\n _DICT_RESERVED_NON_CALLS =set(('_DICT_RESERVED',))\n to_json = to_json\n from_json = from_json\n make_pairs_hook = make_pairs_hook\n JsonEncoder = ObjectEncoderAll\n loads=loads\n JsonDecodeError = JsonDecodeError\n\n #_set_type=False # can be overriden in instance\n def __init__(self,*args,**kwargs):\n set_type= kwargs.pop('_set_type',None)\n self._skipped_attrs = [] # data where key prevents being an attr\n self._seen_attrs= [] #reserved attributes already processed\n self.__keys__= kwargs.pop('__keys__', getattr(self,'__keys__',[]))\n self.__decimal__= kwargs.pop('__decimal__', getattr(self,'__decimal__',False))\n \n if isinstance(self.__keys__,str):\n self.__keys__=self.__keys__.split()\n predata=[]\n if len(args)==1 and isinstance(args[0],str):\n loadkw = {}\n if self.__decimal__:\n loadkw['parse_float'] = decimal.Decimal \n predata=loads(args[0], **loadkw).items()\n args=[]\n elif self.__keys__ and len(args)== len(self.__keys__):\n predata=zip(self.__keys__,args)\n args=[]\n super(ObjDict,self).__init__(*args,**kwargs)\n if set_type is not None:\n self._set_type=set_type\n for arg in args:\n if isinstance(arg,dict):\n self.__dict__.update(**arg)\n\n # the following blocks would create duplication\n #self.__dict__.update(**kwargs)\n #self.update(**args)\n\n for key,data in predata:\n self[key]=data\n\n clnm = self.__class__.__name__ #set type for derived classes\n if clnm != 'ObjDict':\n self['__type__'] = clnm \n @property\n def _set_type(self):\n return self.__dict__.get(\n '_set_type', self.__class__.__name__ != 'ObjDict')\n @_set_type.setter\n def _set_type(self,value):\n self.__dict__['_set_type'] = value #set inst value hiding property\n\n def __setitem__(self,key,value):\n\n super(ObjDict,self).__setitem__(key,value)\n if key in self._DICT_RESERVED and key not in self._seen_attrs:\n if False: #True: #isinstance(value,ObjDict) and key not in self._DICT_RESERVED_NON_CALLS:\n #this code waiting on OBJD-42\n call = getattr(super(ObjDict,self),key)\n 
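# (added note) Holder would proxy the real dict method via __call__ while exposing the\n                # stored value through __get__/__set__, so reserved keys like 'items' stay callable\n                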
setattr(self,key,Holder(value,call))\n\n else:\n self._skipped_attrs.append(key)\n self._seen_attrs.append(key)\n else:\n setattr(self,key,value)\n\n def __setattr__(self,attr,value):\n super(ObjDict,self).__setattr__(attr,value)\n if attr[:1] != '_':\n super(ObjDict,self).__setitem__(attr,value)\n\n\n # def __getitem__(self, key):\n # if key == 'bms':\n # import pdb; pdb.set_trace()\n # return super(ObjDict,self).__getitem__(key)\n\n @classmethod\n def __from_json__(cls,thedict):\n \"\"\" same result as 'use=True' so makes this the default \"\"\"\n #print('in handler',cls,thedict)\n return cls(thedict)\n\n def __str__(self):\n return self.__json__()\n\n def __repr__(self):\n return self.__class__.__name__ + \": \" + self.__json__()\n\n def dumps(obj, **kw):\n \"\"\"\n note: not called 'self' as can be used as unbound class method\n JSON encode your class instances by calling this function as you would\n call :func:`json.dumps`. ``kw`` args will be passed to the underlying\n json.dumps call.\n\n :param handlers: A dict of type name/handler callable to use.\n ie {\"Person:\" person_handler}\n\n :param cls: To override the given encoder. Should be a subclass\n of :class:`JsonWebEncoder`.\n\n :param suppress: A list of extra fields to suppress (as well as those\n suppressed by the class).\n\n :param exclude_nulls: Set True to suppress keys with null (None) values\n from the JSON output. Defaults to False.\n \"\"\"\n return json.dumps(obj, cls=kw.pop(\"cls\", ObjectEncoderAll), **kw)\n\ndumps=ObjDict.dumps\n","sub_path":"objdict/objDict.py","file_name":"objDict.py","file_ext":"py","file_size_in_byte":31809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"98388645","text":"\nfrom django.core.urlresolvers import reverse\nfrom django.template import defaultfilters as filters\nfrom django.utils import http\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ungettext_lazy\n\nfrom horizon import exceptions\nfrom horizon import tables\n\nfrom openstack_dashboard import api\nfrom openstack_dashboard import policy\n\nfrom openstack_dashboard.dashboards.mydashboard.mypanel.horizon_pardnet_client import HorizonPardnetClient\nclient = HorizonPardnetClient()\n\n\nclass InstancesTable(tables.DataTable):\n name = tables.Column(\"name\", verbose_name=_(\"Name\"))\n status = tables.Column(\"status\", verbose_name=_(\"Status\"))\n zone = tables.Column('availability_zone',\n verbose_name=_(\"Availability Zone\"))\n image_name = tables.Column('image_name', verbose_name=_(\"Image Name\"))\n\n class Meta:\n name = \"instances\"\n verbose_name = _(\"Instances\")\n\nclass AddController(tables.LinkAction):\n name = \"add\"\n verbose_name = _(\"Add a Controller\")\n url = \"horizon:mydashboard:mypanel:controller:controller_add\"\n classes = (\"ajax-modal\",)\n icon = \"plus\"\n\nclass DeleteController(policy.PolicyTargetMixin, tables.DeleteAction):\n name = \"deletecontroller\"\n policy_rules = ()\n\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n u\"Delete Controller\",\n u\"Delete Controllers\",\n count\n )\n\n @staticmethod\n def action_past(count):\n return ungettext_lazy(\n u\"Scheduled deletion of Controller\",\n u\"Scheduled deletion of Controllers\",\n count\n )\n\n def allowed(self, request, datum=None):\n return True\n \n def delete(self, request, controller_id):\n client.remove_resource(\"controllers/%s\" % controller_id)\n\nclass UpdateController(policy.PolicyTargetMixin, 
tables.LinkAction):\n name = \"update\"\n verbose_name = _(\"Edit Controller\")\n classes = (\"ajax-modal\", \"btn-update\",)\n policy_rules = ((\"network\", \"update_pool\"),)\n\n def get_link_url(self, controller):\n base_url = reverse(\"horizon:mydashboard:mypanel:controller:controller_update\",\n kwargs={'controller_id': controller.id})\n return base_url\n\n\n\nclass ControllerTable(tables.DataTable):\n name = tables.Column(\"name\", verbose_name=_(\"Name\"))\n description = tables.Column(\"description\",verbose_name=_(\"Description\"))\n address = tables.Column(\"address\", verbose_name=_(\"IP Address\"))\n username = tables.Column(\"username\", verbose_name=_(\"Username\"))\n password = tables.Column(\"password\", verbose_name=_(\"Password\"))\n status = tables.Column(\"status\", verbose_name=_(\"Status\"))\n# admin_state_up = tables.Column(\"admin_state_up\", verbose_name=_(\"Status\"))\n\n class Meta:\n name = \"controllers\"\n verbose_name = _(\"Controllers\")\n table_actions = (AddController,DeleteController,)\n row_actions = (UpdateController,)\n\nclass AddLbswitch(tables.LinkAction):\n name = \"add\"\n verbose_name = _(\"Add a Switch\")\n url = \"horizon:mydashboard:mypanel:lbswitch:add\"\n classes = (\"ajax-modal\",)\n icon = \"plus\"\n\n\n\nclass DeleteLbswitch(policy.PolicyTargetMixin, tables.DeleteAction):\n name = \"deletelbswitch\"\n policy_rules = ()\n\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n u\"Delete LBswitch\",\n u\"Delete LBswitchs\",\n count\n )\n\n @staticmethod\n def action_past(count):\n return ungettext_lazy(\n u\"Scheduled deletion of Switch\",\n u\"Scheduled deletion of Switchs\",\n count\n )\n\n def allowed(self, request, datum=None):\n return True\n \n def delete(self, request, switch_id):\n client.remove_resource(\"switches/%s\" % switch_id)\n\nclass UpdateLbswitch(policy.PolicyTargetMixin, tables.LinkAction):\n name = \"update\"\n verbose_name = _(\"Edit Switch\")\n classes = (\"ajax-modal\", \"btn-update\",)\n policy_rules = ((\"network\", \"update_pool\"),)\n\n def get_link_url(self, switch):\n base_url = reverse(\"horizon:mydashboard:mypanel:lbswitch:update\",\n kwargs={'switch_id': switch.id})\n return base_url\n\n\n\nclass LbswitchTable(tables.DataTable):\n name = tables.Column(\"name\", verbose_name=_(\"Name\"))\n description = tables.Column(\"description\", verbose_name=_(\"Description\"))\n address = tables.Column(\"address\", verbose_name=_(\"IP Address\"))\n# controller_id = tables.Column(\"controller_id\", verbose_name=_(\"ControllerID\"))\n controller_name = tables.Column(\"controller_name\", verbose_name=_(\"ControllerName\"))\n node_type = tables.Column(\"node_type\", verbose_name=_(\"Node_type\"))\n dpid = tables.Column(\"dpid\", verbose_name=_(\"DatapathID\"))\n admin_state_up = tables.Column(\"admin_state_up\", verbose_name=_(\"Status\"))\n\n class Meta:\n name = \"lbswitch\"\n verbose_name = _(\"Switches\")\n table_actions = (AddLbswitch,DeleteLbswitch,)\n row_actions = (UpdateLbswitch,)\n\nclass AddHaproxy(tables.LinkAction):\n name = \"add\"\n verbose_name = _(\"Add Haproxy Info\")\n url = \"horizon:mydashboard:mypanel:haproxy:add\"\n classes = (\"ajax-modal\",)\n icon = \"plus\"\n\n\nclass UpdateHaproxy(policy.PolicyTargetMixin, tables.LinkAction):\n name = \"update\"\n verbose_name = _(\"Edit Haproxy Info\")\n classes = (\"ajax-modal\", \"btn-update\",)\n policy_rules = ((\"network\", \"update_pool\"),)\n\n def get_link_url(self, haproxy):\n base_url = 
reverse(\"horizon:mydashboard:mypanel:haproxy:update\",\n kwargs={'haproxy_id': haproxy.id})\n return base_url\n\nclass DeleteHaproxy(policy.PolicyTargetMixin, tables.DeleteAction):\n name = \"deletehaproxy\"\n policy_rules = ()\n\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n u\"Delete Haproxy\",\n u\"Delete Haproxys\",\n count\n )\n\n @staticmethod\n def action_past(count):\n return ungettext_lazy(\n u\"Scheduled deletion of Haproxy\",\n u\"Scheduled deletion of Haproxys\",\n count\n )\n\n def delete(self, request, haproxy_id):\n client.remove_resource(\"haproxies/%s\" % haproxy_id)\n\n\n\nclass HaproxyTable(tables.DataTable):\n name = tables.Column(\"name\", verbose_name=_(\"Name\"))\n description = tables.Column(\"description\",verbose_name=_(\"Description\"))\n address = tables.Column(\"address\", verbose_name=_(\"IP Address\"))\n admin_state_up = tables.Column(\"admin_state_up\", verbose_name=_(\"Status\"))\n\n class Meta:\n name = \"haproxy\"\n verbose_name = _(\"Haproxys\")\n table_actions = (AddHaproxy,DeleteHaproxy,)\n row_actions = (UpdateHaproxy,)\n\n\n\n\n\n\n\n","sub_path":"mypanel/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":6786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"468613599","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 19 22:01:53 2016\n\n@author: ronaldo\n\"\"\"\nimport numpy as np\n\nfrom itertools import *\n\n\ndef possibleBins(choices):\n \"\"\"\n choices: a non-empty list of ints\n \n Returns a list with \n \n the power set of 0's and 1's of len(choices), e.g. 2**len(choices)\n \n e.g. choices= [1,2,3] -> [0,0,0], [1,0,0], [0,1,0], [0,0,1], [1,1,0] ,\n [1,0,1], [0,1,1], [1,1,1]\n \n \"\"\"\n \n comb = [] \n \n for i in range(len(choices)):\n array = len(choices) * [0]\n for j in range(i+1):\n array[j] = 1\n comb = comb + [i for i in permutations(array)]\n\n comb = set(comb)\n \n return comb\n \n \ndef find_combination(choices, total):\n \"\"\"\n choices: a non-empty list of ints\n total: a positive int\n \n Returns result, a numpy.array of length len(choices) \n such that\n * each element of result is 0 or 1\n * sum(result*choices) == total\n * sum(result) is as small as possible\n In case of ties, returns any result that works.\n If there is no result that gives the exact total, \n pick the one that gives sum(result*choices) closest \n to total without going over.\n \"\"\"\n \n # I could find an implementation which uses this hint gave at the \n # exercise:\n # choices = np.array([bin(i)[2:] for i in choices])\n\n choices = np.array(choices)\n \n combinations = possibleBins(choices)\n \n # Initialize the best values\n maxValue = 0\n \n bestComb = np.array([0] * len(choices))\n \n \n for comb in combinations:\n \n # The purpose of this function is to maximize this value and minimize\n # the sum of true values in the bestComb array\n v = np.dot(np.array(comb),choices)\n \n # This is the constraint\n if v > total:\n next\n \n elif v < total:\n \n if v > maxValue:\n maxValue = v\n bestComb = np.array(comb)\n \n elif v == maxValue:\n # tests if current match is better\n if sum(np.array(comb)) < sum(bestComb):\n bestComb = np.array(comb)\n \n \n else: # v == total\n if maxValue == v: #There is already a match kept\n # it tests if it is a better array\n if sum(np.array(comb)) < sum(bestComb):\n bestComb = np.array(comb)\n else: # This is the best match \n maxValue = v\n bestComb = np.array(comb)\n \n return bestComb\n 
\n \nprint(find_combination([1,2,2,3], 4)) \n\nprint(find_combination([4, 6, 3, 5, 2], 10)) \n\nprint(find_combination([1, 3, 4, 2, 5], 16)) \n\nprint(find_combination([4, 10, 3, 5, 8], 1)) \n \n \n","sub_path":"Final/problem6.py","file_name":"problem6.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"324496394","text":"'''\n@Author: ruoru\n@Date: 2019-11-05 10:02:48\n@LastEditors: ruoru\n@LastEditTime: 2019-11-05 10:06:54\n@Description: https://leetcode-cn.com/explore/interview/card/top-interview-questions-easy/5/strings/36/\n'''\nclass Solution(object):\n def isPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n s = [c for c in s.lower() if c.isalpha() or c.isnumeric()]\n return list(reversed(s)) == s\n\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.isPalindrome(\"A man, a plan, a canal: Panama\"))\n\n ","sub_path":"explore_cn/top_interview_questions_easy/b_strings/02_strings_36.py","file_name":"02_strings_36.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"135584636","text":"import re\r\nimport os\r\nimport sys\r\ndef isfloat(value):\r\n try:\r\n float(value)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\ndef read_chi_square_distribution():\r\n # dir = os.path.split(sys.argv[0])[0]\r\n dir = os.path.dirname(os.path.realpath(__file__))\r\n file = open(dir+'/chi-square distribution.txt','r')\r\n flag = 0\r\n chi_square_conf = []\r\n chi_square_dis = dict()\r\n for line in file.readlines():\r\n line=re.split('\\t|\\n',line)\r\n if (flag == 0):\r\n for i in range(13):\r\n chi_square_conf.append(float(line[i+1]))\r\n flag = 1\r\n else:\r\n free_degree=int(line[0])\r\n temp=dict()\r\n for i in range(13):\r\n if (isfloat(line[i+1])):\r\n temp[chi_square_conf[i]]=float(line[i+1])\r\n else:\r\n temp[chi_square_conf[i]]=0.000001\r\n chi_square_dis[free_degree]=temp\r\n file.close()\r\n return chi_square_conf, chi_square_dis\r\n\r\ndef read_normal_distribution():\r\n # dir = os.path.split(sys.argv[0])[0]\r\n dir = os.path.dirname(os.path.realpath(__file__))\r\n file = open(dir + '/normal distribution.txt','r')\r\n flag = 0\r\n normal_conf = []\r\n normal_dis = dict()\r\n for line in file.readlines():\r\n line=re.split('\\t|\\n',line)\r\n if (flag == 0):\r\n for i in range(13):\r\n normal_conf.append(float(line[i]))\r\n flag = 1\r\n else:\r\n for i in range(13):\r\n normal_dis[normal_conf[i]]=float(line[i])\r\n\r\n\r\n file.close()\r\n return normal_conf, normal_dis\r\n\r\ndef read_t_distribution():\r\n dir = os.path.split(sys.argv[0])[0]\r\n file = open(dir+'/t distribution.txt','r')\r\n flag = 0\r\n t_conf = []\r\n t_dis = dict()\r\n for line in file.readlines():\r\n line=re.split('\\t|\\n',line)\r\n if (flag == 0):\r\n for i in range(11):\r\n t_conf.append(float(line[i+1]))\r\n flag = 1\r\n else:\r\n free_degree=int(line[0])\r\n temp=dict()\r\n for i in range(11):\r\n if (isfloat(line[i+1])):\r\n temp[t_conf[i]]=float(line[i+1])\r\n else:\r\n print(\"t dis error\")\r\n t_dis[free_degree]=temp\r\n file.close()\r\n return t_conf, t_dis","sub_path":"src/non_feature_based/baselines/CATD/read_distribution.py","file_name":"read_distribution.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"190619184","text":"###########################################################################\n# \n# Copyright 2019 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n###########################################################################\n\n'''\nSheet Copy\n\nCopy tab from a sheet to a sheet.\n\nP\nr\no\nv\ni\nd\ne\n \nt\nh\ne\n \nf\nu\nl\nl\n \ne\nd\ni\nt\n \nU\nR\nL\n \nf\no\nr\n \nb\no\nt\nh\n \ns\nh\ne\ne\nt\ns\n.\n\n\nP\nr\no\nv\ni\nd\ne\n \nt\nh\ne\n \nt\na\nb\n \nn\na\nm\ne\n \nf\no\nr\n \nb\no\nt\nh\n \ns\nh\ne\ne\nt\ns\n.\n\n\nT\nh\ne\n \nt\na\nb\n \nw\ni\nl\nl\n \no\nn\nl\ny\n \nb\ne\n \nc\no\np\ni\ne\nd\n \ni\nf\n \ni\nt\n \nd\no\ne\ns\n \nn\no\nt\n \na\nl\nr\ne\na\nd\ny\n \ne\nx\ni\ns\nt\n.\n\n'''\n\nfrom starthinker_airflow.factory import DAG_Factory\n \nUSER_CONN_ID = \"google_cloud_default\" # The connection to use for user authentication.\nGCP_CONN_ID = \"\" # The connection to use for service authentication.\n\nINPUTS = {\n 'from_sheet': '',\n 'from_tab': '',\n 'to_sheet': '',\n 'to_tab': '',\n}\n\nTASKS = [\n {\n 'sheets': {\n 'auth': 'user',\n 'template': {\n 'sheet': {\n 'field': {\n 'name': 'from_sheet',\n 'kind': 'string',\n 'order': 1,\n 'default': ''\n }\n },\n 'tab': {\n 'field': {\n 'name': 'from_tab',\n 'kind': 'string',\n 'order': 2,\n 'default': ''\n }\n }\n },\n 'sheet': {\n 'field': {\n 'name': 'to_sheet',\n 'kind': 'string',\n 'order': 3,\n 'default': ''\n }\n },\n 'tab': {\n 'field': {\n 'name': 'to_tab',\n 'kind': 'string',\n 'order': 4,\n 'default': ''\n }\n }\n }\n }\n]\n\nDAG_FACTORY = DAG_Factory('sheets_copy', { 'tasks':TASKS }, INPUTS)\nDAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)\nDAG = DAG_FACTORY.execute()\n\nif __name__ == \"__main__\":\n DAG_FACTORY.print_commandline()\n","sub_path":"starthinker_airflow/dags/sheets_copy_dag.py","file_name":"sheets_copy_dag.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"352382334","text":"# There's no logic or real code here... just data. 
You need to add some code to\n# the test!\n\nalice_name = \"Alice\"\nalice_age = 20\nalice_is_drinking = True\n\nbob_name = \"Bob\"\nbob_age = 12\nbob_is_drinking = False\n\ncharles_name = \"Charles\"\ncharles_age = 22\ncharles_is_drinking = True\n","sub_path":"challenges/04-Functions/C_your_own_test.py","file_name":"C_your_own_test.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"344272167","text":"from linked_list import Node, LinkedList\n# 2-3-7\n# 3-11\n# merge to become 2-3-3-7-11\nl1 = LinkedList(Node(2))\nl2 = LinkedList(Node(3))\nl1.add_end(3)\nl1.add_end(7)\nl2.add_end(11)\n\ndef merge_sorted_linkedlists(l1, l2):\n c1 = l1.head\n c2 = l2.head\n ans = tail = Node(0)\n while c1 and c2:\n if c1.data < c2.data:\n print(c1.data)\n tail.next = c1\n c1 = c1.next\n else:\n print(c2.data)\n tail.next = c2\n c2 = c2.next\n tail = tail.next\n while c1:\n print(c1.data)\n tail.next = c1\n c1 = c1.next\n while c2:\n print(c2.data)\n tail.next = c2\n c2 = c2.next\n return ans\n\ntail = merge_sorted_linkedlists(l1, l2)\ntail = LinkedList(tail)\nprint(tail.show())","sub_path":"merge_linked_list.py","file_name":"merge_linked_list.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"256531124","text":"from flask import redirect\nfrom flask_admin.base import expose\nfrom flask_admin.menu import MenuLink\nfrom jinja2 import Markup\nfrom sqlalchemy.ext.declarative import declarative_base\nimport sqlalchemy as sa\n\nfrom . import admin as at\n\n\ndef init_admin(app):\n admin = app.extensions['admin'][0]\n mm = app.config[\"ust_models\"]\n db = app.extensions['sqlalchemy'].db\n\n class PaymentSummaryViewView(at.UstModelView, at.UstReportsMixin):\n can_create = False\n can_edit = False\n can_delete = False\n can_view_details = True\n can_export = True\n column_filters = [\n 'account',\n 'period_id',\n 'end_date',\n 'payroll_frequency',\n 'transaction_type',\n 'deduction_type',\n 'failed'\n ]\n column_exclude_list = ['payroll_frequency_id']\n\n @expose('/')\n def index_view(self):\n # Grab parameters from URL\n view_args = self._get_list_extra_args()\n print(view_args.filters)\n print(len(view_args.filters))\n if not view_args.filters:\n db = app.extensions['sqlalchemy'].db\n rs = db.engine.execute(\"\"\"\n SELECT id\n FROM ust_period\n WHERE\n end_date BETWEEN now() - interval '21 days'\n AND now() + interval '18 days';\n \"\"\")\n periods = [r['id'] for r in rs]\n target = self.get_url('.index_view',\n flt0_12=\",\".join([\"%d\" % p for p in\n periods]))\n return redirect(target)\n return at.UstModelView.index_view(self)\n\n admin.add_view(\n PaymentSummaryViewView(\n mm[\"PaymentSummaryView\"],\n db.session,\n name=\"Payment Summary\",\n category='Dues',\n )\n )\n\n class OrgRateViewView(at.UstModelView, at.UstReportsMixin):\n can_create = False\n can_edit = False\n can_delete = False\n can_view_details = True\n can_export = True\n column_searchable_list = [\n 'employment_level_label', 'payroll_frequency_label',\n 'local', 'sublocal', 'description'\n ]\n column_filters = [\n 'rate', 'employment_level_label', 'payroll_frequency_label',\n 'local', 'sublocal', 'description'\n ]\n column_exclude_list = [\n 'org_rate_id', 'grandfathered', 'employment_level_id',\n 'payroll_frequency_id', 'legacy_council', 'org_id'\n ]\n\n admin.add_view(\n OrgRateViewView(\n mm['OrgRateView'],\n db.session,\n name='Dues Rates',\n 
category='Dues',\n )\n )\n\n class PersonDeductionDetailViewView(at.UstModelView, at.UstReportsMixin):\n can_create = False\n can_edit = False\n can_delete = False\n can_view_details = True\n column_display_pk = True\n named_filter_urls = True\n page_size = 20\n can_count = True\n can_export = True\n rowlist_default = ['organizer', 'fieldrep']\n #collist_default = ['rating']\n collist_default = ['transaction_type']\n column_labels = dict(exclude='rating', period='Organzing Rating')\n column_editable_list = [\n \"amount\",\n \"period_count\",\n \"pay_forward\",\n \"verified\",\n ]\n column_searchable_list = [\n 'fullname',\n ]\n column_filters = [\n \"status\",\n \"relationship\",\n \"local\",\n \"sublocal\",\n \"fieldrep\",\n \"amount\",\n \"period_count\",\n \"pay_forward\",\n \"verified\",\n \"deduction_run\",\n \"account_id\",\n \"account\",\n \"transaction_type\",\n \"deduction_type\",\n \"organizer\",\n \"visit_type\",\n \"rating\",\n \"rating_date\",\n \"last_paid_date\",\n ]\n column_exclude_list = [\n \"person_deduction_id\",\n \"firstname\",\n \"midname\",\n \"lastname\",\n \"suffix\",\n \"deduction_run_id\",\n \"account_id\",\n \"account\",\n \"transaction_type\",\n \"deduction_type\",\n ]\n\n def person_id_link(self, context, model, name):\n # `self` is current administrative view\n # `context` is instance of jinja2.runtime.Context\n # `model` is model instance\n # `name` is property name\n return Markup(''\n '%(person_id)s'\n % dict(person_id=model.person_id))\n\n def person_id_raw(self, context, model, name):\n # `self` is current administrative view\n # `context` is instance of jinja2.runtime.Context\n # `model` is model instance\n # `name` is property name\n return model.person_id\n\n column_formatters = dict(person_id=person_id_link)\n column_formatters_export = dict(person_id=person_id_raw)\n\n admin.add_view(\n PersonDeductionDetailViewView(\n mm['PersonDeductionDetailView'],\n db.session,\n name='Deduction Details',\n category='Dues',\n )\n )\n admin.add_link(MenuLink('Zero Dues',\n url=('/web/persondeductiondetailview/?flt0_status_equals=A&flt1_relationship_in_list=Retiree%2CMember&flt2_amount_equals=0&flt5_deduction_type_equals=Dues'),\n category='Dues'))\n\n admin.add_link(MenuLink('Multiple Payments',\n url=('/web/persondeductiondetailview/?flt0_status_equals=A&flt1_relationship_in_list=Retiree%2CMember&flt2_period_count_greater_than=1'),\n category='Dues'))\n\n admin.add_link(MenuLink('Unverified',\n url=('/web/persondeductiondetailview/?flt0_status_equals=A&flt1_relationship_in_list=Retiree%2CMember&flt4_verified_equals=0'),\n category='Dues'))\n\n class TransactionDetailViewView(at.UstModelView, at.UstReportsMixin):\n can_create = False\n can_edit = False\n can_delete = False\n can_view_details = True\n can_export = True\n column_display_pk = True\n named_filter_urls = True\n page_size = 20\n column_searchable_list = [\n 'fullname',\n ]\n can_count = True\n can_export = True\n rowlist_default = ['local', 'sublocal', 'failure_reason']\n collist_default = ['relationship']\n column_labels = dict(exclude='Excluded', period='Dues Period')\n column_filters = [\n \"person_id\",\n \"firstname\",\n \"midname\",\n \"lastname\",\n \"suffix\",\n \"status\",\n \"relationship\",\n \"local\",\n \"sublocal\",\n \"period_id\",\n \"period\",\n \"transaction_date\",\n \"transaction_type\",\n \"deduction_type\",\n \"amount\",\n \"dues_paid\",\n \"failure_reason\",\n \"check_number\",\n \"exclude\",\n ]\n column_exclude_list = [\n \"firstname\",\n \"midname\",\n \"lastname\",\n 
\"suffix\",\n \"deduction_run\",\n \"transaction_id\",\n \"transaction_event_id\",\n \"transaction_failure_id\",\n \"deduction_run_id\",\n \"period_id\",\n \"org_id\",\n \"check_number\",\n ]\n\n def person_id_link(self, context, model, name):\n # `self` is current administrative view\n # `context` is instance of jinja2.runtime.Context\n # `model` is model instance\n # `name` is property name\n return Markup(''\n '%(person_id)s'\n % dict(person_id=model.person_id))\n\n def person_id_raw(self, context, model, name):\n # `self` is current administrative view\n # `context` is instance of jinja2.runtime.Context\n # `model` is model instance\n # `name` is property name\n return model.person_id\n\n column_formatters = dict(person_id=person_id_link)\n column_formatters_export = dict(person_id=person_id_raw)\n\n @expose('/')\n def index_view(self):\n # Grab parameters from URL\n view_args = self._get_list_extra_args()\n print(view_args.filters)\n print(len(view_args.filters))\n if not view_args.filters:\n db = app.extensions['sqlalchemy'].db\n rs = db.engine.execute(\n \"\"\"select id from ust_period\n where end_date BETWEEN now() - interval '21 days'\n AND now() + interval '18 days'\n \"\"\")\n periods = [r['id'] for r in rs]\n target = self.get_url('.index_view',\n flt1_period_id_in_list=\n \",\".join([\"%d\" % p for p in periods]))\n return redirect(target)\n return at.UstModelView.index_view(self)\n\n admin.add_view(\n TransactionDetailViewView(\n mm['TransactionDetailView'],\n db.session,\n name='Transaction Details',\n category='Dues',\n )\n )\n\n\ndef init_db(app):\n db = app.extensions['sqlalchemy'].db\n mm = make_models(db.engine, db.Model)\n models = app.config.setdefault('ust_models', {})\n models.update(mm)\n for k in mm.keys():\n print(\"MODEL: \", k)\n\n\ndef init_app(app):\n try:\n admin = app.extensions['admin'][0]\n except:\n admin = None\n if admin:\n init_admin(app)\n\n\ndef make_models(engine, sa_base=None):\n if sa_base is None:\n # NOTE: used when not in flask environment.\n sa_base = declarative_base()\n meta = sa_base.metadata\n meta.bind = engine\n\n class PaymentSummaryView(sa_base):\n __table__ = sa.Table('payment_summary_view',\n meta,\n sa.Column(\"id\", sa.Integer, primary_key=True),\n autoload_with=engine,\n schema=\"1\")\n\n class PersonDeductionDetailView(sa_base):\n __table__ = sa.Table('person_deduction_detail_view',\n meta,\n sa.Column('person_id', sa.Integer,\n primary_key=True),\n sa.Column('deduction_run_id', sa.Integer,\n primary_key=True),\n autoload_with=engine,\n schema=\"1\")\n\n class TransactionDetailView(sa_base):\n __table__ = sa.Table('transaction_detail_view',\n meta,\n sa.Column('transaction_event_id', sa.Integer,\n primary_key=True),\n sa.Column('transaction_id', sa.Integer,\n primary_key=True),\n autoload_with=engine,\n schema=\"1\")\n\n class OrgRateView(sa_base):\n __table__ = sa.Table('ust_org_rate_view_with_ids',\n meta,\n sa.Column('org_rate_id', sa.Integer,\n primary_key=True),\n autoload_with=engine,\n schema='public')\n\n mm = [PaymentSummaryView, OrgRateView, PersonDeductionDetailView,\n TransactionDetailView]\n\n return dict((m.__name__, m) for m in mm)\n","sub_path":"webapp/lib/ust_dues.py","file_name":"ust_dues.py","file_ext":"py","file_size_in_byte":12027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"211087684","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.views.generic.edit import CreateView\nfrom 
django.contrib.auth.forms import UserCreationForm\nfrom core.views import create_group, groups_list, AddMember, CreateTeam, team_index, add_team_member, join_team, index, group_index\nadmin.autodiscover()\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'new_site.views.home', name='home'),\n url(r'^task/', include('tasks.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^gmail/', include('api.urls')),\n url(r'^login/$', 'django.contrib.auth.views.login', name=\"login\"),\n url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),\n url('^register/', CreateView.as_view(\n template_name='registration/register.html',\n form_class=UserCreationForm,\n success_url='/'\n ), name='register'),\n url(r'^$', index, name='index'),\n url(r'^group/$', group_index, name='group_index'),\n url(r'^group/create/$',\n create_group, name=\"create_group\"),\n url(r'^group/list/$',\n groups_list, name=\"group_list\"),\n url(r'^add/$', AddMember.as_view(), name=\"add_member\"),\n url(r'^create_team/$',\n CreateTeam.as_view(), name=\"create_team\"),\n url(r'^team/add/$',\n add_team_member, name=\"add_team_member\"),\n url(r'^team$', team_index, name='team'),\n url(r'^join_team', join_team),\n )\n","sub_path":"gmail/mailage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"199849366","text":"def d_treasure_hunt(N, M, T, A, From_list, To_list, Weight_list):\r\n # ??: https://atcoder.jp/contests/abc035/submissions/677725\r\n from numpy import inf\r\n from scipy.sparse import csr_matrix\r\n from scipy.sparse.csgraph import dijkstra\r\n\r\n graph = csr_matrix((Weight_list, (From_list, To_list)), shape=(N, N))\r\n graph_rev = csr_matrix((Weight_list, (To_list, From_list)), shape=(N, N))\r\n forward, backward = dijkstra(graph, indices=0), dijkstra(graph_rev, indices=0)\r\n\r\n # ???k? ???/?? ????????????forward[k]/backward[k]???\r\n # ??????????k????????????????????\r\n return max(a * (T - int(f + b)) for f, b, a in zip(forward, backward, A) if f != inf and b != inf)\r\n\r\nN, M, T = [int(i) for i in input().split()]\r\nA = [int(i) for i in input().split()]\r\nFrom_list, To_list, Weight_list = [], [], []\r\nfor _ in range(M):\r\n a, b, c = map(int, input().split())\r\n From_list.append(a - 1)\r\n To_list.append(b - 1)\r\n Weight_list.append(c)\r\nprint(d_treasure_hunt(N, M, T, A, From_list, To_list, Weight_list))","sub_path":"Source Codes/AtCoder/abc035/D/4901142.py","file_name":"4901142.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"554880329","text":"from tkinter import *\r\nimport threading\r\n\r\n\r\n \r\ndef handle_click():\r\n def callback():\r\n total =sum(range(1000000000))\r\n root.on_main_thread(lambda: label.config(text=total))\r\n t = threading.Thread(target=callback)\r\n t.start()\r\n\r\nroot = Tk()\r\nButton(root, text='Add it up', command=handle_click).pack()\r\nlabel = Label(root)\r\nlabel.pack()\r\nroot.mainloop()\r\n","sub_path":"gui002.py","file_name":"gui002.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"236284181","text":"\"\"\"\n Created by Yanxing Li on 2018/5/7.\n Copyright © 2018年 WATT LEE. 
All rights reserved.\n    \"\"\"\n\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nfrom pylab import *\n\nn = 1\n\nclass data:\n    x = []\n    y = []\n    def __init__(self,x,y):\n        self.x = x\n        self.y = y\n\"\"\"\n    @brief: the differential equation\n    \"\"\"\ndef func(y,x):\n    return array([y[1],-(n**2*math.pi**2*y[0])])\n\"\"\"\n    @brief: OdeSolve function\n    @param: f derivative function\n    @param: a the initial value of x\n    @param: b the last value of x\n    @param: alpha the first value of y\n    @param: m the derivative on x=a\n    @param: k number of intervals\n    @return: y[:,0] the value of y\n    \"\"\"\ndef OdeSolve(f,a,b,alpha,m,k):\n    x = linspace(a,b,k)\n    yinit = array([alpha,m])\n    y = odeint(f,yinit,x)\n    return y[:,0]\n\n\"\"\"\n    @brief: shootev\n    @param: k number of intervals\n    @param: a the initial value of x\n    @param: b the last value of x\n    @param: func the differential equation\n    @param: alpha the first value of y\n    @param: beta the last value of y\n    @param: m1 the guessed derivative on x=a\n    @param: tor tolerance\n    @return: x selected x value\n    @return: y solved y value\n    \"\"\"\ndef shootev(k,a,b,alpha,beta,m1,tor):\n    m = []\n    yb = []\n    x = linspace(a,b,k)\n    m.append(m1)\n    y = OdeSolve(func,a,b,alpha,m[0],k)\n    yb.append(y[k-1])\n    if abs(yb[0]-beta) >= tor:\n        m.append(m[0]*beta/yb[0])\n        y = OdeSolve(func,a,b,alpha,m[1],k)\n        yb.append(y[k-1])\n        i = 1\n        while abs(yb[i]-beta) >= tor :\n            l = len(m)\n            m.append(m[l-1]+(beta-yb[i])*(m[l-1]-m[l-2])/(yb[i]-yb[i-1]))\n            y = OdeSolve(func,a,b,alpha,m[i+1],k)\n            yb.append(y[k-1])\n            i = i + 1\n        datas = data(x,y)\n        return datas\n    else:\n        datas = data(x,y)\n        return datas\n\nfor i in range(1,4):\n    res = shootev(1000,0,1,0,0,np.sqrt(2)*math.pi*n-0.1,0.0001)\n    plt.plot(res.x,res.y,label='E ='+str(i))\n    n = n + 1\nplt.xlabel('x')\nplt.ylabel('phi')\nplt.grid(True)\nplt.legend(loc=4)\nplt.title('shoot method on 1D infinite deep well mapping')\nplt.show()\n","sub_path":"其余计算物理作业/One-Dimensional Inf.py","file_name":"One-Dimensional Inf.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"25556670","text":"'''\nThis file contains helpers for loading data into the database from XML files\n'''\n\nimport defusedxml.lxml as ET\nimport logging\nimport re\nimport csv\n\nfrom io import StringIO\n\nfrom talentmap_api.common.common_helpers import ensure_date, xml_etree_to_dict\n\n\nclass XMLloader():\n\n    def __init__(self, model, instance_tag, tag_map, collision_behavior=None, collision_field=None, override_loading_method=None):\n        '''\n        Instantiates the XMLloader\n\n        Args:\n            model (Class) - The model class used to create instances\n            instance_tag (str) - The name of a tag which defines a new instance\n            tag_map (dict) - A dictionary defining what XML tags map to which model fields\n            collision_behavior (str) - What to do when a collision is detected (update or delete)\n            collision_field (str) - The field to detect collisions on\n            override_loading_method (Method) - This will override the normal behavior of the load function\n        '''\n\n        self.model = model\n        self.instance_tag = instance_tag\n        self.tag_map = tag_map\n        self.collision_behavior = collision_behavior\n        self.collision_field = collision_field\n        self.override_loading_method = override_loading_method\n\n    def create_models_from_xml(self, xml, raw_string=False):\n        '''\n        Loads data from an XML file into a model, using a defined mapping of fields\n        to XML tags.\n\n        Args:\n            xml (str) - The XML to load; either a 
filepath or string\n raw_string (bool) - True if xml is a string, false (default) if it is a filepath\n\n Returns:\n list: The list of new instance ids\n list: The list of updated instance ids\n '''\n\n # A list of instances to instantiate with a bulk create\n new_instances = []\n\n # A list of updated instance id's\n updated_instances = []\n\n # Parse the XML tree\n parser = ET._etree.XMLParser(recover=True)\n\n if raw_string:\n xml = StringIO(xml)\n\n xml_tree = ET.parse(xml, parser)\n\n # Get the root node\n root = xml_tree.getroot()\n\n # Get a set of all tags which match our instance tag\n instance_tags = root.findall(self.instance_tag, root.nsmap)\n\n # If we get nothing using namespace, try without\n if len(instance_tags) == 0:\n instance_tags = [element for element in list(root.iter()) if element.tag == self.instance_tag]\n\n # For every instance tag, create a new instance and populate it\n self.last_tag_collision_field = None # Used when loading piecemeal\n for tag in instance_tags:\n if self.override_loading_method:\n self.override_loading_method(self, tag, new_instances, updated_instances)\n continue\n\n self.default_xml_action(tag, new_instances, updated_instances)\n\n # We want to call the save() logic on each new instance\n for instance in new_instances:\n instance.save()\n new_instances = [instance.id for instance in new_instances]\n\n # Create our instances\n return (new_instances, updated_instances, self.last_tag_collision_field)\n\n def default_xml_action(self, tag, new_instances, updated_instances):\n instance = self.model()\n for key in self.tag_map.keys():\n # Find a matching entry for the tag from the tag map\n item = tag.find(key, tag.nsmap)\n if item is not None:\n # If we have a matching entry, and the map is not a callable,\n # set the instance's property to that value\n if not callable(self.tag_map[key]):\n data = item.text\n if data and len(data.strip()) > 0:\n setattr(instance, self.tag_map[key], data)\n else:\n # Tag map is a callable, so call it with instance + item\n self.tag_map[key](instance, item)\n\n # Check for collisions\n if self.collision_field:\n q_kwargs = {}\n q_kwargs[self.collision_field] = getattr(instance, self.collision_field)\n self.last_tag_collision_field = getattr(instance, self.collision_field)\n collisions = type(instance).objects.filter(**q_kwargs)\n if collisions.count() > 1:\n logging.getLogger('console').warn(f\"Looking for collision on {type(instance).__name__}, field {self.collision_field}, value {getattr(instance, self.collision_field)}; found {collisions.count()}. 
Skipping item.\")\n return\n elif collisions.count() == 1:\n # We have exactly one collision, so handle it\n if self.collision_behavior == 'delete':\n collisions.delete()\n new_instances.append(instance)\n elif self.collision_behavior == 'update':\n # Update our collided instance\n update_dict = dict(instance.__dict__)\n del update_dict[\"id\"]\n del update_dict[\"_state\"]\n collisions.update(**update_dict)\n updated_instances.append(collisions.first().id)\n return\n elif self.collision_behavior == 'skip':\n # Skip this instance, because it already exists\n return\n else:\n new_instances.append(instance)\n else:\n # Append our instance\n new_instances.append(instance)\n\n\nclass CSVloader():\n\n def __init__(self, model, tag_map, collision_behavior=None, collision_field=None):\n '''\n Instantiates the CSVloader\n\n Args:\n model (Class) - The model class used to create instances\n tag_map (dict) - A dictionary defining what CSV column headers map to which model fields\n collision_behavior (str) - What to do when a collision is detected (update or delete)\n collision_field (str) - The field to detect collisions on\n '''\n\n self.model = model\n self.tag_map = tag_map\n self.collision_behavior = collision_behavior\n self.collision_field = collision_field\n\n def create_models_from_csv(self, csv_filepath):\n '''\n Loads data from an CSV file into a model, using a defined mapping of fields\n to CSV column titles.\n\n Args:\n csv_filepath (str) - The filepath to the CSV file to load\n\n Returns:\n list: The list of new instance ids\n list: The list of updated instance ids\n '''\n\n # A list of instances to instantiate with a bulk create\n new_instances = []\n\n # A list of updated instance id's\n updated_instances = []\n\n # Parse the CSV\n with open(csv_filepath, 'r') as csv_file:\n for line in csv.DictReader(csv_file):\n instance = self.model()\n for key in line.keys():\n # If we have a matching entry, and the map is not a callable,\n # set the instance's property to that value\n if not callable(self.tag_map[key]):\n data = line[key]\n if data and len(data.strip()) > 0:\n setattr(instance, self.tag_map[key], data)\n else:\n # Tag map is a callable, so call it with instance + item\n self.tag_map[key](instance, line[key])\n\n # Check for collisions\n if self.collision_field:\n q_kwargs = {}\n q_kwargs[self.collision_field] = getattr(instance, self.collision_field)\n collisions = type(instance).objects.filter(**q_kwargs)\n if collisions.count() > 1:\n logging.getLogger('console').warn(f\"Looking for collision on {type(instance).__name__}, field {self.collision_field}, value {getattr(instance, self.collision_field)}; found {collisions.count()}. 
Skipping item.\")\n continue\n elif collisions.count() == 1:\n # We have exactly one collision, so handle it\n if self.collision_behavior == 'delete':\n collisions.delete()\n new_instances.append(instance)\n elif self.collision_behavior == 'update':\n # Update our collided instance\n update_dict = dict(instance.__dict__)\n del update_dict[\"id\"]\n del update_dict[\"_state\"]\n # strip out any \"null\" values from the update dict; when we parse the CSVs we set nulls where empty\n # and this sometimes will inadvertently overwrite data we want to keep\n update_dict = {k: v for k, v in update_dict.items() if v is not None}\n collisions.update(**update_dict)\n updated_instances.append(collisions.first().id)\n continue\n elif self.collision_behavior == 'skip':\n # Skip this instance, because it already exists\n continue\n else:\n new_instances.append(instance)\n else:\n # Append our instance\n new_instances.append(instance)\n\n # We want to call the save() logic on each new instance\n for instance in new_instances:\n instance.save()\n new_instances = [instance.id for instance in new_instances]\n\n # Create our instances\n return (new_instances, updated_instances)\n\n\ndef strip_extra_spaces(field):\n '''\n Creates a function for processing a specific field by removing duplicated and\n trailing spaces during XML loading\n '''\n def process_function(instance, item):\n setattr(instance, field, re.sub(' +', ' ', item.text).strip())\n return process_function\n\n\ndef parse_boolean(field, true_values_override=None):\n '''\n Creates a function for processing booleans from a string\n '''\n def process_function(instance, item):\n true_values = [\"1\", \"True\", \"true\", \"Y\", \"T\"]\n if true_values_override:\n true_values = true_values_override\n value = False\n if item.text in true_values:\n value = True\n setattr(instance, field, value)\n return process_function\n\n\ndef parse_date(field):\n '''\n Parses date fields into datetime\n '''\n def process_function(instance, item):\n setattr(instance, field, ensure_date(item.text))\n return process_function\n\n\ndef append_to_array(field):\n '''\n Appends the item to the array field\n '''\n def process_function(instance, item):\n getattr(instance, field).append(item.text)\n return process_function\n\n\ndef get_nested_tag(field, tag, many=False):\n '''\n Creates a function to grab a nested tag\n If the many parameter is set to True, it will concatenate them into a comma\n seperated list as a string\n '''\n\n def process_function(instance, item):\n if not many:\n setattr(instance, field, item.find(tag).text)\n else:\n data = [element.text for element in list(item.iter()) if element.tag == tag]\n setattr(instance, field, \",\".join(data))\n return process_function\n","sub_path":"talentmap_api/common/xml_helpers.py","file_name":"xml_helpers.py","file_ext":"py","file_size_in_byte":11615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"204477608","text":"import cv2\nimport sys\n\nprint(sys.argv[0])\nprint('A demo program of WeChat QRCode Detector:')\ncamIdx = -1\nif len(sys.argv) > 1:\n if sys.argv[1] == \"-camera\":\n camIdx = int(sys.argv[2]) if len(sys.argv)>2 else 0\n img = cv2.imread(sys.argv[1])\nelse:\n print(\" Usage: \" + sys.argv[0] + \" \")\n exit(0)\n\n# For python API generator, it follows the template: {module_name}_{class_name},\n# so it is a little weird.\n# The model is downloaded to ${CMAKE_BINARY_DIR}/downloads/wechat_qrcode if cmake runs without warnings,\n# otherwise you can download 
them from https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode.\ntry:\n    detector = cv2.wechat_qrcode_WeChatQRCode(\n        \"detect.prototxt\", \"detect.caffemodel\", \"sr.prototxt\", \"sr.caffemodel\")\nexcept:\n    print(\"---------------------------------------------------------------\")\n    print(\"Failed to initialize WeChatQRCode.\")\n    print(\"Please, download 'detector.*' and 'sr.*' from\")\n    print(\"https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode\")\n    print(\"and put them into the current directory.\")\n    print(\"---------------------------------------------------------------\")\n    exit(0)\n\nprevstr = \"\"\n\nif camIdx < 0:\n    res, points = detector.detectAndDecode(img)\n    print(res,points)\nelse:\n    cap = cv2.VideoCapture(camIdx)\n    while True:\n        res, img = cap.read()\n        if img is None:\n            break\n        res, points = detector.detectAndDecode(img)\n        for t in res:\n            if t != prevstr:\n                print(t)\n        if res:\n            prevstr = res[-1]\n        cv2.imshow(\"image\", img)\n        if cv2.waitKey(30) >= 0:\n            break\n    # When everything done, release the capture\n    cap.release()\n    cv2.destroyAllWindows()\n","sub_path":"modules/wechat_qrcode/samples/qrcode.py","file_name":"qrcode.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"495157508","text":"from flask import Flask, render_template, request #here we also added request, we'll need it for the form\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n@app.route(\"/hello\", methods=[\"POST\"]) #here the method is POST, the default is GET, so the application expects the user to submit something\ndef hello():\n    name = request.form.get(\"name\").capitalize() #here the form from index passes the value into name; at the end I capitalized it\n    surname =request.form.get(\"surname\").capitalize() #here I got a bit fancy and also added the surname variable\n    return render_template(\"hello.html\", name=name, surname=surname) #passes that variable as name and sends it on to hello.html\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"557213630","text":"from math import pow\n\ndef sum_of_powers(n, exp=5):\n    sum = 0\n    n = str(n)\n    for i in n:\n        sum += pow(int(i), exp)\n    return int(sum)\n\ntotal = 0\n\nfor i in range(9, 10000000):\n    if i == sum_of_powers(i):\n        total += i\n        print(i)\n\nprint(total)","sub_path":"problems001to099/Problem30.py","file_name":"Problem30.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"238750318","text":"# coding:utf-8\r\n\r\nimport Tkinter as tk\r\n \r\ndef drawCircle(self, x, y, r, **kwargs):\r\n    return self.create_oval(x - r, y - r, x + r, y + r, **kwargs)\r\n \r\ndef run_drawCircle(): \r\n    top = tk.Tk()\r\n    top.title(\"Canvas Test\")\r\n    \r\n    cvs = tk.Canvas(top, width=600, height=400)\r\n    cvs.pack()\r\n    \r\n    cvs.create_line(50, 50, 50, 300)\r\n    cvs.create_line(100, 50, 200, 300, fill=\"red\", dash=(4, 4), arrow=tk.LAST)\r\n    \r\n    cvs.create_rectangle(200, 50, 400, 200, fill=\"blue\")\r\n    \r\n    cvs.create_oval(450, 50, 550, 200, fill=\"green\")\r\n    drawCircle(cvs, 450, 300, 50, fill=\"red\")\r\n    \r\n    cvs.create_polygon(200, 250, 350, 250, 350, 350, 220, 300, fill=\"yellow\")\r\n    top.mainloop() \r\n\r\n\r\nfrom Tkinter import *\r\ndef test1():\r\n    root=Tk()\r\n    
frame=Frame(root,width=150,height=350)\r\n    label=Label(frame,text='Entered :')\r\n    label.pack()\r\n    def enter(event):\r\n        #label['text']='Entered Frame:x=%d y=%d'%(event.x,event.y)\r\n        label.configure(text='Entered Frame:x=%d y=%d'%(event.x,event.y))\r\n    frame.bind('<Enter>',enter)\r\n    frame.pack()\r\n    root.mainloop()\r\n    \r\ndef test2():\r\n    root=Tk()\r\n    for relief in [RAISED,SUNKEN,FLAT,RIDGE,GROOVE,SOLID]:\r\n        f=Frame(root,borderwidth=2,relief=relief)\r\n        Label(f,text=relief,width=10).pack(side=LEFT)\r\n        f.pack(side=LEFT,padx=5,pady=5)\r\n    root.mainloop()\r\n    \r\ndef test3():\r\n    root=Tk()\r\n    f=Frame(root,width=200,height=110)\r\n    xf=Frame(f,relief=GROOVE,borderwidth=2)\r\n    Label(xf,text='You shot him!').pack(pady=10)\r\n    Button(xf,text='''He's dead''',state=DISABLED).pack(side=LEFT,padx=5,pady=8)\r\n    Button(xf,text='''He's good''',command=root.quit).pack(side=RIGHT,padx=5,pady=8)\r\n    xf.place(relx=0.01,rely=0.0125,anchor=NW)\r\n    Label(f,text='self-defined').place(relx=.06,rely=0.0125,anchor=W)\r\n    f.pack()\r\n    root.mainloop()\r\n    \r\ndef test4():\r\n    root=Tk()\r\n    Label(root,text='Anagram:').pack(side=LEFT,padx=5,pady=10)\r\n    e=StringVar()\r\n    entry=Entry(root,width=40,textvariable=e).pack(side=LEFT)\r\n    e.set('hello my name is liang qian wu')\r\n    var=IntVar()\r\n    rF=Frame(root,borderwidth=2)\r\n    rF.pack(side=BOTTOM,padx=5,pady=5)\r\n    for text,value in[('Passin fruit',1),('Apples',2),('Oranges',3)]:\r\n        Radiobutton(rF,text=text,value=value,variable=var,command=lambda se=e,i=var:se.set('select: %d '%i.get())).pack(anchor=NW)\r\n    var.set(3)\r\n    for text,value in[('Red',1),('Tilsit',2),('Brie',3)]:\r\n        Radiobutton(rF,text=text,value=value,variable=var,indicatoron=0).pack(anchor=W,fill=X,padx=18)\r\n    var.set(3)\r\n    root.mainloop()\r\n    \r\ndef test5():\r\n    root=Tk()\r\n    var=IntVar()\r\n    for castmember,row,col,status in[('Passin fruit',0,0,NORMAL),('Apples',0,1,NORMAL),('Oranges',1,0,DISABLED),\r\n                                     ('Terry Jones',1,1,NORMAL)]:\r\n        Checkbutton(root,text=castmember,state=status,variable=(var,castmember),\r\n                    anchor=NW).grid(row=row,column=col,sticky=W)\r\n    root.mainloop()\r\n    \r\ndef test6():\r\n    root=Tk()\r\n    text=Text(root,height=26,width=50)\r\n    scroll=Scrollbar(root,command=text.yview)\r\n    text.configure(yscrollcommand=scroll.set)\r\n    text.tag_configure('bold_italics',font=('Verdane',12,'bold','italic'))\r\n    text.tag_configure('big',font=('Verdane',24,'bold'))\r\n    text.tag_configure('color',foreground='blue',font=('Tempus Sans ITC',14))\r\n    text.tag_configure('groove',relief=GROOVE,borderwidth=2)\r\n    text.tag_bind('bite','<1>',lambda e,t=text:t.insert(END,\"I'll bite your legs off!\"))\r\n    text.insert(END,'something up with my banter,chaps?\\n')\r\n    text.insert(END,'Four hours to bury a cat?\\n','bold_italics')\r\n    text.insert(END,'Cat I call you Frank?\\n','big')\r\n    text.insert(END,'Cat I call you Frank?\\n','color')\r\n    text.insert(END,'Cat I call you Frank?\\n','groove')\r\n    button=Button(text,text='I do live at 46')\r\n    text.window_create(END,window=button)\r\n    text.pack(side=LEFT)\r\n    scroll.pack(fill=Y)\r\n    root.mainloop()\r\n    \r\ndef test7():\r\n    root=Tk()\r\n    canvas=Canvas(root,width=400,height=400)\r\n    canvas.create_oval(10,10,100,100,fill='gray90')\r\n    canvas.create_text(350,100,text='text',fill='yellow')\r\n    frm=Frame(canvas,relief=GROOVE,borderwidth=2)\r\n    Label(frm,text='Embedded Frame/Label').pack()\r\n    canvas.create_window(285,280,window=frm,anchor=CENTER)\r\n    canvas.pack()\r\n    root.mainloop()\r\n    \r\ndef test8():\r\n    root=Tk()\r\n    list=Listbox(root,width=15,height=6)\r\n    
#scroll=Scrollbar(root,command=list.yview)\r\n    #list.configure(yscrollcommand=scroll.set)\r\n    list.pack(side=LEFT)\r\n    #scroll.pack(side=RIGHT,fill=Y)\r\n    for item in range(30):\r\n        list.insert(END,item)\r\n    root.mainloop()\r\n    \r\ndef test9():\r\n    root=Tk()\r\n    label=Label(root,text=\"h=\")\r\n    scale=Scale(root,orient=VERTICAL,length=284,from_=0,to=250,tickinterval=50,command=lambda h,l=label:l.configure(text='h=%s'%h))\r\n    label.pack(side=LEFT)\r\n    scale.pack(side=LEFT)\r\n    root.mainloop()\r\n\r\n\r\nimport time\r\n\r\nimport Tkinter as tk\r\n\r\nclass Window:\r\n    \r\n    def __init__(self, title='nms', width=300, height=120, staFunc=bool, stoFunc=bool):\r\n        \r\n        self.w = width\r\n        \r\n        self.h = height\r\n        \r\n        self.stat = True\r\n        \r\n        self.staFunc = staFunc\r\n        \r\n        self.stoFunc = stoFunc\r\n        \r\n        self.staIco = None\r\n        \r\n        self.stoIco = None\r\n        \r\n        self.root = tk.Tk(className=title)\r\n    \r\n    def center(self):\r\n        \r\n        ws = self.root.winfo_screenwidth()\r\n        \r\n        hs = self.root.winfo_screenheight()\r\n        \r\n        x = int( (ws/2) - (self.w/2) )\r\n        \r\n        y = int( (hs/2) - (self.h/2) )\r\n        \r\n        self.root.geometry('{}x{}+{}+{}'.format(self.w, self.h, x, y))\r\n    \r\n    def packBtn(self):\r\n        \r\n        self.btnSer = tk.Button(self.root, command=self.event, width=15, height=3)\r\n        \r\n        self.btnSer.pack(padx=20, side='left')\r\n        \r\n        btnQuit = tk.Button(self.root, text='Close window', command=self.root.quit, width=15, height=3)\r\n        \r\n        btnQuit.pack(padx=20, side='right')\r\n    \r\n    def event(self):\r\n        \r\n        self.btnSer['state'] = 'disabled'\r\n        \r\n        if self.stat:\r\n            \r\n            if self.stoFunc():\r\n                \r\n                self.btnSer['text'] = 'Start service'\r\n                \r\n                self.stat = False\r\n                \r\n                self.root.iconbitmap(self.stoIco)\r\n        \r\n        else:\r\n            \r\n            if self.staFunc():\r\n                \r\n                self.btnSer['text'] = 'Stop service'\r\n                \r\n                self.stat = True\r\n                \r\n                self.root.iconbitmap(self.staIco)\r\n        \r\n        self.btnSer['state'] = 'active'\r\n    \r\n    def loop(self):\r\n        self.root.resizable(False, False) # disable window resizing\r\n        \r\n        self.packBtn()\r\n        \r\n        self.center() # center the window on screen\r\n        \r\n        self.event()\r\n        \r\n        self.root.mainloop()\r\n    \r\n    ########################################################################\r\n    \r\ndef sta():\r\n    print('start.')\r\n    return True\r\n\r\ndef sto():\r\n    print('stop.')\r\n    return True\r\n\r\n\r\ndef test():\r\n    import sys, os\r\n    \r\n    w = Window(staFunc=sta, stoFunc=sto)\r\n    \r\n    w.staIco = os.path.join(sys.exec_prefix, 'DLLs\pyc.ico')\r\n    \r\n    w.stoIco = os.path.join(sys.exec_prefix, 'DLLs\py.ico')\r\n    \r\n    w.loop()\r\n\r\n    \r\nif __name__ =='__main__':\r\n#    run_drawCircle()\r\n#    test1()\r\n#    test2()\r\n#    test3()\r\n#    test4()\r\n#    test5()\r\n#    test6()\r\n#    test7()\r\n#    test8()\r\n#    test9()\r\n    test()\r\n","sub_path":"GUI/Tkinter/my-tkinter.py","file_name":"my-tkinter.py","file_ext":"py","file_size_in_byte":7680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"308464103","text":"from flask import (Blueprint, render_template, url_for,\n                   flash, redirect, request, abort)\nfrom hello_blog.users.users_forms import (SignupForm, LoginForm,\n                                          UpdateAccount,\n                                          DeleteAccountForm)\nfrom hello_blog.posts.posts_forms import SearchForm\nfrom hello_blog.models import User, Post, Categories, Comment\nfrom hello_blog import bcrypt\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom hello_blog.users.users_utils import save_user_image\n\n\nusers = Blueprint(\"users\", __name__)\n\n\n# create the users route where users can sign up\n@users.route(\"/signup\", methods=[\"GET\", 
\"POST\"])\ndef signup():\n # if the user has logged in and gets to\n # this route they will be redirected home\n if current_user.is_authenticated:\n return redirect(url_for(\"main.home\"))\n # use form created in users.forms.py\n form = SignupForm()\n # checks if the form is valid using wtf validators\n if form.validate_on_submit():\n # creates an instance of User class from form data\n # and then hashes thier password\n # and saves this user to the database in mongodb\n user = User(username=form.username.data,\n email=form.email.data)\n user.hash_password(form.password.data)\n user.save()\n flash(\"User registered, Please login now\", \"success\")\n return redirect(url_for(\"users.login\"))\n return render_template(\"users/signup.html\",\n title=\"Sign Up\",\n form=form)\n\n\n# create the route to login the user.\n@users.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n # if the user has logged in and gets to\n # this route they will be redirected home\n if current_user.is_authenticated:\n return redirect(url_for(\"posts.all_posts\"))\n form = LoginForm()\n if form.validate_on_submit():\n # Finds the user in the database by their username\n user = User.objects(\n username=form.username.data).first()\n\n # if user exists use bycrpt check passsword hashes\n # function to check the passwords match\n if user and bcrypt.check_password_hash(user.password,\n form.password.data):\n login_user(user, remember=form.remember_user.data)\n flash(\"You've been logged in successfully\", \"success\")\n return redirect(url_for(\"posts.all_posts\"))\n\n # if no user exists or wrong details lets\n # user know and directs them back to the login page\n else:\n flash(\"Login Unsuccessful. Please check login details\", \"errors\")\n return redirect(url_for(\"users.login\"))\n return render_template(\"users/login.html\",\n title=\"Login\",\n form=form)\n\n\n# create the route to logout the user.\n@users.route(\"/logout\")\ndef logout():\n logout_user()\n flash(\"You've been logged out succesfully\", \"success\")\n return redirect(url_for(\"main.home\"))\n\n\n# code from stack overflow to stop cache\n@users.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n return response\n\n\n# Displays the users account information\n@users.route(\"/account/\")\n@login_required\ndef account(username):\n if current_user.username != username:\n abort(403)\n form = DeleteAccountForm()\n user = User.objects(username=username).first_or_404()\n return render_template(\"users/account.html\",\n user=user,\n form=form,\n title=\"Account\")\n\n\n# create the route for updating the users account\n@users.route(\"/account/update\", methods=[\"GET\", \"POST\"])\n@login_required\ndef update_account():\n form = UpdateAccount()\n # checks the form and changes the data in the database\n if form.validate_on_submit():\n current_user.username = form.username.data\n current_user.email = form.email.data\n # if the user adds an image it calls the save user image\n # function from utils to upload image to cloudinary\n if form.user_image.data:\n image_url = save_user_image(form.user_image.data)\n current_user.user_image = image_url\n if form.bio.data:\n current_user.bio = form.bio.data\n current_user.save()\n return redirect(url_for(\"users.account\",\n username=current_user.username))\n\n # fills the form with the current data from the database\n elif request.method == \"GET\":\n form.username.data = current_user.username\n form.email.data = current_user.email\n if 
current_user.bio:\n form.bio.data = current_user.bio\n return render_template(\"users/update_account.html\",\n title=\"Update Account\",\n form=form)\n\n\n# create route to delete user\n@users.route(\"/account//delete\", methods=[\"GET\", \"POST\"])\n@login_required\ndef delete_account(username):\n if request.method == \"POST\":\n # find user in database and delete their details\n user = User.objects(username=username).first()\n posts = Post.objects(author=user)\n comments = Comment.objects(comment_author=user)\n user.delete()\n posts.delete()\n comments.delete()\n flash(\"Account deleted successfully\", \"success\")\n return redirect(url_for(\"main.home\"))\n # if the users types this route into the url it will\n # give an error so the account can only be deleted from the modal form\n # on the users account page.\n return abort(403)\n\n\n# create route to show all a perticular users posts\n@users.route(\"/posts/user/\")\n@login_required\ndef users_posts(username):\n categories = Categories.objects()\n form = SearchForm()\n page = request.args.get('page', 1, type=int)\n user = User.objects(username=username).first_or_404()\n posts = Post.objects(author=user.id).order_by(\"-date_posted\").paginate(\n page=page, per_page=4)\n return render_template(\"users/users_posts.html\",\n title=f\"{user.username}'s Posts\",\n posts=posts,\n heading=f\"{user.username}'s Posts\",\n form=form,\n categories=categories,\n user=user)\n","sub_path":"hello_blog/users/users_views.py","file_name":"users_views.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"47681320","text":"import logging\nimport flask\nfrom flask import Blueprint\nimport pycodcif\nimport os, sys\nimport json\nimport random\nfrom flask_paginate import Pagination, get_page_args\nlib_path = os.path.abspath(os.path.join('..', 'tools-barebone', 'webservice'))\nsys.path.append(lib_path)\nfrom run_app import mysql\nblueprint = Blueprint('compute', __name__, url_prefix='/compute')\n\nlogger = logging.getLogger('tools-app')\nusers = list(range(45947))\n\n\ndef get_users(offset=0, per_page=50):\n return users[offset: offset + per_page]\n\n@blueprint.route('/database', defaults={'page':1})\n@blueprint.route('/database/page/')\ndef database(page):\n page, per_page, offset = get_page_args(page_parameter='page', per_page_parameter='per_page')\n pagination_users = get_users(offset=offset, per_page=per_page)\n pagination = Pagination(page=page, per_page=per_page, total=45947, css_framework='bootstrap4')\n # onlyfiles = [f for f in os.listdir('.') if os.path.isfile(os.path.join('.', f))]\n # return str(os.listdir('./code/webservice/'))\n if page*per_page<=45947:\n data = open(\"./code/webservice/COD-selection.txt\", 'r').read().splitlines()[(page-1)*per_page: page*per_page]\n else:\n data = open(\"./code/webservice/COD-selection.txt\", 'r').read().splitlines()[(page-1)*per_page: ]\n # data = urllib.request.urlopen(\"http://www.crystallography.net/cod/result.php?format=urls&CODSESSION=oo6nu37qiglf2p9f8uioqc0jum7ivd2d\")\n # return 'lol'\n return flask.render_template('db.html', page=page, per_page=per_page, pagination=pagination, data = data)\n\n\n@blueprint.route('/process_structure/', methods=['GET', 'POST'])\ndef process_structure():\n if flask.request.method == 'POST':\n structurefile = flask.request.files['structurefile']\n fileformat = flask.request.form.get('fileformat', 'unknown')\n filecontent = structurefile.read().decode('utf-8')\n\n try:\n return 
\"FORMAT: {}
    CONTENT:
    {}
    \".format(fileformat, filecontent)\n except Exception:\n flask.flash(\"Unable to process the data, sorry...\")\n return flask.redirect(flask.url_for('input_data'))\n else:\n return flask.redirect(flask.url_for('compute.process_structure_example'))\n\n\n@blueprint.route('/process_example_structure/', methods=['GET', 'POST'])\ndef process_structure_example():\n if flask.request.method == 'POST':\n return \"This was a POST\"\n else:\n return \"This was a GET\"\n\n\n@blueprint.route('/validate/', methods=['GET', 'POST'])\ndef validate():\n if flask.request.method == 'POST':\n file = flask.request.files['cif']\n filename = (\"testfile_\" + str(random.randint(1, 1000000)) + \".cif\")\n file.save(filename)\n try:\n conf = {}\n for option in flask.request.form.items():\n conf[option[0]] = 1\n data, err_count, err_msg = pycodcif.parse(filename, conf)\n data[0]['err_count'] = err_count\n data[0]['err_msg'] = err_msg\n except Exception as e:\n e = str(e).replace(\"\\n\", \" \")\n error = 'Failed to parse the cif file: ' + e\n data = [{'err_msg': error, }]\n try:\n os.remove(filename)\n except Exception as e:\n e = str(e).replace(\"\\n\", \" \")\n error = 'Error: ' + e\n data = [{'err_msg': error, }]\n return json.dumps(data)\n\n\n@blueprint.route('/visualize/')\ndef visualize():\n return flask.render_template('player.html')\n\n\n@blueprint.route('/')\ndef index():\n return flask.render_template('upload.html')\n","sub_path":"cif-validator/compute/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"265979853","text":"#%%\nimport numpy as np\nfrom tensorflow import keras as k\n\ndef mnist_reshape(x):\n return x.reshape(x.shape[0], 28, 28, 1)\n\ndef cluttered_mnist():\n data = np.load('../data/spatial-transformer-tensorflow-master/data/mnist_sequence1_sample_5distortions5x5.npz')\n xtrn = mnist_reshape(data['X_train'])\n xval = mnist_reshape(data['X_valid'])\n xtst = mnist_reshape(data['X_test'])\n ytrn = np.array([[float(y == i) for i in range(10)] for y in data['y_train']])\n yval = np.array([[float(y == i) for i in range(10)] for y in data['y_valid']])\n ytst = np.array([[float(y == i) for i in range(10)] for y in data['y_test']])\n return (xtrn,xval,xtst,ytrn,yval,ytst)\n\ndef prerotated_mnist():\n mtrn = np.load('../data/mnist_rotation_new/rotated_train.npz')\n mval = np.load('../data/mnist_rotation_new/rotated_valid.npz')\n mtst = np.load('../data/mnist_rotation_new/rotated_test.npz')\n\n xtrn = mnist_reshape(mtrn['x'])\n xval = mnist_reshape(mval['x'])\n xtst = mnist_reshape(mtst['x'])\n ytrn = np.array([[float(y == i) for i in range(10)] for y in mtrn['y']])\n yval = np.array([[float(y == i) for i in range(10)] for y in mval['y']])\n ytst = np.array([[float(y == i) for i in range(10)] for y in mtst['y']])\n\n return (xtrn,xval,xtst,ytrn,yval,ytst)\n\ndef ownrotated_mnist():\n data = np.load('../data/own_rot_mnist.npz')\n xtrn = mnist_reshape(np.reshape(data['x_train'][:50000],(50000,784)))\n xval = mnist_reshape(np.reshape(data['x_train'][50000:],(10000,784)))\n xtst = mnist_reshape(np.reshape(data['x_test'],(10000,784)))\n ytrn = np.array([[float(y == i) for i in range(10)] for y in data['y_train'][:50000]])\n yval = np.array([[float(y == i) for i in range(10)] for y in data['y_train'][50000:]])\n ytst = np.array([[float(y == i) for i in range(10)] for y in data['y_test']])\n return (xtrn,xval,xtst,ytrn,yval,ytst)\n\ndef oldmnist():\n (xtrn, ytrn), (xtst, ytst) 
= k.datasets.mnist.load_data()\n xtrn = xtrn.reshape([xtrn.shape[0],28,28,1]) / 255\n ytrn = np.array([[float(y == i) for i in range(10)] for y in ytrn])\n return (xtrn[:50000],ytrn[:50000],xtrn[50000:],ytrn[50000:])\n\ndef mnist():\n (xtrn, ytrn), (xtst, ytst) = k.datasets.mnist.load_data()\n xtrn = xtrn.reshape([xtrn.shape[0],28,28,1]) / 255\n xtst = xtst.reshape([xtst.shape[0],28,28,1]) / 255\n ytrn = np.array([[float(y == i) for i in range(10)] for y in ytrn])\n ytst = np.array([[float(y == i) for i in range(10)] for y in ytst])\n return (xtrn,ytrn,xtst,ytst)\n\ndef cifar10():\n (xtrn, ytrnind), (xtst, ytstind) = k.datasets.cifar10.load_data()\n xtrn = xtrn / 255\n xtst = xtst / 255\n ytrn = np.array([[float(y == i) for i in range(10)] for y in ytrnind])\n ytst = np.array([[float(y == i) for i in range(10)] for y in ytstind])\n return (xtrn,ytrn,xtst,ytst)\n\n\ndata_dic = {\n 'mnist': mnist,\n 'oldmnist': oldmnist,\n 'cifar10': cifar10\n}\n","sub_path":"stn/tf/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"41998538","text":"import sqlite3\n\nDB_Name = \"unoapp/uno.db\"\n\ndef newGame(name,first_prize,second_prize):\n\tTableSchema = \"insert into games(name,first_prize,second_prize,datetime) values(?,?,?,datetime('now'))\"\n\tval = (name,first_prize,second_prize)\n\n\tconn = sqlite3.connect(DB_Name)\n\tcurs = conn.cursor()\n\t#Create Tables\n\tcurs.execute(TableSchema,val)\n\tconn.commit()\n\tTableSchema = \"SELECT id FROM games ORDER BY id DESC LIMIT 1;\"\n\tcurs.execute(TableSchema)\n\tresult = curs.fetchall()\n\tcurs.close()\n\tconn.close()\n\treturn result[0][0]\n\ndef insertScore(gameid,name,score):\n\tTableSchema = \"insert into results(gameid,name,score) values(?,?,?)\"\n\tval = (gameid,name,score)\n\n\tconn = sqlite3.connect(DB_Name)\n\tcurs = conn.cursor()\n\t#Create Tables\n\tcurs.execute(TableSchema,val)\n\tconn.commit()\n\tcurs.close()\n\tconn.close()\n\ndef getTotalScores():\n\tTableSchema = \"select name,sum(score) as 'total score' from results group by name order by sum(score) desc;\"\n\tconn = sqlite3.connect(DB_Name)\n\tcurs = conn.cursor()\n\tcurs.execute(TableSchema)\n\tresult = curs.fetchall()\n\tcurs.close()\n\tconn.close()\n\treturn result\n\ndef getAll():\n\tTableSchema = \"select * from results\"\n\tconn = sqlite3.connect(DB_Name)\n\tcurs = conn.cursor()\n\tcurs.execute(TableSchema)\n\tresult = curs.fetchall()\n\tcurs.close()\n\tconn.close()\n\treturn result\n\ndef getWinners():\n\tTableSchema = \"select * from results group by gameid order by gameid,max(score);\"\n\tconn = sqlite3.connect(DB_Name)\n\tcurs = conn.cursor()\n\tcurs.execute(TableSchema)\n\tresult = curs.fetchall()\n\tcurs.close()\n\tconn.close()\n\treturn result\n\ndef getScores():\n\tTableSchema = \"select * from v1;\"\n\ttry:\n\t\tconn = sqlite3.connect(DB_Name)\n\texcept Error as e:\n\t\tprint(e)\n\n\tcurs = conn.cursor()\n\tcurs.execute(TableSchema)\n\tresult = curs.fetchall()\n\tcurs.close()\n\tconn.close()\n\treturn result\n","sub_path":"unoapp/datafunc.py","file_name":"datafunc.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"345270805","text":"\nimport re\nimport typing\n\nimport datatypes\n\nfrom ...cli.utils import cli_name\n\n\ndef str_param(param):\n return cli_name(param).upper()\n\n\ndef str_action(action):\n name = cli_name(action)\n\n if action.parameters:\n 
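Each MNIST loader above builds one-hot targets with a nested list comprehension over `range(10)`. A vectorized equivalent is sketched below; the label values are illustrative, and it assumes integer class ids in `[0, 10)`:

```python
import numpy as np

# One-hot encoding by indexing an identity matrix; equivalent to the
# [[float(y == i) for i in range(10)] for y in labels] comprehension above.
labels = np.array([3, 0, 9])          # illustrative integer class ids
one_hot = np.eye(10, dtype=np.float32)[labels]

assert one_hot.shape == (3, 10)
assert one_hot[0, 3] == 1.0 and one_hot[0].sum() == 1.0
```

Indexing `np.eye` avoids the Python-level loop, which matters when encoding all 60k MNIST labels at once.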
param_string = ' ' + ' '.join(str_param(param) for param in action.parameters)\n else:\n param_string = ''\n\n return '{name}{params}'.format(name=name, params=param_string)\n\n\ndef str_description(description, capitalize=False):\n # replace parameter references in the description\n if description is None:\n return ''\n\n description = re.sub(r'@\\w+', lambda m: cli_name(m.group()[1:]).upper(), description)\n\n if description:\n if capitalize:\n description = description[0].upper() + description[1:]\n else:\n description = description[0].lower() + description[1:]\n\n return description\n\n\ndef str_type(typ):\n if hasattr(typ, '__origin__'):\n origin = typ.__origin__\n types = typ.__args__\n\n if origin is typing.Union:\n if len(types) == 1:\n return str_type(types[0])\n return '{} or {}'.format(', '.join(str_type(t) for t in types[:-1]), str_type(types[-1]))\n\n if origin in {typing.List}:\n return '{} of {}s'.format(cli_name(origin), str_type(types[0]))\n\n if origin is typing.Generic:\n if len(types) == 1:\n return '{} of {}s'.format(cli_name(origin), str_type(types[0]))\n\n return str(typ)\n\n return cli_name(typ)\n","sub_path":"applib/ui/interface/DefaultCLI/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"451638250","text":"from urllib.request import urlopen\r\nimport json\r\n\r\nkey = 'be1128a4ac78d50e4a840f42bccbff914a19cb7d'\r\n#url_pop_1990 = \"https://api.census.gov/data/1990/pep/int_charagegroups?get=POP,RACE_SEX,HISP&&for=county:071&in=state:06&AGEGRP=0&YEAR=90&key=be1128a4ac78d50e4a840f42bccbff914a19cb7d\"\r\n\r\n\r\ndef get_fips(latitude, longitude):\r\n\r\n\turl_loc = urlopen(\"https://geo.fcc.gov/api/census/block/find?latitude=\" + latitude + \"&longitude=\" + longitude + \"&format=json\")\r\n\tdata = json.loads(url_loc.read())\r\n\tif data['County'] is None:\r\n\t\treturn None\r\n\tif data['County']['FIPS'] is None:\r\n\t\treturn None\r\n\r\n\treturn (data['County']['FIPS'][:2], data['County']['FIPS'][2:])\r\n\r\ndef get_population(latitude, longitude, year = \"2018\"):\r\n\tfips = get_fips(latitude, longitude)\r\n\tif fips is None:\r\n\t\treturn None\r\n\r\n\turl = urlopen(\"https://api.census.gov/data/\" + year + \"/pep/population?get=POP&for=county:\" + fips[1] + \"&in=state:\" + fips[0] + \"&key=\" + key)\r\n\tdata = json.loads(url.read())\r\n\treturn int(data[1][0])\r\n\r\n","sub_path":"census_data.py","file_name":"census_data.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"339867327","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/krflorek/Documents/dryad/dryad_app/dryad.py\n# Compiled at: 2020-05-08 17:22:21\n# Size of source mod 2**32: 5328 bytes\nimport sys, os, re, argparse\nfrom shutil import which, copyfile\nfrom datetime import date\nimport pexpect, re, sys\n\ndef main():\n lib_path = os.path.abspath(os.path.dirname(__file__) + '/' + '../lib')\n dryad_path = os.path.abspath(os.path.dirname(__file__))\n nextflow_path = os.path.join(lib_path, 'nextflow')\n\n class MyParser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\n parser = MyParser(description='A comprehensive tree building program.')\n 
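The `MyParser` subclass in dryad's `main()` above overrides `argparse.ArgumentParser.error` so that a bad invocation prints the full help text instead of argparse's terse one-line usage hint. A standalone sketch of that pattern; the class name and argument here are illustrative:

```python
import argparse
import sys

# Same override pattern as dryad's MyParser: on a parse error, print the
# full help text and exit with status 2 (argparse's conventional error code).
class HelpfulParser(argparse.ArgumentParser):
    def error(self, message):
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)

parser = HelpfulParser(description='demo')
parser.add_argument('reads_path', type=str)
# parser.parse_args([])  # would print the full help, then exit(2)
```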
parser.add_argument('reads_path', type=str, help='path to the directory of raw reads in the fastq format', nargs='?', default=False)\n    parser.add_argument('--output', '-o', metavar='<output_path>', type=str, help='path to output directory, default \"dryad_results\"', default='dryad_results')\n    parser.add_argument('--core-genome', '-cg', default=False, action='store_true', help='construct a core-genome tree')\n    parser.add_argument('--snp', '-s', default=False, action='store_true', help='construct a SNP tree, requires a reference sequence in fasta format (-r)')\n    parser.add_argument('-r', metavar='<path>', type=str, help='reference sequence for SNP pipeline')\n    parser.add_argument('-ar', default=False, action='store_true', help='detect AR mechanisms')\n    parser.add_argument('--sep', metavar='sep_chars', type=str, help='dryad identifies sample names from the name of the read file by splitting the name on the specified separating characters, default \"_\"', default='_')\n    parser.add_argument('--profile', type=str, choices=['docker', 'singularity'], help='specify nextflow profile, dryad will try to use docker first, then singularity')\n    parser.add_argument('--config', '-c', type=str, help='Nextflow custom configuration')\n    parser.add_argument('--get_config', action='store_true', help='get a Nextflow configuration template for dryad')\n    parser.add_argument('--resume', default='', action='store_const', const='-resume', help='resume a previous run')\n    parser.add_argument('--report', action='store_true', help='generate a pdf report')\n    args = parser.parse_args()\n    if args.get_config:\n        config_path = os.path.join(dryad_path, 'configs/dryad_config_template.config')\n        dest_path = os.path.join(os.getcwd(), date.today().strftime('%y-%m-%d') + '_dryad.config')\n        copyfile(config_path, dest_path)\n        sys.exit()\n    if not args.reads_path:\n        parser.print_help()\n        print('Please specify a path to a directory containing the raw reads.')\n        sys.exit(1)\n    elif args.snp:\n        if args.r == None:\n            parser.print_help()\n            print('Please specify a reference sequence for the SNP pipeline.')\n            sys.exit(1)\n    if which('docker'):\n        profile = '-profile docker'\n    elif which('singularity'):\n        profile = '-profile singularity'\n    else:\n        profile = ''\n    config = ''\n    if args.config:\n        config = '-C ' + os.path.abspath(args.config)\n        profile = ''\n    else:\n        if args.profile:\n            if which(args.profile):\n                profile = '-profile ' + args.profile\n            else:\n                print(f\"{args.profile} is not installed or found in PATH.\")\n        else:\n            if not profile:\n                print('Singularity or Docker is not installed or not found in PATH.')\n                sys.exit(1)\n    else:\n        work = ''\n        if profile:\n            work = f\"-w {args.output}/logs/work\"\n    selections = ''\n    if args.ar:\n        selections += ' --ar'\n    if args.core_genome:\n        selections += ' --cg'\n    if args.snp:\n        selections += f\" --snp --snp_reference {args.r}\"\n    if args.report:\n        if args.snp and args.core_genome:\n            report_template_path = os.path.abspath(os.path.dirname(__file__) + '/' + '../report/report.Rmd')\n            logo_path = os.path.abspath(os.path.dirname(__file__) + '/' + 'assets/dryad_logo_250.png')\n            selections += f\" --report {report_template_path} --logo {logo_path}\"\n    mqc_config_path = '--multiqc_config ' + os.path.join(dryad_path, 'configs/multiqc_config.yaml')\n    mqc_logo_path = '--multiqc_logo ' + os.path.join(dryad_path, 'assets/dryad_logo_250.png')\n    other_args = f\"--name_split_on {args.sep} --outdir {args.output}\"\n    command = nextflow_path\n    command = command + f\" {config} run {dryad_path}/dryad.nf {profile} {args.resume} --reads {args.reads_path} 
{selections} {other_args} {mqc_config_path} {mqc_logo_path} -with-trace {args.output}/logs/dryad_trace.txt -with-report {args.output}/logs/dryad_execution_report.html {work}\"\n print('Starting the Dryad pipeline:')\n child = pexpect.spawn(command)\n child.interact()","sub_path":"pycfiles/dryad-2.0.0-py3-none-any/dryad.cpython-37.py","file_name":"dryad.cpython-37.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"114974741","text":"class Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n res = ''\n for i in range(len(s)):\n odd = self.search(s, i, i)\n if len(odd) > len(res):\n res = odd\n even = self.search(s, i, i + 1)\n if len(even) > len(res):\n res = even\n return res\n\n def search(self, s, lo, hi):\n while lo >= 0 and hi < len(s) and s[lo] == s[hi]:\n lo -= 1\n hi += 1\n return s[lo + 1:hi]\n","sub_path":"leetcode_python/005_longest_palindromic_substring.py","file_name":"005_longest_palindromic_substring.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"509409142","text":"import random\np = 1\nwhile True:\n print(\"Enter a number between 0 and 10\")\n i = random.randint(0,10)\n pokusy = 0\n while True:\n gnum = int(input())\n pokusy +=1\n if gnum <= 10 :\n if gnum == i:\n print(\"YEEEY you guessed correctly :).And it took you only %s tries\" %pokusy)\n pokusy = 0\n break\n else:\n print(\"Try it again...\")\n else:\n print(\"You must enter a number between 0 and 10\")\n\n","sub_path":"Zlozite/hadanie cisla/hadanie.py","file_name":"hadanie.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"461253884","text":"import sqlite3\nfrom datetime import datetime\n\n\ndef create_batch(batch_id: str):\n conn = sqlite3.connect(\"cpq_code_compare.db\")\n sql = \"\"\" INSERT INTO BATCH_STATUS (BATCH_ID,STATUS,START_TIME,END_TIME,ERROR) VALUES (?,?,?,?,?)\"\"\"\n conn.execute(sql, (batch_id, \"In-Progress\", datetime.now(), None, None))\n conn.commit()\n print(\"Database updated successfully........\")\n conn.close()\n\n\ndef update_batch(batch_id: str, status: str, error: str = None):\n conn = sqlite3.connect(\"cpq_code_compare.db\")\n time = datetime.now()\n sql = \"\"\"Update BATCH_STATUS SET STATUS=?,END_TIME=?,ERROR=? 
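For reference, the `Solution` class above finds the longest palindromic substring by expanding around each of the 2n-1 possible centers (one odd-length and one even-length center per index), giving O(n^2) time and O(1) extra space. A quick sanity check, assuming the class above is in scope; the inputs are the usual illustrative cases:

```python
# Expand-around-center sanity check; ties keep the first palindrome found.
s = Solution()
assert s.longestPalindrome("babad") in ("bab", "aba")
assert s.longestPalindrome("cbbd") == "bb"
assert s.longestPalindrome("a") == "a"
```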
WHERE BATCH_ID = ?\"\"\"\n    conn.execute(sql, (status, time, error, batch_id))\n    print(\"Database updated successfully........\")\n    conn.commit()\n    conn.close()\n","sub_path":"windows/db_updates/update_db.py","file_name":"update_db.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"209245146","text":"from tkinter import *\n\ntk=Tk()\n\ntext=StringVar()\nname=StringVar()\nname.set('Alex')\ntext.set('')\ntk.title('Chat')\ntk.geometry('400x300')\n\nlog = Text(tk)\nnick = Entry(tk, textvariable=name)\nmsg = Entry(tk, textvariable=text)\nmsg.pack(side='bottom', fill='x', expand='true')\nnick.pack(side='bottom', fill='x', expand='true')\nlog.pack(side='top', fill='both',expand='true')\n\ndef loopproc():\n\tlog.insert (END,'Hello '+ name.get() + '!\\n')\n\ttk.after(1000,loopproc)\n\ndef sendproc(event):\n\tlog.insert (END,name.get()+':'+text.get()+'\\n')\n\ttext.set('')\n\nmsg.bind('<Return>',sendproc)\ntk.after(1000,loopproc)\ntk.mainloop()\n\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"596152827","text":"# load json and create model\n\nfrom keras.models import model_from_json\nfrom tensorflow import keras\nfrom keras.optimizers import Adam\n\n## Replace modelname with the name of model run you wish to load\n\nname='Densenet_test'\n\njson_file = open(f'{name}', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n\n# Replace weightsname with name used to save model weights in previous run\n\nloaded_model.load_weights(f'{name}.h5')\n\nopt = Adam(lr=1e-3, decay=1e-3 / 200)\nloaded_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n\nprint(\"--------------- MODEL LOADED ---------------------\")\n\n# Compile model and predict on test data\n\n# prediction = loaded_model.predict(x=[self.X_met_train, self.X_im_train], verbose=0)\n","sub_path":"skin_lesion_detection/loadmodel.py","file_name":"loadmodel.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"322070575","text":"from io import BytesIO\nimport string\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\n\nfrom tensorflow.keras.models import load_model\n\nfrom ijazahpy.preprocessing import crop_ijazah\nfrom ijazahpy.preprocessing import to_mnist_ar, to_mnist\nfrom ijazahpy.preprocessing import remove_noise_bin\nfrom ijazahpy.preprocessing import prepare_ws_image\nfrom ijazahpy.preprocessing import prepare_for_tr\nfrom ijazahpy.preprocessing import preprocess_for_tesseract\nfrom ijazahpy.segmentation import DotsSegmentation\nfrom ijazahpy.segmentation import WordSegmentation\nfrom ijazahpy.segmentation import segment_characters\nfrom ijazahpy.unit_test import process_label\n\nimport pytesseract\n\ndef decode_file(file):\n    return cv2.imdecode(np.fromstring(file.read(), np.uint8),\n                        cv2.IMREAD_COLOR)\n\ndef numpy_to_djfile(img_array, file=None):\n    pil_img = Image.fromarray(img_array)\n    thumb_io = BytesIO()\n    pil_img.save(thumb_io, format='JPEG')\n    file_ = InMemoryUploadedFile(thumb_io, None, file.name, 'image/jpeg', thumb_io.tell, None)\n    return file_\n\ndef crop(img):\n    return crop_ijazah(img)\n\ndef segment_dot_ijazah(og, val=47, dot_size=3, min_width=32):\n    img = 
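`update_db.py` above binds values through `?` placeholders instead of interpolating them into the SQL string, which sidesteps SQL injection and lets `sqlite3` adapt `datetime` objects. A self-contained sketch of the same style against an in-memory database; the table layout here is illustrative, not the real schema:

```python
import sqlite3
from datetime import datetime

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE BATCH_STATUS (BATCH_ID TEXT, STATUS TEXT, START_TIME TIMESTAMP)")

# values travel separately from the SQL text, so quoting is never an issue
conn.execute(
    "INSERT INTO BATCH_STATUS (BATCH_ID, STATUS, START_TIME) VALUES (?,?,?)",
    ("batch-001", "In-Progress", datetime.now()),
)
conn.commit()

row = conn.execute(
    "SELECT STATUS FROM BATCH_STATUS WHERE BATCH_ID = ?", ("batch-001",)
).fetchone()
assert row == ("In-Progress",)
conn.close()
```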
og.copy()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n dot = DotsSegmentation(rlsa_val=val)\n \n rects = dot.segment(gray, dot_size=dot_size, min_width=min_width)\n segmented_imgs = []\n model = load_model('trained_models/engchars-sgd-100-90.h5')\n for i, rect in enumerate(rects):\n x,y,w,h = rect\n segmented_img = gray[y:y+h, x:x+w]\n label = ''\n\n cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)\n \n # get label\n if x > 200 and x < 400:\n # segment from colored image. for detailEnhance process.\n label_img = og[y:y+h+10, 0:x]\n \n label_img = cv2.cvtColor(cv2.detailEnhance(label_img, sigma_s=10, sigma_r=0.15),\n cv2.COLOR_BGR2GRAY)\n \n chars = segment_characters(label_img)\n test_set = []\n for j, entry in enumerate(chars):\n box, char_img = entry[0], entry[1]\n mnist_like = to_mnist(char_img, aspect_ratio=False)\n \n test_set.append(mnist_like)\n\n test_set = np.asarray(test_set).reshape(-1, 28, 28, 1)\n predicted_y = model.predict(test_set)\n \n for prediction in predicted_y:\n label += string.ascii_letters[prediction.argmax()]\n \n segmented_imgs.append((segmented_img,\n process_label(label, metrics='ratio', tolerance=0.4),\n rect))\n\n return img, segmented_imgs\n\ndef segment_char(url, walk=False):\n img = cv2.imread(url[1:], cv2.IMREAD_GRAYSCALE)\n \n char_entries = segment_characters(img, walking_kernel=walk)\n res = []\n for entry in char_entries:\n try:\n mnist_like = to_mnist_ar(entry[1])\n res.append(mnist_like)\n except:\n continue\n return res\n\ndef segment_word(url):\n ws = WordSegmentation()\n img = cv2.imread(url[1:], cv2.IMREAD_GRAYSCALE)\n prepared_img = prepare_ws_image(img, 50)\n words = ws.segment(img)\n\n return words\n\ndef recognize_text(url, tr):\n ws = WordSegmentation()\n img = cv2.imread(url[1:], cv2.IMREAD_GRAYSCALE)\n \n prepared_img = prepare_ws_image(img, 50)\n _, prepared_img = cv2.threshold(prepared_img, \n 128, \n 255, \n cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n prepared_img = remove_noise_bin(prepared_img, 30)\n prepared_img = cv2.subtract(255,prepared_img)\n \n words = ws.segment(prepared_img)\n\n res = []\n for entry in words:\n curr_box, curr_img = entry\n\n curr_img = cv2.subtract(255,curr_img)\n\n if curr_img.shape[0] < 40 and curr_img.shape[1] < 40:\n continue\n \n curr_img = prepare_for_tr(curr_img, thresh=False)\n res.append(tr.recognize(curr_img))\n \n return res\n\ndef recognize_with_tesseract(url):\n img = cv2.imread(url[1:], cv2.IMREAD_GRAYSCALE)\n return pytesseract.image_to_string(\n preprocess_for_tesseract(img), config='--psm 7')\n\nif __name__ == '__main__':\n print(cv2.__version__)\n img = cv2.imread('G:\\\\Kuliah\\\\skripsi\\\\Project\\\\Ijazah\\\\ijazah3.jpg')\n \n entries = segment_dot_ijazah(crop_ijazah(img))\n for e in entries:\n word = e[1]\n if word != '':\n print(word)\n","sub_path":"SimpleApp/ijazah/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"140531360","text":"import torch\nimport torch.nn as nn\n\n# (conv => BN => ReLU) * 2\nclass double_conv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(double_conv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n \n def forward(self, x):\n x = self.conv(x)\n return x\n\n# input conv\nclass in_conv(nn.Module):\n def __init__(self, in_ch, 
out_ch):\n super(in_conv, self).__init__()\n self.conv = double_conv(in_ch, out_ch)\n \n def forward(self, x):\n x = self.conv(x)\n return x\n\n# down conv\nclass down(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(down, self).__init__()\n self.down_conv = nn.Sequential(\n nn.MaxPool2d(2),\n double_conv(in_ch, out_ch)\n )\n \n def forward(self, x):\n x = self.down_conv(x)\n return x\n\n# up conv\nclass up(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(up, self).__init__()\n self.up_conv = nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2)\n self.double_conv = double_conv(in_ch, out_ch)\n \n def forward(self, x, prex):\n x = self.up_conv(x)\n x = torch.cat([x, prex], dim=1)\n x = self.double_conv(x)\n return x\n\n# output conv\nclass out_conv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(out_conv, self).__init__()\n self.conv = nn.Conv2d(in_ch, out_ch, 1)\n \n def forward(self, x):\n x = self.conv(x)\n return x\n\n'''\nU-Net:\n __init__()\n num_ch: num of initial feature map channel (16)\n num_fm: num of output feature map channel (16)\n forward()\n input: N x 1 x 256 x 256\n output: N x num_fm x 256 x 256\n'''\nclass u_net(nn.Module):\n def __init__(self, num_ch, num_fm):\n super(u_net, self).__init__()\n self.inconv = in_conv(1, num_ch)\n self.down1 = down(num_ch, num_ch * 2)\n self.down2 = down(num_ch * 2, num_ch * 4)\n self.down3 = down(num_ch * 4, num_ch * 8)\n self.down4 = down(num_ch * 8, num_ch * 16)\n self.down5 = down(num_ch * 16, num_ch * 32)\n self.down6 = down(num_ch * 32, num_ch * 64)\n self.up6 = up(num_ch * 64, num_ch * 32)\n self.up5 = up(num_ch * 32, num_ch * 16)\n self.up4 = up(num_ch * 16, num_ch * 8)\n self.up3 = up(num_ch * 8, num_ch * 4)\n self.up2 = up(num_ch * 4, num_ch * 2)\n self.up1 = up(num_ch * 2, num_ch)\n self.outconv = out_conv(num_ch, num_fm)\n \n def forward(self, x):\n conx1 = self.inconv(x)\n conx2 = self.down1(conx1)\n conx3 = self.down2(conx2)\n conx4 = self.down3(conx3)\n conx5 = self.down4(conx4)\n conx6 = self.down5(conx5)\n conx7 = self.down6(conx6)\n x = self.up6(conx7, conx6)\n x = self.up5(x, conx5)\n x = self.up4(x, conx4)\n x = self.up3(x, conx3)\n x = self.up2(x, conx2)\n x = self.up1(x, conx1)\n x = self.outconv(x)\n return torch.sigmoid(x)\n","sub_path":"code/u_net.py","file_name":"u_net.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"513163335","text":"import asyncio\nimport datetime\nimport aiohttp\nfrom prometheus_client import CollectorRegistry\nfrom collections import deque, defaultdict\n\nimport discord\nfrom discord import Intents\nfrom discord.ext.commands import AutoShardedBot\n\nfrom Bot import Handlers\nfrom Utils import Utils\nfrom Database import Connector, DBUtils\n\n\ndb = Connector.Database()\n\n\ndef prefix_callable(bot, message):\n prefixes = [f\"<@!{bot.user.id}> \", f\"<@{bot.user.id}> \"]\n if message.guild is None:\n prefixes.append(\"+\")\n elif bot.READY:\n try:\n prefix = DBUtils.get(db.configs, \"guildId\", f\"{message.guild.id}\", \"prefix\")\n if prefix is not None:\n prefixes.append(prefix)\n else:\n prefixes.append(\"+\")\n except Exception:\n prefixes.append(\"+\")\n return prefixes\n\nclass AutoMod(AutoShardedBot):\n \"\"\"\n A subclass of AutoShardedBot\n The handling of initial events \n through the Handlers.py file\n is inspired by GearBot\n (https://github.com/gearbot/GearBot)\n \"\"\"\n READY = False\n version = \"\"\n command_count = 0\n custom_command_count = 0\n locked = 
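In the U-Net above, every `double_conv` uses 3x3 kernels with `padding=1`, so spatial size is preserved; only `MaxPool2d(2)` and `ConvTranspose2d(..., stride=2)` halve or double H and W. A minimal shape check, assuming PyTorch is installed and the blocks above are in scope:

```python
import torch

# padding=1 with 3x3 kernels keeps H and W; the pool in `down` halves them
block = double_conv(in_ch=1, out_ch=16)
x = torch.randn(2, 1, 64, 64)
assert block(x).shape == (2, 16, 64, 64)

d = down(in_ch=16, out_ch=32)
assert d(torch.randn(2, 16, 64, 64)).shape == (2, 32, 32, 32)
```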
True\n shard_count = 1\n shard_ids = []\n missing_guilds = []\n loading_task = None\n initial_fill_complete = False\n aiosession = None\n errors = 0\n own_messages = 0\n bot_messages = 0\n user_messages = 0\n cleans_running = dict()\n running_unbans = set()\n running_msg_deletions = set()\n running_removals = set()\n last_reload = None\n \n \n def __init__(self, shards=1):\n intents = Intents(\n guilds=True,\n members=True,\n bans=True,\n emojis=True,\n messages=True,\n reactions=True\n )\n super().__init__(\n command_prefix=prefix_callable, intents=intents, description=\"Discord moderation bot\",\n case_insensitive=True, max_messages=1000, chunk_guilds_at_startup=False, shard_count=shards\n )\n self.total_shards = shards\n\n self.session = aiohttp.ClientSession(loop=self.loop)\n self.prev_events = deque(maxlen=10)\n\n self.resumes = defaultdict(list)\n self.identifies = defaultdict(list)\n\n\n def _clear_gateway_data(self):\n ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)\n for sid, dates in self.identifies.items():\n needs_removal = [i for i, dt in enumerate(dates) if dt < ago]\n for i in reversed(needs_removal):\n del dates[i]\n\n for sid, dates in self.resumes.items():\n needs_removal = [i for i, dt in enumerate(dates) if dt < ago]\n for i in reversed(needs_removal):\n del dates[i]\n\n async def _run_event(self, coro, event_name, *args, **kwargs):\n while (self.locked or not self.READY) and event_name != \"on_ready\":\n await asyncio.sleep(0.2)\n await super()._run_event(coro, event_name, *args, **kwargs)\n\n\n async def on_socket_response(self, message):\n self.prev_events.append(message)\n\n\n async def on_shard_resumed(self, sid):\n self.resumes[sid].append(datetime.datetime.utcnow())\n await Handlers.on_shard_resumed(self, sid)\n \n\n async def before_identify_hook(self, sid, *, initial):\n self._clear_gateway_data()\n self.identifies[sid].append(datetime.datetime.utcnow())\n await super().before_identify_hook(sid, initial=initial)\n \n\n\n def run(self): # a custom run function\n try:\n super().run(Utils.from_config(\"TOKEN\"), reconnect=True)\n finally:\n with open(\"prev_events.log\", \"w\", encoding=\"utf-8\") as f:\n for data in self.prev_events:\n try:\n x = json.dumps(data, ensure_ascii=True, indent=4)\n except:\n f.write(f\"{data}\\n\")\n else:\n f.write(f\"{x}\\n\")\n \n \"\"\"Events handled through Handlers.py\"\"\"\n async def on_ready(self):\n await Handlers.on_ready(self)\n\n async def on_message(self, message):\n await Handlers.on_message(self, message)\n\n async def on_guild_join(self, guild):\n await Handlers.on_guild_join(self, guild)\n\n async def on_guild_remove(self, guild):\n await Handlers.on_guild_remove(self, guild)\n\n async def on_command_error(self, ctx, error):\n await Handlers.on_command_error(self, ctx, error)\n\n async def on_guild_update(self, before, after):\n await Handlers.on_guild_update(before, after)\n\n async def on_shard_connect(self, shard_id):\n await Handlers.on_shard_connect(self, shard_id)\n\n async def on_shard_disconnect(self, shard_id):\n await Handlers.on_shard_disconnect(self, shard_id)\n \n async def on_shard_ready(self, shard_id):\n await Handlers.on_shard_ready(self, shard_id)\n","sub_path":"src/Bot/AutoMod.py","file_name":"AutoMod.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"151151564","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\n\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as tp\n\n\ndef np_var(input_nhwc, eps=1e-05):\n assert len(input_nhwc.shape) == 4\n input_nhwc_reshape_to_1d = np.reshape(\n input_nhwc, (input_nhwc.shape[0], -1, input_nhwc.shape[3])\n )\n\n # compute instance normalization in numpy\n mean_np = np.mean(input_nhwc_reshape_to_1d, axis=(1), keepdims=True)\n in_sub_mean = input_nhwc_reshape_to_1d - mean_np\n var_np = np.var(input_nhwc_reshape_to_1d, axis=(1), keepdims=True)\n\n gamma = np.ones((1, 1, input_nhwc_reshape_to_1d.shape[2]), dtype=np.float32)\n beta = np.zeros((1, 1, input_nhwc_reshape_to_1d.shape[2]), dtype=np.float32)\n\n invar_np = 1.0 / np.sqrt(var_np + eps)\n out_np = in_sub_mean * invar_np * gamma + beta\n\n return out_np, mean_np, var_np\n\n\ndef _compare_with_np(test_case, input_shape, eps=1e-5):\n flow.config.enable_legacy_model_io(True)\n\n np_input = np.random.random(input_shape).astype(np.float32)\n\n config = flow.function_config()\n config.default_placement_scope(flow.scope.placement(\"cambricon\", \"0:0\"))\n\n @flow.global_function(type=\"predict\", function_config=config)\n def instance_norm_2d_job(\n x: tp.Numpy.Placeholder(np_input.shape, dtype=flow.float32)\n ):\n out, mean, var = flow.nn.InstanceNorm2d(x, eps=eps)\n return out, mean, var\n\n check_point = flow.train.CheckPoint()\n check_point.init()\n\n out_of, mean, var = instance_norm_2d_job(np_input).get()\n out_of = out_of.numpy()\n print(mean.numpy())\n print(var.numpy())\n out_np, mean_np, var_np = np_var(np_input, eps=eps)\n print(\"numpy: \")\n print(mean_np)\n print(var_np)\n\n test_case.assertTrue(np.allclose(out_np.flatten(), out_of.flatten(), atol=1e-04))\n\n\n@flow.unittest.skip_unless_1n1d()\nclass TestInstanceNorm2D(flow.unittest.TestCase):\n def test_random_value(test_case):\n # _compare_with_np(test_case, (2, 2, 2, 3))\n _compare_with_np(test_case, (2, 512, 512, 3))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"oneflow_cambricon-cambricon/oneflow/python/test/ops/_test_instance_norm_2d_cambricon.py","file_name":"_test_instance_norm_2d_cambricon.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"276360629","text":"\"\"\"\n The new main file for the slack bot\n\"\"\"\nfrom slackclient import SlackClient\n\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom sqlalchemy.sql import select\nfrom sqlalchemy import create_engine\nfrom random import randint\nimport os\nimport config\nimport time\nimport sqlite3\nimport httplib2\nfrom apiclient.discovery import build\nimport datetime\nimport dateutil.parser\nimport pygal\nimport util\nimport requests\nclass wHacker:\n def __init__(self,hackerID,request):\n self.h = hackerID\n self.r = request\n\n\nclass eventObj:\n def __init__(self,startime,summary):\n self.s = startime\n self.sum = summary\n#List Of Waiting Hacker -> Hackers 
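The OneFlow test above validates `InstanceNorm2d` against NumPy by reshaping the NHWC input to `(N, H*W, C)` and taking mean and variance over the positions axis, per sample and per channel. The same math restated as a standalone check (shapes are illustrative):

```python
import numpy as np

eps = 1e-5
x = np.random.rand(2, 4, 4, 3).astype(np.float32)   # NHWC

flat = x.reshape(x.shape[0], -1, x.shape[3])        # (N, H*W, C)
mean = flat.mean(axis=1, keepdims=True)             # one mean per (sample, channel)
var = flat.var(axis=1, keepdims=True)
out = (flat - mean) / np.sqrt(var + eps)

# each (sample, channel) slice now has ~zero mean and ~unit variance
assert np.allclose(out.mean(axis=1), 0.0, atol=1e-5)
assert np.allclose(out.var(axis=1), 1.0, atol=1e-2)
```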
who are currently waiting for a mentor to respond to them!\nLOWH = []\n#List Of Active Channels -> Active channels created from the mentor chat.\nLOAC = []\nBOT_NAME = 'slackru'\nslack_client = SlackClient(config.apiT)\nslack_web_client = SlackClient(config.oauthT)\nBOTID = config.botID\nAT_BOT = \"<@\" + BOTID + \">\"\nBOT_CHANNEl = \"D4GSK3HG9\"\n#authorize google calender stuff\n#def get_messages():\n# events_obj = []\n# scopes = ['https://www.googleapis.com/auth/calendar']\n# credentials = ServiceAccountCredentials.from_json_keyfile_name(\n#'creds.json', scopes=scopes)\n# http_auth = credentials.authorize(httplib2.Http())\n# service = build('calendar', 'v3', http=http_auth)\n# page_token = None\n#\n#\n# now = datetime.datetime.utcnow().isoformat() + 'Z'\n# eventsResult = service.events().list(\n# calendarId='hl4bsn6030jr76nql68cen2jto@group.calendar.google.com', timeMin=now, maxResults=5, singleEvents=True,\n# orderBy='startTime').execute()\n# events = eventsResult.get('items', [])\n#\n# if not events:\n# print('No upcoming events found.')\n# for event in events:\n# start = event['start'].get('dateTime', event['start'].get('date'))\n# end = event['end'].get('dateTime',event['end'].get('date'))\n# dte = dateutil.parser.parse(end)\n#\n# dt = dateutil.parser.parse(start)\n# dte = dte.strftime('%H:%M')\n# dt = dt.strftime('%H:%M')\n# rn = str(dt) + \" - \" + str(dte)\n# e = eventObj(rn,event['summary'])\n# events_obj.append(e)\n# return events_obj\n#get_messages()\ndef hours_left():\n epoch_of_end_hack_ru = 1492970400\n curr_epoch_time = int(time.time())\n return (epoch_of_end_hack_ru/3600 - curr_epoch_time/3600)\ndef parse_slack_output(slack_rtm_output):\n \"\"\"\n The Slack Real Time Messaging API is an events firehose.\n this parsing function returns None unless a message is\n directed at the Bot, based on its ID.\n \"\"\"\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n for output in output_list:\n if output and 'text' in output and AT_BOT in output['text']:\n print(output['text'])\n user_name = util.grab_user(output['user'])\n return output['text'].split(AT_BOT)[1].strip().lower(), \\\n output['channel'], \\\n output['user'], \\\n user_name\n\n return None, None, \"\", \"\"\n\n#userid, mentorid\n#opens a multiparty im with some users\n\ndef create_channel_pair(userid, mentorid, username, mentorname, question):\n userlist = []\n userlist.append(config.botID)\n userlist.append(mentorid)\n print (\"This is the question: \"+question)\n print (\"MENTOR ID \" + mentorid)\n print (\"USERID \" + userid)\n newGroup = slack_web_client.api_call(\n\n \"mpim.open\",\n token = config.oauthT,\n users = userid + ',' + mentorid + ',' + config.botID\n )\n if not newGroup.get('ok'):\n return\n\n print (newGroup)\n test = slack_client.api_call(\"chat.postMessage\", channel = newGroup['group']['id'], text = \"This channel has been created to resolve the issue \"+mentorname+\"'s issue. When the issue has been resolved, mentor please call the @slackru unbusy command. If you do not know the password please contact Architect's Sam or Srihari. Good luck!\\nIssue: \"+question, as_user = True);\n print (test)\n #Once the active channel is created, put it in the LOAC array so it can be monitored\n #And reminded if things go sour (afk mentor, afk hacker, etc)\n LOAC.append(newGroup['group']['id'])\n\ndef handle_command(command:str, channel:str,userid:str,username:str) -> None:\n \"\"\"\n Receives commands directed at the bot and determines if they\n are valid commands. 
If so, then acts on the commands. If not,\n returns back what it needs for clarification.\n :param command:str the command to parse\n :param channel:str the channel id\n :param userid:str the user id\n :param:str the username \n \"\"\"\n print (command)\n dividedCommand = command.split()\n cmd = dividedCommand[0]\n cmd = cmd.lower()\n \n if cmd == 'mentors':\n print (len(dividedCommand)) \n if len(dividedCommand) == 1:\n util.message(userid,\"Please input a question\")\n else:\n findMentor(command[8:],username,userid)\n elif cmd == 'help':\n help(userid,username)\n #call the findAvailMentorCommand\n\n\ndef findMentor(command:str,username:str,userid:str) -> str:\n \"\"\"\n Makes a post request to the server and passes the pairing to he mentee\n :param command:str the parsedcommand\n :param username:str the username\n \"\"\"\n postData = {}\n postData['data'] = command\n postData['user'] = username\n postData['userid'] = userid\n util.message(userid,\"Trying to find a mentor\")\n req = requests.post(config.serverurl +'pairmentor',data = postData)\n return req.text\n \n\n\n\n \n\ndef help(userid, username):\n util.message(userid,\"Hello! You requested the help command, here are a list of commands you can use delimeted by |'s:\")\n util.message(userid,\"All commands will begin with slackru\")\n util.message(userid,\"\"\"Hacker:\\n| mentors | -> This command takes keywords and attempts to set you up with a mentor\n \\n| help | -> Wait what?\n \\n | announcements | -> returns next 5 events \\n | hours | -> returns hours left in the hackathon\n \\nMentor:\\n| shortenList | -> Used to help a hackers whose keywords could not be found.\n \\n | unbusy | makes your busy status 0, so you can help more people!\n \\n | busy | -> opposite of the guy above, used when you want to afk I guess\"\"\")\n\ndef checkTime(channelid):\n ts = time.time()\n timeStuff = slack_web_client.api_call(\"channels.history\", token = config.oauthT,channel = channelid, latest = ts-1850)\n if not timeStuff.get('ok'):\n print(\"The api call did not work as intended!\")\n print(timeStuff)\n return\n latestMessage = timeStuff['messages'][1]['text']\n message(channelid,\"This is the latest message: \"+latestMessage)\n return\n\ndef list_channels():\n channels_call = slack_client.api_call(\"groups.list\")\n if channels_call.get('ok'):\n return channels_call['groups']\ndef shortenlist(mentorID, mentorName, commandOptions):\n if(commandOptions[1] != config.mpass):\n util.message(mentorID, \"The password inserted was incorrect, please try again. If you need the password message Architect Sam or Architect Sri.\")\n return\n hackerID = commandOptions[2].upper()\n found = 0\n messageL = \"\"\n #look for the hacker on the list...\n for i in LOWH:\n if(i.h == hackerID):\n found = 1\n continue\n #Isn't on the list? two things possible -> the mentor typed it in wrong or they hacker already found another mentor\n #Either way, the entire list of hackers on the list are printed to the mentor so they can see the id\n if(found == 0):\n util.message(mentorID, \"I couldn't seem to find the hacker you tried to look for, please look back at the messages sent to the mentor chat and make sure you got the right thing! If you are sure you put in the right ID, then the hacker probably found another mentor. 
Here is the current list of hackers who are waiting for help...\")\n listOfHackers = \"\"\n for i in LOWH:\n util.message(mentorID, i.h+\"\\t\"+i.r+\"\\n\")\n print(\"This is the hackerID: \"+hackerID)\n else:\n #remove them from the waiting list\n for i in LOWH:\n if(i.h == hackerID):\n messageL = i.r\n LOWH.remove(i)\n #create channel pair\n userInfo = slack_client.api_call(\"user.info\", user=hackerID, as_user=True)\n print(\"Trying to pair from list...\")\n create_channel_pair(hackerID, mentorID,util.grab_user(hackerID), mentorName,messageL)\n\ndef findAvaliableMentor(hackerName,userid ,keywords):\n #This is used later on if we have to put this user in the list of waiting hackers\n saveKeywords = keywords;\n strKeywords = \"\"\n for i in saveKeywords:\n if i == \"mentors\":\n continue\n strKeywords += i\n strKeywords += \" \"\n print(\"These are the keywords: \"+strKeywords)\n goodMentors = [];\n #Join the keywords for an easier way to search for keywords\n conn = sqlite3.connect(\"main.db\")\n #Find an unbusy mentor\n count = 0\n found = [0,\"dummy\"]\n listOfUnBusyMentors = conn.execute(\"select mentorid from mentors where busy=0\")\n #This entire process below is for finding a mentor that is currently not busy and contains\n #0 - several keywords that the user's request has\n for i in listOfUnBusyMentors:\n Mentorid = i[0];\n listOfKeywords = conn.execute(\"select keywords from mentors where mentorid = ?\",[Mentorid])\n for j in listOfKeywords:\n Keywords = j[0].split(\",\")\n for k in Keywords:\n k = k.lower()\n for z in keywords:\n z = z.lower()\n if k == z:\n count = count+1\n\n #Everytime the count is larger than the currently largest count, swap a new found\n if count > found[0]:\n found[0] = count\n found[1] = Mentorid\n\n if count > 0:\n goodMentors.append(Mentorid)\n\n count = 0\n #If the dummy value is still valid, then we know no keywords were found :(\n\n if(found[1] == \"dummy\"):\n print(\"Could not find suitable mentor!\")\n #This method below should be uncommented once we have the mentors channel set up and we have the mentor channel id\n slack_client.api_call(\"chat.postMessage\", channel='G53T6D0A2',\n text=\"There is currently a hacker with the ID: \"+str(userid)+\" Who is having trouble with: \"+strKeywords+\". Please use the command slackru shortenlist in order to help them with this if you can! The password is mentors2017\", as_user=True)\n util.message(userid,\"We could not find a mentor with the paramters you gave us, we have placed your request on a list. If a mentor believes they can help you with this issue they will respond to you! 
You are more than welcome to use the mentors command again with new keywords!\")\n f = 0\n strKeywords = \"\"\n for i in LOWH:\n if i.h == userid:\n f = 1\n #If they are currently not in the waiting list, put them on it\n if(f == 0):\n i = 1\n while(i < len(saveKeywords)):\n strKeywords += (saveKeywords[i])\n strKeywords += \" \";\n i+=1\n newWaitingHacker = wHacker(userid,strKeywords)\n LOWH.append(newWaitingHacker)\n\n else:\n if found[0] < 3:\n randomMentor = randint(0,len(goodMentors)-1);\n found[1] = goodMentors[randomMentor]\n for i in LOWH:\n if(i.h == userid):\n LOWH.remove(i)\n print(\"Suitable mentor found!\\n\"+found[1]+\"!\")\n conn.execute(\"update mentors set busy = 1 where mentorid = ?\",[found[1]])\n\n #create a channel between the two\n create_channel_pair(userid,found[1],util.grab_user(userid),util.grab_user(found[1]),strKeywords)\n conn.commit()\n\n conn.close()\n\n#This is for management of the database woot woot\n#This function has a lot of functionality (no pun intended) the dbcommand paramter\n#Is a list of different strings which represent different options\n#Below each if statemenet are the paramters that the command needs in order to continue the action on the database\ndef dbManage(mentorid,channelid, dbcommand):\n conn = sqlite3.connect('main.db')\n #params: dbmanager \n #delimit and tokenize the command, first part is pass\n if(dbcommand[1] != config.dbpass):\n slack_client.api_call(\"chat.postMessage\", channel=channel,text=\"Incorrect Password.\", as_user=True)\n return\n #delimit and tokenize the command for the second part for the command\n elif(dbcommand[2] == \"delete\" or dbcommand[2] == \"d\"):\n print(\"Attempting to delete...\")\n #Option Params: mentorid\n #Cannot really delete a user so we will make them permanently busy\n if len(command) != 3:\n print(\"Incorrect arguments got: \"+str(len(dbcommand))+\" need 3\")\n print(\"Deleting...\")\n conn.execute(\"update mentors set busy = 1 where mentor=?\",[dbcommand[3]])\n print(conn.execute(\"select * from mentor\"))\n\n elif(dbcommand[2] == \"addmentor\" or dbcommand[2] == \"am\"):\n #Option Params::: name busy keywords mentorid\n print(\"Adding new Mentor\")\n if len(dbcommand) != 9:\n print(\"Incorrect arguments got: \"+str(len(dbcommand))+\" arguments instead of 7\")\n return\n conn.execute(\"insert into mentors values (?,?,?,?,?)\",[dbcommand[4]+\" \"+dbcommand[5],dbcommand[6],dbcommand[7],mentorid,dbcommand[8]])\n print(conn.execute(\"select * from mentors\"))\n\n elif(dbcommand[2] == \"busystat\" or dbcommand[2] == \"bs\"):\n #Option Params: mentorid <0,1> 0 for unbusy 1 for busy\n print(\"Changing busy status...\")\n if dbcommand[4] == 0:\n match = conn.execute(\"select mentorid from mentors where mentorid=?\",[dbcommand[3]])\n if list(match) == []:\n util.message(mentorid, \"I tried tried to change your status in the database, but could not, please contact Architect Sam or Shrihari!\")\n else:\n conn.execute(\"update mentors set busy = 0 where mentorid =?\",[dbcommand[3]])\n util.message(mentorid,\"All good buddy, set you to unbusy!\")\n if dbcommand[4] == 1:\n match = conn.execute(\"select mentorid from mentors where mentorid=?\",[dbcommand[3]])\n if list(match) == []:\n util.message(mentorid, \"I tried tried to change your status in the database, but could not, please contact Architect Sam or Shrihari!\")\n else:\n conn.execute(\"update mentors set busy = 1 where mentorid =?\",[dbcommand[3]])\n util.message(mentorid,\"All good buddy, set you to busy!\")\n\n elif(dbcommand[2] == 
'setinactive'):\n match = conn.execute(\"select mentorid from mentors where mentorid=?\",[dbcommand[3]])\n if list(match) == []:\n util.message(mentorid, \"I tried tried to change your status in the database, but could not, please contact Architect Sam or Shrihari!\")\n else:\n conn.execute(\"update mentors set inactive = 1 where mentorid =?\",[dbcommand[3]])\n conn.execute(\"update mentors set busy = 1 where mentorid=?\",[dbcommand[3]])\n util.message(mentorid,\"Made you inactive!!\")\n\n elif(dbcommand[2] == 'setactive'):\n match = conn.execute(\"select mentorid from mentors where mentorid=?\",[dbcommand[3]])\n if list(match) == []:\n util.message(mentorid, \"I tried tried to change your status in the database, but could not, please contact Architect Sam or Shrihari!\")\n else:\n conn.execute(\"update mentors set inactive = 0 where mentorid =?\",[dbcommand[3]])\n conn.execute(\"update mentors set busy = 0 where mentorid =?\",[dbcommand[3]])\n util.message(mentorid,\"Made you active!!\")\n\n\n elif(dbcommand[2] == 'listactivity'):\n match1 = conn.execute(\"select name from mentors where inactive=1\")\n\n c = list(match1.fetchall())\n if list(c) == []:\n util.message(mentorid,\"List contains no inactive mentors!\")\n else:\n util.message(mentorid,\"Current Inactive Mentors\\n________________________\")\n for i in c:\n util.message(mentorid,str(i[0]).upper() + '\\n')\n\n match2 = conn.execute(\"select name from mentors where inactive=0\")\n c = list(match2.fetchall())\n\n if list(match2.fetchall()) == None:\n util.message(mentorid,\"List contains no active mentors!\")\n else:\n util.message(mentorid,\"Current Active Mentors\\n______________________\")\n for i in c:\n util.message(mentorid,i[0].upper()+\"\\n\")\n\n conn.commit()\n conn.close()\n\n\n\ndef checkOnChannels():\n for i in LOAC:\n channelINFO = slack_web_client.api_call(\"mpim.history\", channel = i)\n currentTS = int(time.time())\n #If the latest message on the chat was an hour ago, ask the chat if everything is ok once\n if(currentTS > int(float(channelINFO['messages'][0]['ts']))+(3600*2)):\n util.message(i,\"The last message sent on this channel is an hour long! Just making sure everything is alright!\")\n util.message(i, \"For Mentors:\")\n util.message(i,\"If you are finished with the issue make sure to run @slackru unbusy if you haven't already\")\n util.message(i,\"(message Architects Sam or Srihari for that!)\")\n util.message(i,\"For Hackers:\")\n util.message(i,\"If your mentor is not responding, you can run the command for a mentor again\")\n util.message(i,\"This channel will stop being monitored by slackru.\")\n LOAC.remove(i)\n\n\n\n#sends message to a channel\nif __name__ == \"__main__\":\n READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose\n if slack_client.rtm_connect():\n print(\"SlackRU connected and running!\")\n while True:\n command, channel, userid,username = parse_slack_output(slack_client.rtm_read())\n if command and channel:\n handle_command(command, channel,userid,username)\n #check busy status of all users, their last time busy and if they have been busy for more than 35 minutes\n time.sleep(READ_WEBSOCKET_DELAY)\n #This function will check on all the active channels and if the latest response was an hour ago from the current time\n #The bot will message the channel and let them know it will be stop being monitored and give them insturctions\n #For certain scenarios.\n checkOnChannels()\n else:\n print(\"Connection failed. 
Invalid Slack token or bot ID?\")\n","sub_path":"slackrureborn.py","file_name":"slackrureborn.py","file_ext":"py","file_size_in_byte":19122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"504004044","text":"import cv2\n\n# set verbose to true to see intermediate results\ndef findtext(imgpath, verbose=False):\n img = cv2.imread(imgpath, 0)\n if verbose:\n cv2.imshow('orig', img)\n\n # we are blurring more over the X axis as the letters are non-square\n blur = cv2.GaussianBlur(img, (25, 15), 0)\n if verbose:\n cv2.imshow('blurred', blur)\n\n # unfortunately I get an error trying to set the kernel size different to 3\n # I use scale to imitate a larger kernel\n laplace = cv2.Laplacian(blur, 8, None, ksize=3, scale=2)\n if verbose:\n cv2.imshow('laplace', laplace)\n\n # the threshold value is determined experimentally to reduce artifacts on the image edges\n res = cv2.threshold(laplace, 7, 255, type=cv2.THRESH_BINARY)\n return res[1]\n\n\nif __name__ == \"__main__\":\n cv2.imshow('result', findtext(\"text.bmp\", True))\n cv2.waitKey(0)\n\n","sub_path":"task1/findtext.py","file_name":"findtext.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"483929091","text":"import pandas as pd\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\n\r\nfrom sklearn.metrics import mean_squared_error,explained_variance_score\r\nfrom sklearn.model_selection import KFold\r\nimport xgboost as xgb\r\nfrom xgboost.sklearn import XGBRegressor\r\nfrom math import radians, cos, sin, asin, sqrt\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n# baseline只用到gps定位数据,即train_gps_path\r\nEARTH_REDIUS = 6378.137\r\ntrain_gps_path = 'del_not_suit_dest_data_2.csv'\r\ntest_data_path = 'event_port/A_testData0531.csv'\r\norder_data_path = 'event_port/loadingOrderEvent.csv'\r\nport_data_path = 'event_port/port_archived.csv'\r\n#train_gps_path = 'D:/data1/skip_multi_trace_data.csv'\r\n#test_data_path = 'D:/A_testData0531.csv'\r\n#order_data_path = 'D:/event_port/loadingOrderEvent.csv'\r\n#port_data_path = 'D:/event_port/port_archived1.csv'\r\n\r\ndef geodistance(lng1,lat1,lng2,lat2):\r\n #print(type(lng1),type(lng2),type(lat1),type(lat2))\r\n #print(lng1,lng2,lat1,lat2)\r\n lng1, lat1, lng2, lat2 = map(radians, [float(lng1), float(lat1), float(lng2), float(lat2)]) # 经纬度转换成弧度\r\n dlon=lng2-lng1\r\n dlat=lat2-lat1\r\n a=sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\r\n distance=2*asin(sqrt(a))*6371*1000 # 地球平均半径,6371km\r\n distance=round(distance/1000,3)\r\n return distance\r\n\r\ndef get_data(data, mode='train'):\r\n assert mode == 'train' or mode == 'test'\r\n #类型转换\r\n if mode == 'train':\r\n data['vesselNextportETA'] = pd.to_datetime(data['vesselNextportETA'],infer_datetime_format=True)\r\n elif mode == 'test':\r\n data['temp_timestamp'] = data['timestamp']\r\n data['onboardDate'] = pd.to_datetime(data['onboardDate'], infer_datetime_format=True)\r\n data['timestamp'] = pd.to_datetime(data['timestamp'], infer_datetime_format=True)\r\n data['longitude'] = data['longitude'].astype(float)\r\n data['loadingOrder'] = data['loadingOrder'].astype(str)\r\n data['latitude'] = data['latitude'].astype(float)\r\n data['speed'] = data['speed'].astype(float)\r\n data['direction'] = data['direction'].astype(float)\r\n return data\r\n\r\n\r\ndef mean_skip_zero(arr):\r\n number=0\r\n mysum=0\r\n for i,v in arr.iteritems():\r\n if v!=0:\r\n number+=1\r\n mysum+=v\r\n if 
number==0:\r\n return 0\r\n else:\r\n return mysum/number\r\ndef MY_MSE_skip_zero(arr):\r\n number = 0\r\n mysum = 0\r\n for i, v in arr.iteritems():\r\n if v != 0:\r\n number += 1\r\n mysum += v\r\n if number == 0:\r\n average=0\r\n else:\r\n average=mysum / number\r\n res=0\r\n for i,v in arr.iteritems():\r\n if v!=0:\r\n res+=np.square(average-v)\r\n if number==0:\r\n return 0\r\n else:\r\n return res/number\r\ndef get_time(arr):\r\n a=(arr.max()-arr.min()).total_seconds()\r\n #print(a)\r\n return a\r\n#下面这些表示该函数返回后的结果中的列名,其中的loadingOrder,label,count不是训练特征,其他的都是特征\r\n#loadingOrder,distance,mean_speed,speed_mse,mean_speed_skip0,speed_mse_skip0\r\n#anchor_0_6,anchor_7_15,label(时间、label),count,anchor_ratio_0_6,anchor_ratio_7_15,\r\ndef get_feature_train(df):\r\n df.sort_values(['loadingOrder', 'timestamp'], inplace=True)\r\n #首先按照订单号进行排序,然后按照时间进行排序\r\n df['lat_diff']=df.groupby('loadingOrder')['latitude'].diff(1)#计算相邻两个时间点上的经纬度差\r\n df['lon_diff']=df.groupby('loadingOrder')['longitude'].diff(1)\r\n df['point_to_point']=df.apply(lambda x:geodistance(x['latitude'],x['longitude'],x['latitude']-\r\n x['lat_diff'],x['longitude']-x['lon_diff']) if True else 0,axis=1) #计算当前这一点与上一点之间的距离\r\n dis=df.groupby('loadingOrder')['point_to_point'].agg('sum').reset_index()#dis表示每一个订单中对应的总距离\r\n dis.columns=['loadingOrder','distance']\r\n mean_speed=df.groupby('loadingOrder')['speed'].agg(['mean','var',mean_skip_zero,MY_MSE_skip_zero]).reset_index()#求出速度,速度的方差,有0,无0的\r\n mean_speed.columns=['loadingOrder','mean_speed','speed_mse','mean_speed_skip0','speed_mse_skip0']\r\n df['anchor_0_6']=df.apply(lambda x: 1.0 if x['speed']<=6 else 0.0,axis=1)#抛锚次数\r\n df['anchor_7_15']=df.apply(lambda x:1.0 if x['speed']>6 and x['speed']<=15 else 0.0,axis=1)\r\n res_df=df.groupby('loadingOrder').agg({'anchor_0_6':['sum'],'anchor_7_15':['sum']}).reset_index()\r\n res_df.columns=['loadingOrder','anchor_0_6','anchor_7_15']#hhhhhhhhhhhh\r\n a=df.groupby('loadingOrder')['timestamp'].agg(['count','max','min']).reset_index()\r\n a.columns=('loadingOrder','count','max','min')\r\n #a['label']=a.apply(lambda x:(x['max']-x['min']).total_sconds())\r\n a['label']=(a['max']-a['min']).dt.total_seconds()\r\n a=a[['loadingOrder','count','label']]\r\n #print(a)\r\n res_df=res_df.merge(a,on='loadingOrder')\r\n #res_df['label']=df.groupby('loadingOrder')['timestamp'].agg(get_time).reset_index()#时间\r\n res_df['anchor_ratio_0_6']=res_df['anchor_0_6']/res_df['count']\r\n res_df['anchor_ratio_7_15']=res_df['anchor_7_15']/res_df['count']\r\n res_df=res_df.merge(dis,on='loadingOrder')\r\n res_df=res_df.merge(mean_speed,on='loadingOrder')\r\n first_df = df.sort_values('timestamp').groupby('loadingOrder', as_index=False).first() # 找出最近的时间戳\r\n first_df = first_df[['loadingOrder', 'longitude', 'latitude']]\r\n first_df.columns = ['loadingOrder', 'first_longitude', 'first_latitude']\r\n last_df = df.sort_values('timestamp', ascending=False).groupby('loadingOrder', as_index=False).first()\r\n last_df = last_df[['loadingOrder', 'longitude', 'latitude']]\r\n last_df.columns = ['loadingOrder', 'last_longitude', 'last_latitude']\r\n first_df = first_df.merge(last_df, on='loadingOrder') # 存储的是第一个经纬度和最后一个经纬度\r\n res_df = res_df.merge(first_df, on='loadingOrder')\r\n res_df.reset_index(drop=True)\r\n #应该把count这一列删去?,count是GPS的检测次数\r\n return res_df\r\n\r\n#loadingOrder,distance,mean_speed,speed_mse,mean_speed_skip0,speed_mse_skip0\r\n#anchor_0_6,anchor_7_15,label(时间、label),count,anchor_ratio_0_6,anchor_ratio_7_15,\r\ndef get_feature_test(df,port_data_path):\r\n 
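\"\"\"Test-set twin of get_feature_train: builds the same per-order speed and anchoring aggregates, but extends the travelled distance with the remaining great-circle distance from the last GPS fix to the destination port parsed from TRANSPORT_TRACE.\"\"\"\r\n 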
df.sort_values(['loadingOrder', 'timestamp'], inplace=True)\r\n # 首先按照订单号进行排序,然后按照时间进行排序\r\n df['lat_diff'] = df.groupby('loadingOrder')['latitude'].diff(1) # 计算相邻两个时间点上的经纬度差\r\n df['lon_diff'] = df.groupby('loadingOrder')['longitude'].diff(1)\r\n df['point_to_point'] = df.apply(lambda x: geodistance(x['latitude'], x['longitude'], x['latitude'] -\r\n x['lat_diff'], x['longitude'] - x['lon_diff']) if True else 0,axis=1) # 计算当前这一点与上一点之间的距离\r\n dis = df.groupby('loadingOrder')['point_to_point'].agg('sum').reset_index() # dis表示每一个订单中对应的总距离\r\n dis.columns=['loadingOrder','previous_dis']\r\n #接下来计算后半段的距离\r\n back_dis=df.sort_values('timestamp',ascending=False).groupby('loadingOrder',as_index=False).first()#找出最远的那个时间戳\r\n back_dis['dest']=back_dis.apply(lambda x:x['TRANSPORT_TRACE'][x['TRANSPORT_TRACE'].rfind('-')+1:] if True else '',axis=1)#提取出终点港口\r\n ports=pd.read_csv(port_data_path)#读取港口文件\r\n #print(ports.columns)\r\n ports['LONGITUDE']=ports['LONGITUDE'].astype(float)\r\n ports['LATITUDE']=ports['LATITUDE'].astype(float)\r\n dict_ports={}#存到一个字典里\r\n for index,row in ports.iterrows():\r\n dict_ports[row['TRANS_NODE_NAME']]=(row['LONGITUDE'],row['LATITUDE'])#港口名是key,经纬度是value\r\n #已经获得了终点港口的经纬度,接下来可以计算距离\r\n back_dis['dest_lon']=back_dis.apply(lambda x:dict_ports[x['dest']][0],axis=1)\r\n back_dis['dest_lat']=back_dis.apply(lambda x:dict_ports[x['dest']][1],axis=1)\r\n back_dis['back_dis']=back_dis.apply(lambda x:geodistance(x['longitude'],x['latitude'],x['dest_lon'],x['dest_lat']) if True else 0,axis=1)\r\n temp=back_dis[['loadingOrder','back_dis']]\r\n dis=dis.merge(temp,on='loadingOrder')\r\n dis['distance']=dis['back_dis']+dis['previous_dis']\r\n #dis['distance']=dis.apply(lambda x:dis['back_dis']+dis['previous_dis'] if True else 0,axis=1)#dis中的列名有loadingOrder,previous_dis,back_dis,distance\r\n dis=dis[['loadingOrder','distance']]\r\n\r\n mean_speed = df.groupby('loadingOrder')['speed'].agg(\r\n ['mean', 'var', mean_skip_zero, MY_MSE_skip_zero]).reset_index() # 求出速度,速度的方差,有0,无0的\r\n mean_speed.columns = ['loadingOrder', 'mean_speed', 'speed_mse', 'mean_speed_skip0', 'speed_mse_skip0']\r\n df['anchor_0_6'] = df.apply(lambda x: 1.0 if x['speed'] <= 6 else 0.0, axis=1) # 抛锚次数\r\n df['anchor_7_15'] = df.apply(lambda x: 1.0 if x['speed'] > 6 and x['speed'] <= 15 else 0.0, axis=1)\r\n res_df = df.groupby('loadingOrder').agg({'anchor_0_6': ['sum'], 'anchor_7_15': ['sum']}).reset_index()\r\n res_df.columns = ['loadingOrder', 'anchor_0_6', 'anchor_7_15'] # hhhhhhhhhhhh\r\n #a = df.groupby('loadingOrder')['timestamp'].agg(['count', get_time]).reset_index()\r\n a = df.groupby('loadingOrder')['timestamp'].agg(['count', 'max', 'min']).reset_index()\r\n a.columns = ('loadingOrder', 'count', 'max', 'min')\r\n # a['label']=a.apply(lambda x:(x['max']-x['min']).total_sconds())\r\n a['label'] = (a['max'] - a['min']).dt.total_seconds()\r\n a = a[['loadingOrder', 'count', 'label']]\r\n #print(a)\r\n #a.columns = ('loadingOrder', 'count', 'label')\r\n res_df = res_df.merge(a, on='loadingOrder')\r\n # res_df['label']=df.groupby('loadingOrder')['timestamp'].agg(get_time).reset_index()#时间\r\n res_df['anchor_ratio_0_6'] = res_df['anchor_0_6'] / res_df['count']\r\n res_df['anchor_ratio_7_15'] = res_df['anchor_7_15'] / res_df['count']\r\n res_df = res_df.merge(dis, on='loadingOrder')\r\n res_df = res_df.merge(mean_speed, on='loadingOrder')\r\n first_df = df.sort_values('timestamp').groupby('loadingOrder', as_index=False).first() # 找出最近的时间戳\r\n first_df = first_df[['loadingOrder', 'longitude', 'latitude']]\r\n 
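# rename the earliest fix to first_*; the latest fix is computed next and merged in as last_*\r\n 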
first_df.columns = ['loadingOrder', 'first_longitude', 'first_latitude']\r\n last_df = df.sort_values('timestamp', ascending=False).groupby('loadingOrder', as_index=False).first()\r\n last_df = last_df[['loadingOrder', 'longitude', 'latitude']]\r\n last_df.columns = ['loadingOrder', 'last_longitude', 'last_latitude']\r\n first_df = first_df.merge(last_df, on='loadingOrder') # 存储的是第一个经纬度和最后一个经纬度\r\n res_df = res_df.merge(first_df, on='loadingOrder')\r\n res_df.reset_index(drop=True)\r\n # 应该把count这一列删去?,count是GPS的检测次数\r\n return res_df\r\n\r\n\r\ndef mse_score_eval(preds, valid):\r\n labels = valid.get_label()\r\n scores = mean_squared_error(y_true=labels, y_pred=preds)\r\n return 'mse_score', scores, True\r\n\r\n\r\ndef build_model(train, test, pred, label, seed=1080, is_shuffle=True):\r\n train_pred = np.zeros((train.shape[0],))\r\n test_pred = np.zeros((test.shape[0],))\r\n n_splits = 10\r\n # Kfold\r\n fold = KFold(n_splits=n_splits, shuffle=is_shuffle, random_state=seed)\r\n kf_way = fold.split(train[pred])\r\n # params\r\n params = {\r\n 'booster': 'gbtree',\r\n 'objective': 'reg:gamma',\r\n 'gamma': 0.1,\r\n 'max_depth': 5,\r\n 'lamda': 3,\r\n 'subsample': 0.7,\r\n 'colsample_bytree': 0.7,\r\n 'min_child_weight': 3,\r\n 'silent': 1,\r\n 'eta': 0.1,\r\n 'seed': seed,\r\n 'nthread': 8,\r\n 'eval_meric': 'rmse'\r\n }\r\n # train\r\n for n_fold, (train_idx, valid_idx) in enumerate(kf_way, start=1):\r\n train_x, train_y = train[pred].iloc[train_idx], train[label].iloc[train_idx]\r\n valid_x, valid_y = train[pred].iloc[valid_idx], train[label].iloc[valid_idx]\r\n # 数据加载\r\n n_train = xgb.DMatrix(train_x, label=train_y)\r\n n_valid = xgb.DMatrix(valid_x, label=valid_y)\r\n\r\n xgbModel = XGBRegressor(\r\n max_depth=30,\r\n learning_rate=0.1,\r\n n_estimators=5,\r\n objective='reg:logistic',\r\n booster='gbtree',\r\n gamma=0.1,\r\n seed=seed\r\n )\r\n xgbModel.fit(train_x, train_y, verbose=True)\r\n train_pred[valid_idx] = xgbModel.predict(valid_x)\r\n test_pred += xgbModel.predict(test[pred]) / fold.n_splits\r\n\r\n test['label'] = test_pred\r\n\r\n return test[['loadingOrder', 'label']]\r\n\r\n\r\ndef main():\r\n #NROWS = 20000000\r\n train_data = pd.read_csv(train_gps_path, nrows=100000)\r\n #del train_data[0]\r\n #train_data.columns = ['a','loadingOrder', 'carrierName', 'timestamp', 'longitude',\r\n # 'latitude', 'vesselMMSI', 'speed', 'direction', 'vesselNextport',\r\n # 'vesselNextportETA', 'vesselStatus', 'vesselDatasource', 'TRANSPORT_TRACE']\r\n #train_data.drop([0],inplace=True)\r\n test_data = pd.read_csv(test_data_path)\r\n #print(test_data.columns)\r\n train_data = get_data(train_data, mode='train')\r\n test_data = get_data(test_data, mode='test')\r\n print('get data done')\r\n train = get_feature_train(train_data)\r\n test = get_feature_test(test_data, port_data_path)\r\n #print(train.columns)\r\n #print(test.columns)\r\n features = [c for c in train.columns if c not in ['count', 'label', 'loadingOrder']]\r\n print('FEATURES:'+str(features))\r\n '''\r\n train['anchor_0_6'] = train['anchor_0_6'].astype(float)\r\n train['anchor_7_15'] = train['anchor_7_15'].astype(float)\r\n train['anchor_ratio_0_6'] = train['anchor_ratio_0_6'].astype(float)\r\n train['anchor_ratio_7_15'] = train['anchor_ratio_7_15'].astype(float)\r\n train['distance'] = train['distance'].astype(float)\r\n train['mean_speed'] = train['mean_speed'].astype(float)\r\n train['speed_mse'] = train['speed_mse'].astype(float)\r\n train['mean_speed_skip0'] = train['mean_speed_skip0'].astype(float)\r\n 
train['speed_mse_skip0'] = train['speed_mse_skip0'].astype(float)\r\n #train['first_longitude'] = train['first_longitude'].astype(float)\r\n #train['first_latitude'] = train['first_latitude'].astype(float)\r\n #train['last_longitude'] = train['last_longitude'].astype(float)\r\n #train['last_latitude'] = train['last_latitude'].astype(float)\r\n\r\n test['anchor_0_6'] = test['anchor_0_6'].astype(float)\r\n test['anchor_7_15'] = test['anchor_7_15'].astype(float)\r\n # train['count'] = train['count'].astype(float)\r\n # train['label'] = train['label'].astype(float)\r\n test['anchor_ratio_0_6'] = test['anchor_ratio_0_6'].astype(float)\r\n test['anchor_ratio_7_15'] = test['anchor_ratio_7_15'].astype(float)\r\n test['distance'] = test['distance'].astype(float)\r\n test['mean_speed'] = test['mean_speed'].astype(float)\r\n test['speed_mse'] = test['speed_mse'].astype(float)\r\n test['mean_speed_skip0'] = test['mean_speed_skip0'].astype(float)\r\n test['speed_mse_skip0'] = test['speed_mse_skip0'].astype(float)\r\n #test['first_longitude'] = test['first_longitude'].astype(float)\r\n #test['first_latitude'] = test['first_latitude'].astype(float)\r\n #test['last_longitude'] = test['last_longitude'].astype(float)\r\n #test['last_latitude'] = test['last_latitude'].astype(float)\r\n '''\r\n\r\n result = build_model(train, test, features, 'label', is_shuffle=True)\r\n result.to_csv('result-061818.csv')\r\n #build and train the model; result is the predicted travel time in seconds, and adding it to the onboard time gives the ETA\r\n test_data = test_data.merge(result, on='loadingOrder', how='left')\r\n test_data['ETA'] = (test_data['onboardDate'] + test_data['label'].apply(lambda x:pd.Timedelta(seconds=x))).apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\r\n test_data.drop(['direction', 'TRANSPORT_TRACE'], axis=1, inplace=True)\r\n test_data['onboardDate'] = test_data['onboardDate'].apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\r\n test_data['creatDate'] = pd.Timestamp.now().strftime('%Y/%m/%d %H:%M:%S')\r\n test_data['timestamp'] = test_data['temp_timestamp']\r\n # arrange the column order\r\n result = test_data[['loadingOrder', 'timestamp', 'longitude', 'latitude', 'carrierName', 'vesselMMSI', 'onboardDate', 'ETA', 'creatDate']]\r\n result.to_csv('testout-061818.csv',index=False)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"main_6_17_00_31.py","file_name":"main_6_17_00_31.py","file_ext":"py","file_size_in_byte":16451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"545246413","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 11 10:45:39 2021\r\n\r\n@author: vidhya\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\nA = np.random.normal(25, 5, 10000)\r\nB = np.random.normal(12, 8, 10000)\r\n\r\nprint('stat of 2 different distributions with different values : ', \r\n stats.ttest_ind(A, B))\r\n\r\nB = np.random.normal(25, 5, 10000)\r\nprint('stat of 2 different distributions with the same values : ',\r\n stats.ttest_ind(A, B))\r\n\r\nA = np.random.normal(25, 5, 100000)\r\nB = np.random.normal(25, 5, 100000)\r\n\r\nprint('stat of 2 different distributions with the same values : ',\r\n stats.ttest_ind(A, B))\r\nprint('stat of same distribution', stats.ttest_ind(A, A))","sub_path":"misc/abtest.py","file_name":"abtest.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
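The abtest.py sample above always calls stats.ttest_ind with its default pooled-variance assumption, even for the first comparison where the samples are drawn with clearly different standard deviations (5 vs 8). A short sketch of the usual remedy, Welch's t-test via equal_var=False (a real SciPy keyword; the variable names and seed here are illustrative):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
a = rng.normal(25, 5, 10000)  # sd = 5
b = rng.normal(12, 8, 10000)  # sd = 8: clearly unequal variances

# Student's t-test pools the two variances; Welch's variant does not,
# which keeps the p-value calibrated when the spreads differ.
print(stats.ttest_ind(a, b))                   # pooled (SciPy default)
print(stats.ttest_ind(a, b, equal_var=False))  # Welch's t-test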
{"seq_id":"79453808","text":"from kitty.conf.types import ShortcutMapping,Option,Group\nfrom kitty.options.definition import definition\nfrom kitty.options.utils import args_funcs\n\ndef find_options(group):\n opts = []\n actions = []\n for i in group.items:\n if type(i) is Group:\n rec_opts, rec_actions = find_options(i)\n opts += rec_opts\n actions += rec_actions\n else:\n if type(i) is ShortcutMapping:\n actions.append(i.name)\n else:\n opts.append(i.name)\n return opts, actions\n\n\nall_opts, all_actions = find_options(definition.root_group)\nall_actions += args_funcs.keys()\n\nall_opts = sorted(list(set(all_opts)))\nall_actions = sorted(list(set(all_actions)))\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\nkittyKeyword = ['syn keyword kittyKeyword contained\\n'] + [\n \" \\\\ \" + \" \".join(i) + \"\\n\" for i in chunks(all_opts, 8)\n ]\n\nkittyAction = ['syn keyword kittyAction contained\\n'] + [\n \" \\\\ \" + \" \".join(i) + \"\\n\" for i in chunks(all_actions, 8)\n ]\n\nwith open(\"syntax/kitty.vim\") as f:\n infile = list(f)\nnon_generated = infile.index('\" START GENERATED CODE\\n') + 1\nupdated_file = infile[0:non_generated] + kittyKeyword + kittyAction\n\nwith open(\"syntax/kitty.vim\", 'w') as f:\n f.writelines(updated_file)\n","sub_path":"gen-syntax.py","file_name":"gen-syntax.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"197470544","text":"import tornado.testing\nimport tornado.web\nimport tornado.gen\nimport tornado.wsgi\nimport tornado.util\n\nif tornado.util.PY3:\n from python3 import hprose\nelse:\n from python2 import hprose\n\n\ndef hello(name):\n return 'Hello %s!' % name\n\n\nservice = hprose.HttpService()\nservice.addFunction(hello)\n\n\nclass TestHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n # the hprose service only accepts a WSGI environ, so convert the request object into an environ before handing it to hprose\n environ = tornado.wsgi.WSGIContainer.environ(self.request)\n status, headers, body = service(environ)\n # send back the response\n self.set_status(int(status.split(' ')[0]))\n for name, value in headers:\n self.add_header(name, value)\n yield tornado.gen.sleep(2)\n self.finish(body[0])\n\n @tornado.gen.coroutine\n def post(self):\n # the hprose service only accepts a WSGI environ, so convert the request object into an environ before handing it to hprose\n environ = tornado.wsgi.WSGIContainer.environ(self.request)\n status, headers, body = service(environ)\n # send back the response\n self.set_status(int(status.split(' ')[0]))\n for name, value in headers:\n self.add_header(name, value)\n yield tornado.gen.sleep(2)\n self.finish(body[0])\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n (r\"/\", TestHandler),\n ]\n settings = dict(\n xsrf_cookies=False,\n )\n super(Application, self).__init__(handlers, **settings)\n\n\nclass HttpServerTest(tornado.testing.AsyncTestCase):\n def test_start_server(self):\n pass\n\n\nif __name__ == \"__main__\":\n tornado.testing.main()\n import tornado.ioloop\n\n app = Application()\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n","sub_path":"test/tornado/httpserver.py","file_name":"httpserver.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
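In the Tornado sample above, the GET and POST bodies are byte-for-byte identical. One way to remove the duplication is to route both verbs through a single coroutine. This is only a sketch: it reuses the sample's `service` object and the same `WSGIContainer.environ` bridge, while `DedupedHandler` and `_relay` are made-up names.

import tornado.gen
import tornado.web
import tornado.wsgi


class DedupedHandler(tornado.web.RequestHandler):
    # one shared implementation instead of two identical method bodies
    @tornado.gen.coroutine
    def _relay(self):
        environ = tornado.wsgi.WSGIContainer.environ(self.request)  # same bridge as the sample
        status, headers, body = service(environ)  # `service` is the hprose service defined above
        self.set_status(int(status.split(' ')[0]))
        for name, value in headers:
            self.add_header(name, value)
        yield tornado.gen.sleep(2)
        self.finish(body[0])

    get = _relay   # both verbs dispatch to the shared coroutine
    post = _relay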
{"seq_id":"188439648","text":"# https://leetcode-cn.com/problems/longest-common-subsequence/solution/chao-xiang-xi-dong-tai-gui-hua-jie-fa-by-shi-wei-h/\n\n# find the longest common subsequence (LCS) of two strings\n# subproblem decomposition for strings s1 and s2:\n# (1) if the last character of s1 equals the last character of s2, their LCS is the LCS of s1[0, i-1] and s2[0, i-1] plus 1\n# (2) if the last characters differ, the LCS is the larger of the two subproblems\n# s1[0, i-1], s2[0, j]\n# s1[0, i], s2[0, j-1]\n\n# store the results in a 2-D table; the concrete recurrence:\n # if s1[i]==s2[j], then dp[i][j]=dp[i-1][j-1]+1\n # otherwise dp[i][j]=max(dp[i-1][j],dp[i][j-1])\n\n\nclass Solution:\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n if not text1 or not text2:\n return 0\n \n m, n = len(text1), len(text2)\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if text1[i - 1] == text2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n \n return dp[m][n]\n\n ","sub_path":"04-DP/12-Dynamic-Programming/1143-longest-common-subsequence/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"305012303","text":"import time\n\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BOARD)\nLED_PIN = 11\nGPIO.setup(LED_PIN, GPIO.OUT)\n\n# light the LED for 3 seconds, then release the pin\nGPIO.output(LED_PIN, True)\ntime.sleep(3)\nGPIO.cleanup()\n","sub_path":"rasp1.py","file_name":"rasp1.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"185024471","text":"# Copyright (c) 2013, System Engineering Software Society\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the System Engineering Software Society nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED.\n# IN NO EVENT SHALL SYSTEM ENGINEERING SOFTWARE SOCIETY BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nThe exportation of data is the final step in an analysis workflow. The analysis\nis performed and the result must be exported to an additional data format\nfor presentation or visualisation. Or, Sympathy for Data has been used for\ndata management, where data from different sources has been gathered and merged\ninto a joint structure that can be exported to a different data format.\n\nThere exists exportation from the following internal data types:\n - :ref:`Export Tables`\n - :ref:`Export RAW Tables`\n - Text, to be implemented,\n - :ref:`Export ADAFs`\n\nThe exportation nodes are all based on the use of plugins, the same structure\nas the importation nodes. 
Each supported data format has its own plugin, and\nmay also have specific GUI settings.\n\nAt the moment, exportation of Tables is supported to the following data formats:\n - CSV\n - HDF5\n - SQL\n - SQLite\n - XLS\n - XLSX\n\nIn the separate node, :ref:`Export RAW Tables`, the internal structure of\nTables is exported into a single file, where the data format is connected to\nSympathy with the extension .sydata.\n\nThe exportation nodes can also be used for storing partial results on disk.\nThe stored data can be re-imported further along in the workflow by\nconnecting the outgoing datasources to an importation node.\n\nIf the input Table(s) has a plot attribute (as created by e.g.,\n:ref:`Plot Tables`), it can be exported to a separate file by selecting one of\nthe extensions in the output section.\n\"\"\"\nimport os\nimport itertools\n\nfrom sympathy.api import exporters\nfrom sympathy.api import datasource as dsrc\nfrom sympathy.api import table\nfrom sympathy.api import node as synode\nfrom sympathy.api.nodeconfig import Port, Ports, Tag, Tags\nfrom sympathy.api.exceptions import SyNodeError\nfrom sylib.plot import backend as plot_backends\nfrom sylib.plot import model as plot_models\n\n\nclass ExportTables(synode.Node):\n \"\"\"\n Export tables to a selected data format.\n\n :Inputs:\n **Tables** : Tables\n Tables with data to export.\n :Outputs:\n **Datasources** : Datasources\n Datasources with paths to the created files.\n :Configuration:\n **Exporter to use**\n Select data format exporter. Each data format has its own exporter\n with its own special configuration, see exporter information. The\n selection of exporter also suggests a filename extension.\n **Filename extension**\n Specify a new extension if you are not satisfied with the\n predefined one for the exporter.\n **Output directory**\n Specify/select directory where the created files will be stored.\n **Filename**\n Specify the common base for the filenames. If there are several\n incoming Tables the node will add \"_${index number of corresponding\n Table in the incoming list}\" after the base for each file. If\n nothing is specified the filename will be equal to the index number.\n Do not specify extension.\n **Filename(s) preview** : button\n When pressed, a preview of all filenames is presented under the\n button.\n :Opposite node: :ref:`Tables`\n :Ref. 
nodes: :ref:`Export ADAFs`\n \"\"\"\n\n name = 'Export Tables'\n description = 'Export Tables'\n icon = 'export_table.svg'\n inputs = Ports([Port.Tables('Tables to be exported', name='port0')])\n outputs = Ports([Port.Datasources(\n 'Datasources of exported files', name='port0', scheme='text')])\n\n tags = Tags(Tag.Output.Export)\n author = 'Alexander Busck '\n copyright = '(c) 2013 Combine AB'\n nodeid = 'org.sysess.sympathy.export.exporttables'\n version = '0.1'\n\n parameters = synode.parameters()\n parameters.set_string('active_exporter')\n custom_exporter_group = parameters.create_group('custom_exporter_data')\n parameters.set_string(\n 'directory', value='.', label='Output directory',\n description='Select the directory where to export the files.',\n editor=synode.Util.directory_editor().value())\n parameters.set_string(\n 'filename', label='Filename',\n description='Filename without extension.')\n parameters.set_list(\n 'plot',\n label='Output separate plot file with the following extension:',\n description='If there is a plot attribute in the input tables(s), '\n 'create a separate file with the plot.',\n value=[0],\n plist=['-', 'eps', 'pdf', 'svg', 'png'],\n editor=synode.Util.combo_editor().value())\n\n def verify_parameters(self, node_context):\n parameter_root = synode.parameters(node_context.parameters)\n parameters_ok = \"\" != parameter_root.value_or_empty('active_exporter')\n return parameters_ok\n\n def exec_parameter_view(self, node_context):\n parameter_root = synode.parameters(node_context.parameters)\n export_params_widget = exporters.base.ExporterConfigurationWidget(\n exporters.utils.available_table_exporters(),\n parameter_root, node_context.input)\n widget = exporters.base.ExporterWidget(\n node_context, parameter_root, export_params_widget, table)\n return widget\n\n def execute(self, node_context):\n parameter_root = node_context.parameters\n exporter_type = parameter_root['active_exporter'].value\n filename = parameter_root.value_or_empty('filename')\n directory = parameter_root.value_or_empty('directory')\n if not os.path.isdir(directory):\n os.makedirs(directory)\n exporter_parameter_root = parameter_root[\n 'custom_exporter_data'][exporter_type]\n\n exporter = exporters.utils.table_exporter_factory(exporter_type)(\n exporter_parameter_root)\n # Create filenames from the parameter_root and the data available\n # as input. 
If active the exporter will use a specific filename\n # strategy when creating the filenames.\n\n fq_filenames = exporters.base.create_fq_filenames(\n directory, exporter.create_filenames(node_context.input, filename))\n\n if 'plot' in parameter_root:\n plot = parameter_root['plot'].selected\n plot = None if plot == '-' else plot\n else:\n plot = None\n\n if isinstance(fq_filenames, list):\n number_of_filenames = len(fq_filenames)\n else:\n number_of_filenames = None\n\n input_list = node_context.input['port0']\n datasource_list = node_context.output['port0']\n number_of_objects = len(input_list)\n\n exporter_class = (\n exporters.utils.table_exporter_factory(\n exporter_type))\n exporter_parameter_root = synode.parameters(\n node_context.parameters[\n 'custom_exporter_data'][exporter_type])\n\n exporter = exporter_class(exporter_parameter_root)\n\n if number_of_filenames is None:\n for object_no, (fq_outfilename, table_file) in enumerate(\n itertools.izip(fq_filenames, input_list)):\n\n if not os.path.isdir(os.path.dirname(fq_outfilename)):\n os.makedirs(os.path.dirname(fq_outfilename))\n datasource_file = dsrc.File()\n datasource_file.encode_path(fq_outfilename)\n datasource_list.append(datasource_file)\n\n try:\n exporter.export_data(table_file, fq_outfilename)\n except (IOError, OSError):\n raise SyNodeError(\n 'Unable to create file. Please check that you have '\n 'permission to write to the selected folder.')\n if plot is not None:\n plots_model = plot_models.get_plots_model(\n table_file)\n plot_exporter = plot_backends.ExporterBackend(\n plots_model, plot)\n plot_exporter.render(\n os.path.splitext(fq_outfilename)[0])\n\n self.set_progress(\n 100.0 * (1 + object_no) / number_of_objects)\n\n else:\n fq_outfilename = fq_filenames[0]\n datasource_file = dsrc.File()\n datasource_file.encode_path(fq_outfilename)\n datasource_list.append(datasource_file)\n\n exporter.export_data(input_list, fq_outfilename)\n\n if plot is not None:\n for table_file, i in zip(input_list, range(len(input_list))):\n plots_model = plot_models.get_plots_model(table_file)\n plot_exporter = plot_backends.ExporterBackend(\n plots_model, plot)\n filename = (\n os.path.splitext(fq_outfilename)[0] + '_' + str(i))\n plot_exporter.render(filename)\n\n self.set_progress(100)\n\n\nclass ExportRAWTables(synode.Node):\n \"\"\"\n Export tables to the internal data format .sydata.\n\n :Inputs:\n **Tables** : Tables\n Tables with data to export.\n :Outputs:\n **Datasources** : Datasource\n Datasource with paths to the created file.\n :Configuration:\n **Output directory**\n Specify/select directory where the created files will be stored.\n **Filename**\n Specify filename.\n :Opposite node: :ref:`RAW Tables`\n :Ref. 
nodes: :ref:`Export Tables`\n \"\"\"\n\n name = 'Export RAW Tables'\n description = 'Export RAW Tables'\n icon = 'export_table.svg'\n inputs = Ports([Port.Tables('Tables to be exported', name='port0')])\n outputs = Ports([Port.Datasource(\n 'Datasources of exported files', name='port0', scheme='text')])\n\n author = 'Alexander Busck '\n copyright = '(c) 2013 Combine AB'\n nodeid = 'org.sysess.sympathy.export.exportrawtables'\n version = '0.12a'\n tags = Tags(Tag.Output.Export)\n\n parameters = synode.parameters()\n parameters.set_string(\n 'directory', value='.', label='Output directory',\n description='Select the directory where to export the files.',\n editor=synode.Util.directory_editor().value())\n parameters.set_string(\n 'filename', label='Filename',\n description='Filename without extension.')\n\n def verify_parameters(self, node_context):\n parameter_root = synode.parameters(node_context.parameters)\n filename = parameter_root.value_or_empty('filename')\n return filename != ''\n\n def execute(self, node_context):\n parameter_root = synode.parameters(node_context.parameters)\n\n directory = parameter_root['directory'].value\n if not os.path.isdir(directory):\n os.makedirs(directory)\n filename = parameter_root['filename'].value\n fq_outfilename = '{}{}{}'.format(\n os.path.join(directory, filename), os.path.extsep, 'sydata')\n\n in_table_file = node_context.input['port0']\n with table.FileList(filename=fq_outfilename,\n mode='w') as out_table_file:\n out_table_file.extend(in_table_file)\n node_context.output['port0'].encode_path(fq_outfilename)\n","sub_path":"Library/Library/sympathy/export/node_export_tables.py","file_name":"node_export_tables.py","file_ext":"py","file_size_in_byte":12865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"536902135","text":"import streamlit as st\nimport yfinance as yf\n\ntickerSymbol = st.text_input('Symbol','GOOGL')\ntickerData = yf.Ticker(tickerSymbol)\ntickerDf = tickerData.history(period='1d', start='2010-5-31', end='2020-5-31')\n\nst.write(\"\"\"\n# Simple Stock Price App\nShown are the stock **closing price** and ***volume*** of \"\"\", tickerSymbol, \"\"\"!\n\"\"\")\n\nst.write(\"\"\"\n## Closing Price\n\"\"\")\nst.line_chart(tickerDf.Close)\nst.write(\"\"\"\n## Volume Price\n\"\"\")\nst.line_chart(tickerDf.Volume)\n","sub_path":"ap.py","file_name":"ap.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"378104603","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# write by caozhi, 2018-08-23, version:3.2\n# 用户信息管理系统\n\n\nfrom time import time\nimport pickle\n\ntry:\n with open('file', 'rb') as f:\n usermessage = pickle.load(f)\nexcept FileNotFoundError as eee:\n print(eee,'管理员用户文件异常')\n exit(1)\n\ntry:\n with open('message', 'rb') as a:\n userinfo = pickle.load(a)\nexcept FileNotFoundError as fff:\n print(fff,'用户信息文件异常')\n exit(1)\n\n# usermessage = {'name': 'admin', 'passwd': 'playbook', 'count': 3, 'lasttime': 1535080972.4686918}\n\n# userinfo = [\n# {'id': 1, 'name': 'name1', 'age': 20, 'tel': '132xxx', 'address': 'beijing'},\n# {'id': 2, 'name': 'name2', 'age': 20, 'tel': '132xxx', 'address': 'beijing'},\n# {'id': 3, 'name': 'name3', 'age': 20, 'tel': '132xxx', 'address': 'beijing'},\n# {'id': 5, 'name': 'name4', 'age': 20, 'tel': '132xxx', 'address': 'beijing'},\n# {'id': 8, 'name': 'name5', 'age': 20, 'tel': '132xxx', 'address': 'beijing'},\n# {'id': 9, 'name': 'name6', 'age': 20, 
'tel': '132xxx', 'address': 'beijing'}\n# ]\n\nbreak_flag = 0\n\nwhile 1:\n if break_flag:\n break\n\n # 没有机会时,强制同步 最后登陆失败时时间戳\n now_time = time()\n usermessage['lasttime'] = now_time\n with open('file', 'wb') as a:\n pickle.dump(usermessage,a)\n # 判断可登录的剩余次数 是否=0\n count = usermessage.get('count')\n if count <= 0:\n lasttime = usermessage.get('lasttime')\n\n # 判断是否超过1天,如果少于1天则不允许登陆\n drop = now_time - lasttime\n if int(drop) > 60:\n usermessage['count'] = 3\n continue\n else:\n print('没有机会了~~~')\n print('\\033[31m请在 60秒后(为调试方便,使用60s,可自定义调整)重试, 或者联系我...\\033[0m')\n break_flag = 1\n break\n\n # 输入登陆信息\n user_name = input('\\033[33m 请输入你的姓名: \\033[0m').strip()\n password = input('\\033[33m 请输入你的密码: \\033[0m').strip()\n\n if user_name == usermessage['name'] and password == usermessage['passwd']:\n print('\\033[32m login success ---> 登陆成功 \\033[0m')\n\n # 登陆欢迎信息\n print('=' * 80)\n print('''\n\\033[31m欢迎来到某某信息管理系统 \\033[0m\n''')\n print('=' * 80)\n\n while 1:\n if break_flag:\n break\n print('''\n 执行操作的序号:\n 1、 插入一个用户信息.\n 2、 查询当前某个用户信息.\n 3、 展示所有用户信息.\n 4、 更新某个用户信息.\n 5、 删掉某个用户信息.\n 6、 退出系统,并保存所有操作.\n ''')\n\n # 输入对用户信息的操作 按数据库逻辑实现,id 为主键\n action = input('\\033[34m请输入需要执行操作的序号: \\033[0m').strip()\n\n # 添加用户信息\n if action == '1':\n insert_id = int(userinfo[-1]['id']) + 1\n insert_name = input('请输入增加的姓名: ').strip()\n if len(insert_name) < 1:\n print('Illegal,输入非法↓')\n continue\n try:\n insert_age = int(input('请输入年龄: ').strip())\n except:\n print('输入类型错误')\n continue\n else:\n if insert_age < 1 or insert_age > 200:\n print('Illegal,输入非法↓')\n continue\n insert_tel = input('Please enter add tel: ').strip()\n if len(insert_tel) < 7:\n print('Illegal,输入非法↓')\n continue\n insert_add = input('Please enter add address: ').strip()\n if len(insert_add) < 1:\n print('Illegal,输入非法↓')\n continue\n insert_dict = {'id': insert_id, 'name': insert_name, 'age': insert_age, 'tel': insert_tel, 'address': insert_add}\n userinfo.append(insert_dict)\n print('这是新增的信息,请核对:')\n print(userinfo[-1])\n\n # 查询某个用户信息\n elif action == '2':\n select_name = input('请输入用户姓名: ').strip()\n if len(select_name) < 1:\n print('Illegal,输入非法↓')\n continue\n select_flag = 0\n # [i for i in userinfo if i.get('name') == select_name]\n for i in userinfo:\n if i.get('name', None) == select_name:\n select_flag = 1\n print(i)\n if select_flag == 0:\n print('Sorry, 没有这个用户信息')\n\n # 显示所有用户信息,并分页展示,默认每页显示3条\n elif action == '3':\n if len(userinfo) % 3 == 0:\n max_page = (len(userinfo) // 3)\n else:\n max_page = (len(userinfo) // 3 + 1)\n while 1:\n\n try:\n page = int(input('请输入查看的页码 (0 是全部): ').strip())\n except:\n print('输入类型错误')\n else:\n if 0 < page <= max_page:\n for m in userinfo[3 * (page - 1):3 * page]:\n print(m)\n elif page == 0:\n for a in userinfo:\n print(a)\n else:\n print('超过了正常页数的范围,请重新输入页码. 
eg:1 -- %d ' % max_page)\n continue\n print()\n show_quit = input('是否要继续查看信息 (输入 \\'N或n\\' 则退出,否则继续): ').strip()\n if show_quit == 'N' or show_quit == 'n':\n break\n\n # 更新某个用户信息\n elif action == '4':\n try:\n update_id = int(input('请输入更新信息的id: ').strip())\n except:\n print('输入类型错误')\n continue\n update_flag = 0\n j = 0\n for m in userinfo:\n update_flag = 1\n if update_id == m.get('id'):\n update_name = input('请输入更新用户姓名: ').strip()\n if len(update_name) < 1:\n print('Illegal,输入非法↓')\n continue\n try:\n update_age = int(input('请输入更新用户年龄: ').strip())\n except:\n print('Illegal,输入非法↓')\n break\n else:\n if update_age < 1 or update_age > 200:\n print('Illegal,输入年龄非法↓')\n continue\n update_tel = input('请输入更新用户电话: ').strip()\n if len(update_tel) < 1:\n print('Illegal,输入非法↓')\n continue\n update_add = input('请输入更新用户地址: ').strip()\n if len(update_add) < 1:\n print('Illegal,输入非法↓')\n continue\n userinfo[j] = {'id': update_id, 'name': update_name, 'age': update_age, 'tel': update_tel, 'address': update_add}\n print(userinfo[j])\n j += 1\n if update_flag == 0:\n print('Sorry, 没有这个用户id')\n\n # 删除某个用户信息\n elif action == '5':\n try:\n delete_id = int(input('请输入要删除的用户id: ').strip())\n except:\n print('Illegal,输入非法↓')\n continue\n n = 0\n delete_flag = 0\n for k in userinfo:\n if delete_id == k.get('id'):\n userinfo.pop(n)\n delete_flag = 1\n print('用户信息删除成功')\n n += 1\n if delete_flag == 0:\n print('Sorry, 没有这个用户信息')\n\n # 退出整个系统\n elif action == '6':\n print()\n print('退出成功,bye-bye ~')\n break_flag = 1\n break\n else:\n print()\n print('你输入操作的动作非法')\n\n else:\n count -= 1\n with open('/home/caozhi/file', 'w') as f:\n f.write(str(count))\n usermessage['count'] = count\n print('用户信息错误,登陆失败,还有 %d 次机会' % count)\n\n# 退出系统 自动将修改的内容写到磁盘中\nwith open('message','wb') as m:\n pickle.dump(userinfo,m)\n","sub_path":"lesson03/caozhi/message_manage_system.py","file_name":"message_manage_system.py","file_ext":"py","file_size_in_byte":9370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"312195470","text":"from rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.db.models import Q\nfrom rest_framework.views import APIView\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom .filters import TransferHistoryUserFilter\n\nfrom accounts.models import Notification\nfrom .serializers import *\nfrom .models import *\nfrom rest_framework.generics import *\nfrom datetime import date\nfrom fcm_django.models import FCMDevice\nfrom django.conf import settings\n\n\n# update put, patch API\nclass CreateCashBoxView(viewsets.generics.UpdateAPIView):\n serializer_class = CreateCashBoxSerializer\n permission_classes = [IsAuthenticated]\n\n def update(self, request, *args, **kwargs):\n user = get_user_model().objects.get(id=request.data['user_id'])\n if user:\n profile = user.profile\n\n if profile.balance < request.data['amount']:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n profile.balance = profile.balance - request.data['amount']\n profile.withdrawn_balance = profile.withdrawn_balance + \\\n request.data['amount']\n profile.save()\n\n CashBox.objects.create(\n user=user,\n method=request.data['method'],\n operator=request.data['operator'],\n props_number=request.data['props_number'],\n amount=request.data['amount'],\n )\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return 
Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CreateTransferView(viewsets.generics.UpdateAPIView):\n serializer_class = CreateTransferSerializer\n permission_classes = [IsAuthenticated]\n\n def update(self, request, *args, **kwargs):\n user = get_user_model().objects.get(id=request.data['user_id'])\n receiver = get_user_model().objects.get(\n username=request.data['username'])\n if user != receiver:\n if user:\n profile = user.profile\n if profile.balance < float(request.data['amount']):\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n profile.balance = profile.balance - request.data['amount']\n profile.withdrawn_balance = profile.withdrawn_balance + \\\n request.data['amount']\n profile.save()\n\n Transfer.objects.create(\n sender=user,\n receiver=request.data['username'],\n code=request.data['code'],\n amount=request.data['amount'],\n )\n receiver = get_user_model().objects.get(\n username=request.data['username'])\n device = FCMDevice.objects.filter(user=receiver)\n device_sender = FCMDevice.objects.get(user=user)\n device.send_message(title=\"Перевод💰\",\n body=f\"Пользователь {user.username} отправил(а) вам {request.data['amount']} баллов. Введите код ч��обы получить перевод.\",\n icon=settings.GLOBAL_HOST + profile.image.url,\n data={'type': '4'})\n Notification.objects.create(user=receiver, title=\"Перевод💰\",\n body=f\"Пользователь {user.username} отправил(а) вам {request.data['amount']} баллов. Введите код чтобы получить перевод.\",\n image=settings.GLOBAL_HOST + profile.image.url,\n type='4')\n device_sender.send_message(title=\"Перевод💰\",\n body=f\"Вы перевели пользователю {receiver.username} {request.data['amount']}\",\n icon=settings.GLOBAL_HOST + profile.image.url,\n data={'type': '3'})\n Notification.objects.create(user=user, title=\"Перевод💰\",\n body=f\"Вы перевели пользователю {receiver.username} {request.data['amount']}\",\n image=settings.GLOBAL_HOST + profile.image.url,\n type='3')\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n return Response({'message': 'You cant not transfer to yourself.'})\n\n\nclass CreatePromoCodeView(viewsets.generics.UpdateAPIView):\n serializer_class = CreatePromoCodeSerializer\n permission_classes = [IsAuthenticated]\n\n def update(self, request, *args, **kwargs):\n user = get_user_model().objects.get(id=request.data['user_id'])\n print(user.profile.agent)\n if user.profile.agent:\n PromoCode.objects.create(\n user=user,\n code=request.data['code'],\n )\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ReceiveTransferView(viewsets.generics.UpdateAPIView):\n serializer_class = ReceiveTransferSerializer\n permission_classes = [IsAuthenticated]\n\n def update(self, request, *args, **kwargs):\n transfer = Transfer.objects.filter(\n receiver=request.data['username']).filter(\n code=request.data['code']).filter(\n is_paid=False).first()\n user = get_user_model().objects.get(username=request.data['username'])\n if transfer and user:\n transfer.is_paid = True\n transfer.save()\n profile = user.profile\n profile.balance = profile.balance + transfer.amount\n profile.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass TransferHistoryUserView(viewsets.generics.ListAPIView):\n serializer_class = TransferHistorySerializer\n filter_backends = [DjangoFilterBackend]\n filterset_class = TransferHistoryUserFilter\n\n 
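# DjangoFilterBackend applies TransferHistoryUserFilter on top of get_queryset below, which already restricts rows to the URL user (as sender or receiver), newest first\n 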
def get_queryset(self):\n user = get_user_model().objects.get(id=self.kwargs['user_id'])\n if user:\n queryset = Transfer.objects.filter(\n Q(sender=self.kwargs['user_id']) | Q(\n receiver=user.username)).order_by('-create_at')\n return queryset\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass TransferHistoryDetailView(viewsets.generics.RetrieveAPIView):\n serializer_class = TransferHistorySerializer\n\n def get_queryset(self):\n queryset = get_object_or_404(Transfer, pk=self.kwargs['trans_id'])\n return queryset\n\n\nclass CashBoxHistoryUserView(viewsets.generics.ListAPIView):\n serializer_class = CashBoxHistorySerializer\n\n def get_queryset(self):\n user = get_user_model().objects.get(id=self.kwargs['user_id'])\n if user:\n queryset = CashBox.objects.filter(user=self.kwargs['user_id'],\n create_at__gte=self.kwargs[\n 'from_date'],\n create_at__lte=self.kwargs[\n 'before_date'])\n return queryset\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass TransferNotificationUserView(viewsets.generics.ListAPIView):\n serializer_class = TransferHistorySerializer\n\n def get_queryset(self):\n user = get_user_model().objects.get(id=self.kwargs['user_id'])\n if user:\n queryset = Transfer.objects.filter(receiver=user.username).order_by(\n 'is_read', 'create_at').reverse()\n return queryset\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass AgentPromoCodesView(viewsets.generics.ListAPIView):\n serializer_class = AgentPromoCodesSerializer\n\n def get_queryset(self):\n user = get_user_model().objects.get(id=self.kwargs['user_id'])\n if user.profile.agent:\n queryset = PromoCode.objects.filter(user=user)\n return queryset\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UpdateTransferReadView(viewsets.generics.UpdateAPIView):\n serializer_class = UpdateTransferReadSerializer\n permission_classes = [IsAuthenticated]\n\n def update(self, request, *args, **kwargs):\n user = get_user_model().objects.get(id=request.data['user_id'])\n transfer = Transfer.objects.filter(id=request.data['transfer_id'],\n receiver=user.username).first()\n if transfer and request.data['read']:\n transfer.is_read = True\n transfer.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CreateDonateTransferView(APIView):\n serializer_class = CreateDonateTransferSerializer\n permission_classes = [IsAuthenticated]\n\n def post(self, request, *args, **kwargs):\n user = request.user\n video = Video.objects.get(id=request.data['video_id'])\n if user != video.owner:\n if user.profile.balance < float(request.data['amount']):\n return Response({'amount': 'In your balance does not enough'\n ' this amount for transfer'},\n status.HTTP_400_BAD_REQUEST)\n user.profile.balance -= float(request.data['amount'])\n user.profile.withdrawn_balance += float(request.data['amount'])\n video.owner.profile.balance += float(request.data['amount'])\n user.profile.save()\n video.owner.profile.save()\n Transfer.objects.create(\n sender=user,\n receiver=request.data['username'],\n amount=request.data['amount'],\n )\n device = FCMDevice.objects.filter(user=video.owner)\n device_sender = FCMDevice.objects.filter(user=user)\n device.send_message(title=\"Перевод💰\",\n body=f\"Пользователь {user.username} отправил(а) вам {request.data['amount']}\",\n icon=settings.GLOBAL_HOST + user.profile.image.url,\n data={'type': '4'})\n Notification.objects.create(user=video.owner, title=\"Перевод💰\",\n 
body=f\"Пользователь {user.username} отправил(а) вам {request.data['amount']}\",\n image=settings.GLOBAL_HOST + video.owner.profile.image.url,\n type='4')\n device_sender.send_message(title=\"Перевод💰\",\n body=f\"Вы перевели пользователю {video.owner.username} {request.data['amount']}\",\n icon=settings.GLOBAL_HOST + user.profile.image.url,\n data={'type': '3'})\n Notification.objects.create(user=user, title=\"Перевод💰\",\n body=f\"Вы перевели пользователю {video.owner.username} {request.data['amount']}\",\n image=settings.GLOBAL_HOST + user.profile.image.url,\n type='3')\n return Response(status.HTTP_200_OK)\n return Response({'message': 'Вы не можете пожертвовать себе'},\n status.HTTP_400_BAD_REQUEST)\n\n\nclass CreateDonateForCompanyView(viewsets.generics.CreateAPIView):\n serializer_class = CreateDonateForCompanySerializer\n permission_classes = [IsAuthenticated]\n","sub_path":"cashbox/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"387830332","text":"import json\nfrom difflib import get_close_matches\nfrom flask import Flask,jsonify\n\ndata = json.load(open(\"data.json\"))\napp = Flask(__name__)\n\n@app.route('/dict/', methods=['GET'])\ndef dict(word):\n result = find(word)\n return jsonify(result)\n\ndef find(word):\n try:\n return {\"definiton\":data[word]}\n except KeyError:\n if len(get_close_matches(word,data.keys(),cutoff =0.8))>0:\n return {\"match\":get_close_matches(word,data.keys(),cutoff =0.8)[0]}\n return {\"error\":\"Word does not exist!\"}\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"327517970","text":"import paho.mqtt.client as mqtt\nimport json\nimport Robot2\n\nMQTT_SERVER = \"roboberry\"\nMQTT_PATH = \"car_command\"\nLEFT_TRIM = 0\nRIGHT_TRIM = 0\n\nrobot = Robot2.Robot2(left_trim=LEFT_TRIM, right_trim=RIGHT_TRIM)\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \" + str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(MQTT_PATH)\n\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n print(\"msg: \" + msg.topic + \" \" + str(msg.payload))\n\n payload = str(msg.payload)\n print(payload)\n jsonCmd = json.loads(payload) # type: object\n cmd = jsonCmd['command']\n duration = jsonCmd['duration']\n if(cmd == \"forward\"):\n speed = jsonCmd[\"left_speed\"]\n robot.forward(speed, duration)\n\n elif(cmd == \"backwards\"):\n speed = jsonCmd[\"left_speed\"]\n robot.backward(speed, duration)\n\n elif(cmd == \"left\"):\n speed = jsonCmd[\"right_speed\"]\n robot.right(speed, 1)\n\n elif (cmd == \"right\"):\n speed = jsonCmd[\"left_speed\"]\n robot.left(speed, 1)\n\n elif (cmd == \"stop\"):\n robot.stop()\n\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(MQTT_SERVER, 1883, 60)\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual 
interface.\nclient.loop_forever()","sub_path":"pi_motor/Adafruit-Motor-HAT-Python-Library/examples/mqttjson.py","file_name":"mqttjson.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"335920991","text":"import numpy as np\nimport scipy.stats as scist\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\ndef nordata(genedata=False, filename='/work/1000.csv', header=0, usecols1=[1],usecols2=[2], datatype='ind',\nmu1=30, sigma1=2, samplen1=50, sn1s=1, sn1b=1, \nmu2=28, sigma2=1, samplen2=50, sn2s=1, sn2b=1):\n #real data\n if genedata==False:\n \n data1=pd.read_csv(filename,header=header, usecols=usecols1)\n data2=pd.read_csv(filename,header=header, usecols=usecols2)\n data1=data1.values.flatten()\n data2=data2.values.flatten()\n w1,p1=scist.shapiro(data1)\n print ('data1: \\nmu:' + str(np.mean(data1))+ ' sigma:'+str(np.std(data1))+' median:'+str(np.median(data1))+ ' 25%:'+str(np.quantile(data1,0.25,interpolation='lower')) +' 75%:'+str(np.quantile(data1,0.75,interpolation='higher')))\n w2,p2=scist.shapiro(data2)\n print ('data2: \\nmu:' + str(np.mean(data2))+ ' sigma:'+str(np.std(data2))+' median:'+str(np.median(data2))+ ' 25%:'+str(np.quantile(data2,0.25,interpolation='lower')) +' 75%:'+str(np.quantile(data2,0.75,interpolation='higher')))\n \n print ('\\nPerform the Shapiro-Wilk test for normality: ', '\\ndata1:','\\nstatics: ' +str(w1),'\\np-value: ' +str(p1))\n print ('data2:','\\nstatics: ' +str(w2),'\\np-value: ' +str(p2))\n\n #fake data\n elif genedata==True:\n \n rangebegin1=(mu1-sn1s*sigma1)\n rangeend1=(mu1+sn1b*sigma1)\n ntip1=round(samplen1/4)\n tip1=np.random.uniform(rangebegin1,rangeend1,ntip1)\n data1=np.random.normal(mu1, sigma1,(samplen1-ntip1))\n data1=np.append(data1,tip1)\n data1=abs(data1)\n #np.savetxt('/home/jovyan/work/results/data1.txt',data1, fmt='%.6f', delimiter=',')\n #plt.subplot(121)\n #plt.hist(data1)\n w1,p1=scist.shapiro(data1)\n print ('data1: \\nmu:' + str(np.mean(data1))+ ' sigma:'+str(np.std(data1))+' median:'+str(np.median(data1))+ ' 25%:'+str(np.quantile(data1,0.25,interpolation='lower')) +' 75%:'+str(np.quantile(data1,0.75,interpolation='higher')))\n #print ('Perform the Shapiro-Wilk test for normality: ', '\\n\\ndata1:','\\nstatics: ' +str(w1),'\\np-value: ' +str(p1))\n \n\n rangebegin2=(mu2-sn2s*sigma2)\n rangeend2=(mu2+sn2b*sigma2)\n ntip2=round(samplen2/4)\n tip2=np.random.uniform(rangebegin2,rangeend2,ntip2)\n data2=np.random.normal(mu2, sigma2,(samplen2-ntip2))\n data2=np.append(data2,tip2)\n data2=abs(data2)\n #np.savetxt('/home/jovyan/work/results/data2.txt',data2, fmt='%.6f', delimiter=',')\n #plt.subplot(122)\n #plt.hist(data2)\n w2,p2=scist.shapiro(data2)\n print ('data2: \\nmu:' + str(np.mean(data2))+ ' sigma:'+str(np.std(data2))+' median:'+str(np.median(data2))+ ' 25%:'+str(np.quantile(data2,0.25,interpolation='lower')) +' 75%:'+str(np.quantile(data2,0.75,interpolation='higher')))\n \n print ('\\nPerform the Shapiro-Wilk test for normality: ', '\\ndata1:','\\nstatics: ' +str(w1),'\\np-value: ' +str(p1))\n print ('data2:','\\nstatics: ' +str(w2),'\\np-value: ' +str(p2))\n\n\n #normal distribution\n if p2>=0.05 and p1>=0.05:\n print ('\\nnormal distribution')\n # levene\n levenestatistic,levenepvalue=scist.levene(data1, data2)\n if datatype=='ind':\n \n if levenepvalue>=0.05:\n tteststatistic,ttestpvalue=scist.ttest_ind(data1,data2)\n print ('independent-sample t test')\n \n #Heterogeneity of variance\n elif 
levenepvalue<0.05:\n tteststatistic,ttestpvalue=scist.ttest_ind(data1,data2,equal_var = False)\n print (\"Heterogeneity of variance, use T' test\")\n \n print ('\\nstatics: ' +str(tteststatistic),'\\np-value: ' +str(ttestpvalue))\n \n elif datatype=='rel':\n tteststatistic,ttestpvalue=scist.ttest_rel(data1,data2)\n print ('paired-sample test','\\n\\nstatics: ' +str(tteststatistic),'\\np-value: ' +str(ttestpvalue))\n\n\n #non-normal distribution\n else:\n print ('\\nnon-normal distribution')\n \n if datatype=='ind':\n\n # wilcox秩序和检验\n if samplen1<20 or samplen2<20 :\n print ('\\nWilcoxon rank-sum test')\n wstatistic,wpvalue=scist.ranksums(data1,data2)\n print ('\\nstatics: ' +str(wstatistic),'\\np-value: ' +str(wpvalue))\n\n # Mann-Whitney U检验\n else:\n print ('\\nMann–Whitney U test')\n wstatistic,wpvalue=scist.mannwhitneyu(data1,data2)\n print ('\\nstatics: ' +str(wstatistic),'\\np-value: ' +str(wpvalue))\n\n elif datatype=='rel':\n # Wilcox检验\n print ('Wilcoxon signed-rank test ')\n wstatistic,wpvalue=scist.wilcoxon(data1,data2, zero_method='wilcox', correction=False)\n print ('\\nstatics: ' +str(wstatistic),'\\np-value: ' +str(wpvalue))\n\n return data1, data2\n\ndef violinconcat(crange,a0=([1,2,3]),b0=([1,2,3]),a1=([1,2,3]),b1=([1,2,3]),a2=([1,2,3]),b2=([1,2,3]),\na3=([1,2,3]),b3=([1,2,3]),a4=([1,2,3]),b4=([1,2,3]),a5=([1,2,3]),b5=([1,2,3])): #a0,b0,a1,b1\n \n numdata0=np.zeros(len(a0))\n numdata1=np.ones(len(b0))\n\n wholedata=[]\n #print (a0)\n for i in range(crange):\n locals()['measurement'+str(i)]=np.zeros(len(a0))+int(i)\n\n for j in range(crange):\n locals()['cdata'+str(j)]=pd.DataFrame({'group':numdata0,'measurement':locals()['measurement'+str(j)],'data': locals()['a'+str(j)]})\n \n locals()['cdatb'+str(j)]=pd.DataFrame({'group':numdata1,'measurement':locals()['measurement'+str(j)],'data': locals()['b'+str(j)]})\n \n wholedata.append(locals()['cdata'+str(j)])\n wholedata.append(locals()['cdatb'+str(j)])\n \n wholedata=pd.concat(wholedata,axis=0)\n return wholedata \n\ndef violinhalfplot(group,measurement,plotdata,wholedata,groupname=['A','B'],savefig=False,zerol='shit',onel='fuck off'):\n plt.figure(figsize=(12, 6),dpi=900)\n\n tips = sns.load_dataset('tips')\n ax=sns.violinplot(x=measurement, y=plotdata, hue=group,\n data=wholedata,split=True)\n\n legend = ax.legend() \n legend.texts[0].set_text(zerol)\n legend.texts[1].set_text(onel)\n\n plt.xlabel('Group', fontsize=12)\n plt.ylabel('Score', fontsize=12)\n groupsn=np.arange(0,(np.max(group)+1),1)\n\n plt.xticks(groupsn,groupname)\n\n if savefig==True:\n plt.savefig('/home/jovyan/work/results/viohalf.png')\n \n\ndef vioscahalfplot(group,measurement,plotdata,wholedata,groupname=['A','B'],savefig=False,zerol='shit',onel='fuck off',usetitle=False, newtitle = 'My title'):\n plt.figure(figsize=(12, 6),dpi=900)\n tips = sns.load_dataset('tips')\n axa=sns.violinplot(x=measurement, y=plotdata, hue=group,\n data=wholedata,split=True)\n\n axa=sns.stripplot(x=measurement, y=plotdata, hue=group, data=wholedata,\n dodge=True,split=True,\n jitter=True,palette=sns.color_palette(\"plasma_r\",n_colors=3), linewidth=1,\n alpha=0.35,marker='D',size=5, edgecolor='white') #h\n \n legend = axa.legend() \n legend.texts[0].set_text(zerol)\n legend.texts[1].set_text(onel)\n legend.texts[2].set_text(zerol)\n legend.texts[3].set_text(onel)\n \n if usetitle==True:\n legend.set_title(newtitle)\n\n plt.xlabel('Group', fontsize=12)\n plt.ylabel('Score', fontsize=12)\n groupsn=np.arange(0,(np.max(group)+1),1)\n\n plt.xticks(groupsn,groupname)\n\n if 
savefig==True:\n plt.savefig('/home/jovyan/work/results/vio&sca.png')\n\ndef normaldataconcat(a1,b1,a2,b2,a3,b3,a4,b4):\n numdata1=np.zeros(len(a1))\n numdata2=np.ones(len(b1))\n\n cdata1=pd.DataFrame({'group':numdata1,'data': a1})\n cdatb1=pd.DataFrame({'group':numdata2,'data': b1})\n\n cdata2=pd.DataFrame({'data': a2})\n cdatb2=pd.DataFrame({'data': b2})\n\n cdata3=pd.DataFrame({'data': a3})\n cdatb3=pd.DataFrame({'data': b3}) \n\n cdata4=pd.DataFrame({'data': a4})\n cdatb4=pd.DataFrame({'data': b4})\n\n\n dataf1=pd.concat([cdata1, cdatb1],axis=0)\n dataf2=pd.concat([cdata2, cdatb2],axis=0)\n dataf3=pd.concat([cdata3, cdatb3],axis=0)\n dataf4=pd.concat([cdata4, cdatb4],axis=0)\n\n wholedata=pd.concat([dataf1, dataf2, dataf3, dataf4],axis=1)\n np.savetxt('/home/jovyan/work/results/alldata.txt',wholedata, fmt='%.6f', delimiter=',')\n","sub_path":"Feature Engineering/def/plot/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":8455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"627535250","text":"# Made with python3\n# (C) @FayasNoushad\n# Copyright permission under MIT License\n# All rights reserved by FayasNoushad\n# License -> https://github.com/FayasNoushad/Image-Search-Bot/blob/main/LICENSE\n\nimport os\nimport requests\nfrom requests.utils import requote_uri\nfrom pyrogram import Client, filters\nfrom pyrogram.types import *\n\n\nBot = Client(\n \"Image-Search-Bot\",\n bot_token = os.environ[\"BOT_TOKEN\"],\n api_id = int(os.environ[\"API_ID\"]),\n api_hash = os.environ[\"API_HASH\"]\n)\n\nAPI = \"https://apibu.herokuapp.com/api/y-images?query=\"\n\n\n@Bot.on_message(filters.private & filters.command([\"start\"]))\nasync def start(bot, update):\n await update.reply_text(\n text=f\"Hello {update.from_user.mention}, I am an image search bot. 
You can use me in inline mode.\\n\\nMade by @MCC_LINKS\",\n disable_web_page_preview=True,\n quote=True\n )\n\n\n@Bot.on_message(filters.private & filters.text)\nasync def filter_text(bot, update):\n await update.reply_text(\n text=f\"Click a button below to search your query.\\n\\nQuery: `{update.text}`\",\n reply_markup=InlineKeyboardMarkup(\n [\n [InlineKeyboardButton(text=\"Search Here\", switch_inline_query_current_chat=update.text)],\n [InlineKeyboardButton(text=\"Search in another chat\", switch_inline_query=update.text)]\n ]\n ),\n disable_web_page_preview=True,\n quote=True\n )\n\n\n@Bot.on_inline_query()\nasync def search(bot, update):\n results = requests.get(API + requote_uri(update.query)).json()[\"result\"][:50]\n answers = []\n for result in results:\n answers.append(\n InlineQueryResultPhoto(\n title=update.query,\n description=result,\n caption=\"Made by @MCC_LINKS\",\n photo_url=result\n )\n )\n await update.answer(answers)\n\n\nBot.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"641830500","text":"'''\r\nCreated on Sep 21, 2016\r\n\r\n@author: Jimbo\r\n'''\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nspread = np.random.rand(50) * 100\r\ncenter = np.ones(25) * 50\r\nflier_high = np.random.rand(10) * 100 + 100\r\nflier_low = np.random.rand(10) * -100\r\ndata = np.concatenate((spread, center, flier_high, flier_low), 0)\r\n\r\nplt.boxplot(data)\r\nplt.show()\r\n\r\n ","sub_path":"graph/box_plot.py","file_name":"box_plot.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"217140494","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 17 15:49:42 2019\n\n@author: Vivekanandan | Techvantage\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nfrom keras.preprocessing.image import img_to_array, load_img\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport cv2\n\n\n\nSIZE = (300, 300)\nIMAGE_PATH ='C:/Users/100119/Desktop/kyc_retrain/fire_base_images/aadhar/Aadhar - I.jpg'\nMODEL_PATH ='C:/Users/100119/Desktop/kyc_retrain/kyc_model_07_02_2020/kyc_model_tflite_07_02_2020.tflite'\ndef transform_image(size):\n # function for transforming images into a format supported by CNN\n x = load_img('aa.jpg', target_size=(size[0], size[1]) )\n x = img_to_array(x) / 255\n x = np.expand_dims(x, axis=0)\n return (x)\n\ndef show_image(image):\n plt.figure(figsize=(10,10))\n plt.imshow(image, aspect = 'auto')\n plt.show()\n\ndef model_test(image_path,model_path,size):\n interpreter = tf.lite.Interpreter(model_path=model_path)\n # interpreter = tf.lite.Interpreter(model_path='adhar_pan_license_17-10-19.tflite')\n interpreter.allocate_tensors()\n \n \n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n \n input_shape = input_details[0]['shape']\n \n #CLASSES = ['name','dob','gender','no','front','address','back']\n #CLASSES =['permit_no','kitas_front']\n \n CLASSES = [\"cow_eartag\", \"laptop\", \"mobile\", \"cup\", \"chair\",\n \"pen\", \"mouse\", \"monitor\", \"book\", \"bottle\", \"key_board\",\n \"tablet\", \"person\", \"eye_glass\",\"cpu\", \"adhar_front\", \"adhar_back\", \"pan\", \"license\",\"atm\",\"business_card\",\"office_id\",\"paper\",\"adhar_no\"]\n \n #CLASSES =['pan','pan_no','pan_name','pan_dob']\n \n COLORS = np.random.uniform(0, 255, 
size=(len(CLASSES), 3))\n \n \n frame = cv2.imread(image_path)\n (h, w) = frame.shape[:2]\n cv2.imwrite('aa.jpg',frame)\n img = transform_image(size)\n \n interpreter.set_tensor(input_details[0]['index'], img)\n \n interpreter.invoke()\n output_data = interpreter.get_tensor(output_details[0]['index'])\n \n Result_Data = pd.DataFrame(output_data[0],columns=['y1','x1','y2','x2'])\n output_data = interpreter.get_tensor(output_details[1]['index'])\n Result_Data['class'] = list(output_data[0])\n output_data = interpreter.get_tensor(output_details[2]['index'])\n Result_Data['score'] = list(output_data[0])\n Result_Data['score'] = Result_Data['score'] * 100\n Result_Data['x1'] = (Result_Data['x1']*w).astype(int)\n Result_Data['y1'] = (Result_Data['y1']*h).astype(int)\n Result_Data['x2'] = (Result_Data['x2']*w).astype(int)\n Result_Data['y2'] = (Result_Data['y2']*h).astype(int)\n \n count =1\n for index,detection in Result_Data.iterrows():\n confidence = int(detection['score']) \n if confidence >10:\n idx = int(detection['class'])\n (startX, startY, endX, endY) = detection['x1'].astype(\"int\"),detection['y1'].astype(\"int\"),detection['x2'].astype(\"int\"),detection['y2'].astype(\"int\")\n crop=frame[startY:endY,startX:endX]\n cv2.imwrite('C:/Users/100119/Desktop/kyc_retrain/fire_base_images/' +str(count)+'.jpg',crop)\n \n print(CLASSES[idx],\"\\t Score : \",confidence)\n # if CLASSES[idx] == 'adhar_no':\n # cv2.rectangle(frame, (startX, startY), (endX, endY),(0,0,255), -1)\n \n count+=1\n \n cv2.imshow('crop',crop)\n cv2.waitKey(0)\n \n label = \"{}: {:.2f}%\".format(CLASSES[idx],confidence)\n cv2.rectangle(frame, (startX, startY), (endX, endY),COLORS[idx], 3)\n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(frame, label, (startX, y),cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n else:\n print(\"No object found\")\n \n cv2.imshow(\"Frame\", cv2.resize(frame,(400,400)))\n cv2.waitKey(0)\n cv2.destroyAllWindows() \n \nmodel_test(IMAGE_PATH,MODEL_PATH,SIZE)","sub_path":"kyc_tflite_model _test_v2.py","file_name":"kyc_tflite_model _test_v2.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"54111554","text":"'''\n100. 
Given two binary trees, write a function to check whether they are the same.\n\nTwo trees are considered the same if they are structurally identical and the corresponding nodes have the same value.\n'''\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:\n if not p and not q:\n return True\n if p and q and p.val == q.val:\n return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)\n return False\n\n \n def isSameTree2(self, p: TreeNode, q: TreeNode) -> bool:\n stack = [(q, p)]\n while stack:\n a, b = stack.pop()\n if not a and not b:\n continue\n if a and b and a.val == b.val:\n stack.append((a.left, b.left))\n stack.append((a.right, b.right))\n else:\n return False\n return True\n\n\n","sub_path":"100_isSameTree.py","file_name":"100_isSameTree.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"351203374","text":"# encoding=UTF-8\n\n# Copyright © 2009-2014 Jakub Wilk\n#\n# This package is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 dated June, 1991.\n\nfrom common import (\n case,\n)\n\nclass test(case):\n '''\n https://bugs.debian.org/508391\n fixed in [f5d4727b2490]\n '''\n def test(self):\n self.pdf2djvu().assert_()\n self.print_outline().assert_()\n\n# vim:ts=4 sts=4 sw=4 et\n","sub_path":"tests/test-empty-outline.py","file_name":"test-empty-outline.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"104165341","text":"\"\"\"\nMDB.\n\nhttps://github.com/GII/MDB\n\"\"\"\n\n# Python 2 compatibility imports\nfrom __future__ import absolute_import, division, print_function\nfrom future import standard_library\n\nstandard_library.install_aliases()\nfrom builtins import object\n\n# Library imports\nimport rospy\nfrom std_msgs.msg import Bool, Float64, Int16\n\n# MDB imports\nfrom mdb_robots_policies.srv import (\n BaxThrow,\n BaxGrab,\n BaxPush,\n BaxGrabBoth,\n BaxDropBoth,\n GetSense,\n BaxCheckReach,\n BaxGetCompleteSense,\n JoystickControl,\n BaxThrowRequest,\n BaxGrabRequest,\n BaxPushRequest,\n BaxGrabBothRequest,\n BaxDropBothRequest,\n JoystickControlRequest,\n)\nfrom mdb_common.srv import BaxChange, BaxChangeRequest\n\n\nclass policies_manager(object):\n def __init__(self, global_exp):\n self.global_exp = global_exp\n self.fixed_height = rospy.get_param(\"~fixed_height\")\n self.super_throw = rospy.get_param(\"~super_throw\")\n self.velocity = 0.85\n # Service Proxies\n self.bax_throw_clnt = rospy.ServiceProxy(\"/baxter/policy/throw\", BaxThrow)\n self.bax_grab_clnt = rospy.ServiceProxy(\"/baxter/policy/grab\", BaxGrab)\n self.bax_push_clnt = rospy.ServiceProxy(\"/baxter/policy/push\", BaxPush)\n self.bax_change_hands_clnt = rospy.ServiceProxy(\"/baxter/policy/change_hands\", BaxChange)\n self.bax_grab_both_clnt = rospy.ServiceProxy(\"/baxter/policy/grab_both\", BaxGrabBoth)\n self.bax_drop_both_clnt = rospy.ServiceProxy(\"/baxter/policy/drop_both\", BaxDropBoth)\n self.bax_ask_help_clnt = rospy.ServiceProxy(\"/baxter/policy/ask_for_help\", BaxChange)\n self.bax_joy_control_clnt = rospy.ServiceProxy(\"/baxter/policy/joystick\", JoystickControl)\n self.bax_drop_clnt = rospy.ServiceProxy(\"/baxter/policy/drop\", BaxGrab)\n self.bax_get_sense_clnt = rospy.ServiceProxy(\"/baxter/get_sense\", GetSense)\n 
self.bax_get_complete_sense_clnt = rospy.ServiceProxy(\"/baxter/get_complete_sense\", BaxGetCompleteSense)\n self.bax_check_far_reach_clnt = rospy.ServiceProxy(\"/baxter/check_far_reach\", BaxCheckReach)\n # Publishers\n self.super_throwing_pub = rospy.Publisher(\"/baxter_throwing/command\", Int16, queue_size=1)\n\n def select_sweep_angle(self, arg):\n options = {\"left\": -0.3925 - 0.11, \"right\": 0.3925 + 0.11}\n return options[arg]\n\n def choose_x_dimension(self, arg):\n options = {\"exp_box\": 0.12, \"exp_small_obj\": 0.03, \"exp_big_obj\": 0.07}\n return options[arg]\n\n def choose_y_dimension(self, arg):\n options = {\"exp_box\": 0.115, \"exp_small_obj\": 0.025, \"exp_big_obj\": 0.0675}\n return options[arg]\n\n def choose_push_dist(self, arg):\n options = {\"exp_box\": 0.075, \"exp_small_obj\": 0.025, \"exp_big_obj\": 0.0675}\n return options[arg]\n\n def choose_policy_srv(self, arg):\n options = {\n \"grasp_object\": self.bax_grab_clnt,\n \"grasp_with_two_hands\": self.bax_grab_both_clnt,\n \"change_hands\": self.bax_change_hands_clnt,\n \"sweep_object\": self.bax_push_clnt,\n \"put_object_in_box\": self.bax_grab_clnt,\n \"put_object_with_robot\": self.bax_grab_clnt,\n \"throw\": self.bax_throw_clnt,\n \"ask_nicely\": self.bax_ask_help_clnt,\n \"joystick\": self.bax_joy_control_clnt,\n \"drop_object\": self.bax_drop_clnt,\n }\n return options[arg]\n\n def choose_policy_req(self, arg):\n options = {\n \"grasp_object\": BaxGrabRequest(),\n \"grasp_with_two_hands\": BaxGrabBothRequest(),\n \"change_hands\": BaxChangeRequest(),\n \"sweep_object\": BaxPushRequest(),\n \"put_object_in_box\": BaxGrabRequest(),\n \"put_object_with_robot\": BaxGrabRequest(),\n \"throw\": BaxThrowRequest(),\n \"ask_nicely\": BaxChangeRequest(),\n \"joystick\": JoystickControlRequest(),\n \"drop_object\": BaxGrabRequest(),\n }\n return options[arg]\n\n def choose_arm(self, arg, left_grip, right_grip):\n if not left_grip and not right_grip:\n if arg > 0.0:\n return \"left\"\n elif arg < 0.0:\n return \"right\"\n else:\n return \"left\"\n elif left_grip and not right_grip:\n return \"left\"\n elif not left_grip and right_grip:\n return \"right\"\n elif left_grip and right_grip:\n return \"both\"\n\n def choose_throw_angle(self, arm, box_angle):\n if (arm == \"left\" and box_angle < 0.0) or (arm == \"right\" and box_angle > 0.0):\n return -1.0 * box_angle\n else:\n return box_angle\n\n def choose_throw_distance(self, arm, box_angle, box_dist):\n if (arm == \"left\" and box_angle < 0.0) or (arm == \"right\" and box_angle > 0.0):\n return 1.25\n else:\n return box_dist\n\n def is_same_side(self, arm, box_angle):\n if (arm == \"left\" and box_angle < 0.0) or (arm == \"right\" and box_angle > 0.0):\n return False\n else:\n return True\n\n def control_orientation(self, global_s, policy_code):\n if policy_code == \"change_hands\":\n self.global_exp.pan_to(\"front\", 0.1)\n elif policy_code == \"ask_nicely\" or (global_s.left_grip > 0.0 and global_s.right_grip > 0.0):\n self.global_exp.pan_to(\"front\", 0.1)\n elif global_s.left_grip > 0.0 or global_s.right_grip > 0.0: # Look to the box\n self.global_exp.pan_to_pos(global_s.box_sens.angle, 0.1)\n elif (\n policy_code != \"change_hands\" and not global_s.left_grip > 0.0 and not global_s.right_grip > 0.0\n ): # Look to the ball\n self.global_exp.pan_to_pos(global_s.obj_sens.angle, 0.1)\n\n def gripper_sense_data(self, global_s, arm):\n if arm == \"left\":\n return global_s.left_grip\n elif arm == \"right\":\n return global_s.right_grip\n\n def choose_sweep_height(self, 
obj):\n options = {\"exp_small_obj\": 0.03, \"exp_big_obj\": 0.1}\n return options[obj]\n\n def policy_grasp_object(self, policy_code, global_s, arm, srv):\n if self.global_exp.obj_type == \"exp_small_obj\" and (\"gripper_and_low_friction\" in self.global_exp.world):\n srv.object_position.const_dist = global_s.obj_sens.dist\n srv.object_position.angle = global_s.obj_sens.angle\n srv.object_position.height = self.fixed_height\n srv.orientation = \"current\"\n srv.arm = arm\n srv.scale = self.velocity\n self.global_exp.adopt_expression(\"focus\")\n if not self.gripper_sense_data(global_s, arm) > 0.0:\n resp = self.choose_policy_srv(policy_code)(srv).result\n return resp\n return False\n\n def policy_grasp_with_two_hands(self, policy_code, global_s, arm, srv):\n resp = False\n if (\n (global_s.obj_sens.angle < 0.3925)\n and (global_s.obj_sens.angle > -0.3925)\n and (global_s.obj_sens.dist > 0.47)\n and (global_s.obj_sens.dist < 0.75)\n and (global_s.left_grip < 1.0 and global_s.right_grip < 1.0)\n ):\n if self.global_exp.obj_type == \"exp_big_obj\" and (\"gripper_and_low_friction\" in self.global_exp.world):\n srv.sensorization.const_dist = global_s.obj_sens.dist\n srv.sensorization.angle = global_s.obj_sens.angle\n srv.sensorization.height = self.fixed_height\n srv.size = 0.08\n self.global_exp.adopt_expression(\"focus\")\n resp = self.choose_policy_srv(policy_code)(srv).result\n elif self.global_exp.world == \"no_gripper_and_high_friction\":\n srv.sensorization.const_dist = global_s.obj_sens.dist\n srv.sensorization.angle = global_s.obj_sens.angle\n srv.sensorization.height = self.fixed_height\n if self.global_exp.obj_type == \"exp_big_obj\":\n srv.size = 0.08\n else:\n srv.size = 0.06\n self.global_exp.adopt_expression(\"focus\")\n resp = self.choose_policy_srv(policy_code)(srv).result\n return resp\n\n def policy_change_hands(self, policy_code, global_s, arm, srv):\n if \"gripper_and_low_friction\" in self.global_exp.world:\n self.global_exp.pan_to(\"front\", 0.1)\n srv.request = True\n self.global_exp.adopt_expression(\"focus\")\n resp = self.choose_policy_srv(policy_code)(srv).result\n return resp\n return False\n\n def policy_sweep_object(self, policy_code, global_s, arm, srv):\n resp = False\n srv.obj_sens.const_dist = global_s.obj_sens.dist\n srv.obj_sens.angle = global_s.obj_sens.angle\n srv.obj_sens.height = self.fixed_height + 0.005 + self.choose_sweep_height(self.global_exp.obj_type)\n srv.dest_sens.const_dist = 0.70\n if self.global_exp.obj_type == \"exp_small_obj\" and (\"gripper_and_low_friction\" in self.global_exp.world):\n srv.dest_sens.angle = self.select_sweep_angle(arm)\n else:\n srv.dest_sens.angle = 0.0\n srv.dest_sens.height = self.fixed_height + 0.005 + self.choose_sweep_height(self.global_exp.obj_type)\n srv.radius = self.choose_push_dist(self.global_exp.obj_type) + 0.02\n srv.arm = arm\n srv.scale = self.velocity\n if \"gripper_and_low_friction\" in self.global_exp.world:\n srv.grip = True\n self.global_exp.adopt_expression(\"focus\")\n if not arm == \"both\":\n resp = self.choose_policy_srv(policy_code)(srv).result\n if resp:\n rospy.set_param(\"/check_reward\", True)\n self.global_exp.complete_pan_static()\n rospy.delete_param(\"/check_reward\")\n return resp\n\n def policy_put_object_in(self, policy_code, global_s, arm, srv):\n resp = False\n if (\n global_s.left_grip < 1.0 or global_s.right_grip < 1.0\n ) and \"gripper_and_low_friction\" in self.global_exp.world:\n if policy_code == \"put_object_in_box\": # Destination = Box\n srv.object_position.const_dist = 
global_s.box_sens.dist\n srv.object_position.angle = global_s.box_sens.angle\n else: # Destination = Robot (predefined)\n srv.object_position.const_dist = 0.47 + 0.03\n srv.object_position.angle = 0.0\n srv.object_position.height = self.fixed_height + self.choose_sweep_height(self.global_exp.obj_type)\n srv.orientation = \"current\"\n srv.arm = arm\n srv.scale = self.velocity\n self.global_exp.adopt_expression(\"focus\")\n if not (\n self.gripper_sense_data(global_s, arm) < 1.0\n or (not self.is_same_side(arm, global_s.box_sens.angle) and policy_code == \"put_object_in_box\")\n ):\n resp = self.choose_policy_srv(policy_code)(srv).result\n elif global_s.left_grip > 0.0 and global_s.right_grip > 0.0:\n srv = BaxDropBothRequest()\n if policy_code == \"put_object_in_box\": # Destination = Box\n srv.destination.const_dist = global_s.box_sens.dist\n srv.destination.angle = global_s.box_sens.angle\n else: # Destination = Robot (predefined)\n srv.destination.const_dist = 0.47 + 0.08\n srv.destination.angle = 0.0\n srv.destination.height = self.fixed_height + self.choose_sweep_height(\"exp_big_obj\")\n srv.size = 0.15\n self.global_exp.adopt_expression(\"focus\")\n resp = self.bax_drop_both_clnt(srv).result\n if resp:\n rospy.set_param(\"/check_reward\", True)\n self.global_exp.complete_pan_static()\n rospy.delete_param(\"/check_reward\")\n return resp\n\n def policy_throw(self, policy_code, global_s, arm, srv):\n resp = False\n if self.super_throw:\n srv.arm = \"right\"\n self.global_exp.adopt_expression(\"focus\")\n self.super_throwing_pub.publish(4)\n rospy.sleep(10)\n self.super_throwing_pub.publish(5)\n rospy.sleep(20)\n resp = self.choose_policy_srv(policy_code)(srv).result\n rospy.set_param(\"/check_reward\", True)\n self.global_exp.complete_pan_static()\n rospy.delete_param(\"/check_reward\")\n elif self.global_exp.obj_type == \"exp_small_obj\" and (\"gripper_and_low_friction\" in self.global_exp.world):\n srv.sensorization.const_dist = self.choose_throw_distance(\n arm, global_s.box_sens.angle, global_s.box_sens.dist\n )\n srv.sensorization.angle = self.choose_throw_angle(arm, global_s.box_sens.angle)\n srv.arm = arm\n self.global_exp.adopt_expression(\"focus\")\n resp = self.choose_policy_srv(policy_code)(srv).result\n if resp:\n rospy.set_param(\"/check_reward\", True)\n self.global_exp.complete_pan_static()\n rospy.delete_param(\"/check_reward\")\n return resp\n\n def policy_ask_nicely(self, policy_code, global_s, arm, srv):\n resp = False\n self.global_exp.pan_to(\"front\", 0.1)\n if not (self.gripper_sense_data(global_s, \"left\") > 0.0 or self.gripper_sense_data(global_s, \"right\") > 0.0):\n srv.request = True\n self.choose_policy_srv(policy_code)(srv)\n rospy.sleep(1)\n self.global_exp.adopt_expression(\"normal\")\n self.global_exp.complete_pan_static()\n resp = True\n return resp\n\n def policy_joystick(self, policy_code, global_s, arm, srv):\n srv.joystick_pos.const_dist = 0.6\n srv.joystick_pos.angle = 0.785\n srv.joystick_pos.height = 0.09\n srv.joystick_angle = 0.0\n srv.time_to_control = 0.0\n srv.arm_to_move = arm\n srv.velocity_scale = 1.0\n self.global_exp.adopt_expression(\"focus\")\n if not self.gripper_sense_data(global_s, arm) > 0.0:\n resp = self.choose_policy_srv(policy_code)(srv).result\n return resp\n return False\n\n def policy_drop_object(self, policy_code, global_s, arm, srv):\n resp = False\n srv.object_position.const_dist = global_s.box_sens.dist\n srv.object_position.angle = global_s.box_sens.angle\n srv.object_position.height = self.fixed_height + 
self.choose_sweep_height(self.global_exp.obj_type)\n srv.orientation = \"current\"\n srv.arm = arm\n srv.scale = self.velocity\n self.global_exp.adopt_expression(\"focus\")\n if self.gripper_sense_data(global_s, arm) >= 1.0:\n resp = self.choose_policy_srv(policy_code)(srv).result # call the service proxy; choose_policy returns the high-level handler, not a service\n if resp:\n rospy.set_param(\"/check_reward\", True)\n self.global_exp.complete_pan()\n rospy.delete_param(\"/check_reward\")\n return resp\n\n def choose_policy(self, arg):\n options = {\n \"grasp_object\": self.policy_grasp_object,\n \"grasp_with_two_hands\": self.policy_grasp_with_two_hands,\n \"change_hands\": self.policy_change_hands,\n \"sweep_object\": self.policy_sweep_object,\n \"put_object_in_box\": self.policy_put_object_in,\n \"put_object_with_robot\": self.policy_put_object_in,\n \"throw\": self.policy_throw,\n \"ask_nicely\": self.policy_ask_nicely,\n \"joystick\": self.policy_joystick,\n \"drop_object\": self.policy_drop_object,\n }\n return options[arg]\n\n def execute_policy(self, policy_code):\n global_s = self.bax_get_sense_clnt(Bool(True))\n arm = self.choose_arm(\n global_s.obj_sens.angle,\n self.gripper_sense_data(global_s, \"left\") > 0.0,\n self.gripper_sense_data(global_s, \"right\") > 0.0,\n )\n srv = self.choose_policy_req(policy_code)\n resp = None\n # self.control_orientation(global_s, policy_code)\n\n rospy.loginfo(\n \"Iteration %s > Executing policy %s with the %s arm\", self.global_exp.exp_iteration, policy_code, arm\n )\n self.global_exp.exp_iteration += 1\n\n # Execute policy\n resp = self.choose_policy(policy_code)(policy_code, global_s, arm, srv)\n\n # Publish the next sensorization\n rospy.loginfo(\"Publishing sensorization\")\n self.bax_get_complete_sense_clnt(\n Bool(True),\n Float64(self.choose_x_dimension(\"exp_box\")),\n Float64(self.choose_x_dimension(self.global_exp.obj_type)),\n )\n\n # If it failed\n if not resp:\n self.global_exp.adopt_expression(\"confused\")\n rospy.sleep(2)\n\n # Standard face\n self.global_exp.adopt_expression(\"normal\")\n rospy.loginfo(\"Success? 
: %s\", resp)\n return resp\n","sub_path":"mdb_robots_experiments/src/mdb_robots_experiments/policies_manager.py","file_name":"policies_manager.py","file_ext":"py","file_size_in_byte":16938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"270049042","text":"# PROBLEM DESCRIPTION:\n# Write a function to find the longest common prefix string amongst an array of strings.\n# If there is no common prefix, return an empty string \"\".\n\n# EXAMPLE:\n# INPUT: OUTPUT:\n# [\"flower\",\"flow\",\"flight\"] \"fl\"\n# \n# INPUT: OUTPUT \n# [\"dog\",\"racecar\",\"car\"] \"\"\n# EXPLANATION: There is no common prefix among the input strings.\n\n# NOTE:\n# All given inputs are in lowercase letters a-z.\n\nclass Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n if not strs:\n return \"\"\n \n curr_str = strs[0]\n res = \"\"\n for str in strs[1:]:\n max_len = min(len(str), len(curr_str))\n print(f\"str: {str}, curr_str: {curr_str}\")\n for i in range(max_len):\n if str[i] != curr_str[i]:\n break\n \n res += str[i]\n \n curr_str = res\n res = \"\"\n \n return curr_str\n ","sub_path":"leetcode/longest_common_prefix.py","file_name":"longest_common_prefix.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"455094721","text":"import requests\n\n\ndef compare_holiday_counts(country_code1, country_code2, year=2019):\n \"\"\"Returns the difference between the holiday counts of country 1 and country 2.\n\n Args:\n country_code1: The ISO 3166-1 alpha-2 country code for country 1.\n country_code2: The ISO 3166-1 alpha-2 country code for country 2.\n year: The year to use for the comparison.\n\n Returns:\n The number of holidays of country 1 minus the number of holidays of country 2.\n \"\"\"\n # build the link depending on the country that you are looking for\n link = 'https://date.nager.at/api/v2/publicholidays/{year}/{country_code}'\n link1 = link.format(year=year, country_code=country_code1)\n link2 = link.format(year=year, country_code=country_code2)\n\n # request information as json\n holidays1 = requests.get(link1).json()\n holidays2 = requests.get(link2).json()\n\n # calculate the difference\n return len(holidays1) - len(holidays2)\n\n\nif __name__ == '__main__':\n country_code1 = \"DE\"\n country_code2 = \"NI\"\n\n print(\"{} has {} more days of holidays than {}.\".format(country_code1,\n compare_holiday_counts(country_code1, country_code2), country_code2))\n","sub_path":"2019/12_Web_scraping/holiday_comparison.py","file_name":"holiday_comparison.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"64657616","text":"from peewee import (CharField, Check, DateField, DecimalField,\r\n ForeignKeyField, IntegerField, ManyToManyField, Model,\r\n SqliteDatabase)\r\n\r\ndatabase = SqliteDatabase('betsy_workshop.db', pragmas={\"foreign_keys\": 1})\r\n# When using sqlite pragma should be put on foreign_keys (only)\r\n\r\nclass BaseModel(Model):\r\n class Meta:\r\n database = database\r\n#if no primary key is defined an implicit primary key is added\r\n\r\nclass User(BaseModel):\r\n first_name = CharField()\r\n last_name = CharField()\r\n street = CharField()\r\n town = CharField()\r\n\r\nclass Tag(BaseModel):\r\n name = CharField()\r\n\r\nclass Product(BaseModel):\r\n product_name = CharField()\r\n description = CharField()\r\n price_per_unit = 
DecimalField(8, 2, True)\r\n tags = ManyToManyField(Tag)\r\n\r\nclass UserProduct(BaseModel):\r\n user_id = ForeignKeyField(User, backref='userproducts')\r\n product_id = ForeignKeyField(Product, backref='product_ids')\r\n number = IntegerField(constraints=[Check('number >=0')])\r\n\r\nclass Transaction(BaseModel):\r\n user_id = ForeignKeyField(User, backref='transactions')\r\n product_id = ForeignKeyField(Product, backref='products')\r\n number = IntegerField(constraints=[Check('number>0')])\r\n sell_date = DateField()\r\n sell_price = DecimalField(8, 2, True)\r\n\r\nclass ProductTag(BaseModel):\r\n tag = ForeignKeyField(Tag, backref='product_tags')\r\n product = ForeignKeyField(Product, backref='product_tags')\r\n\r\n#ProductTag = Product.tags.get_through_model()\r\n\r\ndef create_tables():\r\n with database:\r\n database.create_tables(\r\n [User, UserProduct, Product, Transaction, ProductTag, Tag]\r\n )\r\n\r\n ","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"384127481","text":"\t# INSTRUCTIONS\n\n\t# In case it is not clear, the Question appears first, then examples, then any hints and finally the function that you need to complete appears underneath:\n\n\t# \n\n\t# \n\n\t# \n\n\t# You are NOT allowed access to the internet for this assessment, instead you should use the DOCUMENTATION that comes bundled with your Python installation. You should already be comfortable accessing this documentation, but to summarise:\n\n\t# Access Python from your CLI\n\n\t# Type help() or for example help(str)\n\n\n\n\t# \n\t\n\t# Given a string, return a string where for every char in the original string, there are three chars.\n \n # \n\n\t# one(\"The\") → \"TTThhheee\"\n\t# one(\"AAbb\") → \"AAAAAAbbbbbb\"\n\t# one(\"Hi-There\") → \"HHHiii---TTThhheeerrreee\"\n\n\t# \n\t# How does a for loop iterate through a string?\n\ndef one(input):\n answer = \"\"\n for i in input:\n answer = answer + i*3\n return answer\n\n\t# \n\n # Write a function which returns the boolean True if the input is only divisible by one and itself.\n \n # The function should return the boolean False if not.\n\n\t# \n\n # two(3) → True\n # two(8) → False\n\n\t# \n\t# What operator will give you the remainder?\n\t# Use your CLI to access the Python documentation and get help manipulating strings - help(range).\n\ndef two(input):\n if input < 2:\n return False\n for i in range(2, input):\n if input % i == 0:\n return False\n return True\n\n\n\t# \n\n # Write a function which takes an integer input, a, and returns the sum a+aa+aaa+aaaa.\n\n\t# So if 2 was the input, the function should return 2+22+222+2222 which is 2468.\n\n\t# \n\n\t# three(9) → 11106\n\t# three(5) → 6170\n\n\t# \n\t# What happens if you multiply a string by a number?\n\ndef three(a):\n String = str(a)\n first = int(String)\n second = int(String * 2)\n third = int(String * 3)\n fourth = int(String * 4)\n added = first + second + third + fourth\n return added\n\n\t# \n\n # Given two Strings of equal length, 'merge' them into one String.\n\n # Do this by 'zipping' the Strings together.\n\n # Start with the first char of the first String.\n # Then add the first char from the second String.\n # Then add the second char from the first String.\n # And so on.\n\n # Maintain case.\n\n # You will not encounter whitespace.\n \n # \n\n\t# 
four(\"String\",\"Fridge\") → \"SFtrriidngge\"\n\t# four(\"Dog\",\"Cat\") → \"DCoagt\"\n\t# four(\"True\",\"Tree\") → \"TTrrueee\" \n\t# four(\"return\",\"letter\") → \"rleettutrenr\"\n\n\t# \n\t# Use your CLI to access the Python documentation and get help manipulating strings - help(list.insert).\n\t# How would you seperate a string into characters?\n\ndef four(input1, input2):\n lst1 = []\n lst2 = []\n answer=[]\n for i in input1:\n lst1.append(i)\n for n in input2:\n lst2.append(n)\n for x in range(len(input1)):\n answer.append(lst1[x])\n answer.append(lst2[x])\n finalAnswer = \"\".join(answer) \n return finalAnswer\n\n\t# \n\n\t# Write a function to randomly generate a list with 5 even numbers between 100 and 200 inclusive.\n \n # \n \n # five() → [100,102,122,198,200]\n # five() → [108,104,106,188,200]\n # five() → [154,102,132,178,164]\n \n\t# \n\t# There is a module which can be used to generate random numbers, this module is called random.\n\t# The random module contains a function called randint.\n\nimport random\ndef five():\n answer = []\n lst= list(range(100,201,2))\n for i in range(5):\n number = random.choice(lst)\n answer.append(number)\n return answer\n\n\t# \n\n\t# Given a string, return the boolean True if it ends in \"py\", and False if not. \n\t\n\t# Ignore Case.\n\n\t# For Example:\n\n\t# six(\"ilovepy\") → True\n\t# six(\"welovepy\") → True\n\t# six(\"welovepyforreal\") → False\n\t# six(\"pyiscool\") → False\n\n\t# \n\t# There are no hints for this question.\n \ndef six(input):\n input = input.lower()\n if input[-2] == \"p\" and input [-1] == \"y\":\n return True\n else:\n return False\n\n\t# \n\n # Given three ints, a b c, one of them is small, one is medium and one is large. \n\t\n\t# Return the boolean True if the three values are evenly spaced, so the\n\t# difference between small and medium is the same as the difference between\n\t# medium and large. 
\n\t\n\t# Do not assume the ints will come to you in a reasonable order.\n \n # \n\n\t# seven(2, 4, 6) → True\n\t# seven(4, 6, 2) → True\n\t# seven(4, 6, 3) → False\n\t# seven(4, 60, 9) → False\n\n\t# \n\t# There is a function for lists called sort.\n\t# Use the cli to access the documentation help(list.sort)\n\ndef seven(a, b, c):\n lst = [a,b,c]\n sortedlst = sorted(lst)\n if sortedlst[1] - sortedlst[0] == sortedlst[2] - sortedlst[1]:\n return True\n else:\n return False\n\n\n\t# \n\n # Given a string and an integer, n, return a string that removes n letters from the 'middle' of the string.\n\t\n\t# The string length will be at least n, and n will be odd when the length of the input is odd, so there will always be a 'middle'.\n\n # \n\n\t# eight(\"Hello\", 3) → \"Ho\"\n\t# eight(\"Chocolate\", 3) → \"Choate\"\n\t# eight(\"Chocolate\", 1) → \"Choclate\"\n\n\t# \n # Use the cli to access the documentation help(str.replace)\n\ndef eight(input, a):\n input = list(input)\n for i in range(a):\n middle = int(len(input)/ 2)\n input.pop(middle)\n answer = \"\".join(input)\n return answer\n\n\t# \n\n # Given two string inputs, if one can be made from the other return the boolean True, if not return the boolean False.\n\n\t# \n\n # nine(\"god\", \"dog\") → True\n # nine(\"tree\", \"tiredest\") → True\n # nine(\"cat\", \"dog\") → False\n # nine(\"tripping\", \"gin\") → True\n\n\t# \n\t# There are no hints for this question.\n\ndef nine(string1, string2):\n lst1 = list(string1)\n lst2 = list(string2)\n lst2match = []\n lst1match = []\n for letter in lst1:\n for character in lst2:\n if letter == character:\n lst2match.append(character)\n lst2.remove(character)\n break\n lst2 = list(string2)\n for letter in lst2:\n for character in lst1:\n if letter == character:\n lst1match.append(character)\n lst1.remove(character)\n break\n lst1 = list(string1)\n lst2 = list(string2)\n if lst1 == lst2match or lst2 == lst1match:\n return True\n else:\n return False\n\n\t# \n\n # Write a function which takes 2 integers greater than 0, X,Y as input and generates a 2-dimensional array. 
\n\t\n\t# The element value in the i-th row and j-th column of the array should be i*j.\n\n\t# \n\n\t# ten(3,2) → [[0,0,0],[0,1,2]]\n\t# ten(2,1) → [[0,0]]\n\t# ten(3,4) → [[0,0,0],[0,1,2],[0,2,4],[0,3,6]]\n\n\t# \n\t# Think about nesting for loops.\n\ndef ten(X,Y):\n lst= []\n for i in range(0, Y):\n nestlst = []\n for n in range(0, X):\n nestlst.append(i*n)\n lst.append(nestlst)\n return lst","sub_path":"Python/Python Assessment 2/Code/python2.py","file_name":"python2.py","file_ext":"py","file_size_in_byte":7327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"240754056","text":"# Copyright (C) 2020 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"\nAdd Primary and Secondary contact rights to delete evidences.\n\nCreate Date: 2019-05-21 10:25:26.853322\n\"\"\"\n# disable Invalid constant name pylint warning for mandatory Alembic variables.\n# pylint: disable=invalid-name\n\nfrom ggrc.migrations.utils import acr_propagation\n\n# revision identifiers, used by Alembic.\nrevision = '87fa3c8cb442'\ndown_revision = 'b881918e8cdc'\n\n\nEVIDENCE_RUD = {\n \"Relationship R\": {\n \"Evidence RUD\": {},\n },\n}\n\nNEW_ROLES_PROPAGATION = {\n \"Primary Contacts\": EVIDENCE_RUD,\n \"Secondary Contacts\": EVIDENCE_RUD,\n}\n\nGGRC_NEW_ROLES_PROPAGATION = {\n \"Assessment\": NEW_ROLES_PROPAGATION,\n}\n\n\ndef upgrade():\n \"\"\"Upgrade database schema and/or data, creating a new revision.\"\"\"\n acr_propagation.propagate_roles(\n GGRC_NEW_ROLES_PROPAGATION,\n with_update=True\n )\n\n\ndef downgrade():\n \"\"\"Downgrade database schema and/or data back to the previous revision.\"\"\"\n raise NotImplementedError(\"Downgrade is not supported\")\n","sub_path":"src/ggrc/migrations/versions/20190521_87fa3c8cb442_add_rights_to_delete_evidence.py","file_name":"20190521_87fa3c8cb442_add_rights_to_delete_evidence.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"616555253","text":"from random import *\r\n\r\nclass Unit : \r\n def __init__(self,name,hp,speed) : \r\n self.name = name\r\n self.hp = hp\r\n self.speed = speed\r\n print(\"{0} unit has been created.\".format(name))\r\n\r\n def move(self,location) :\r\n print(\"{0} : moving toward {1}. [speed:{2}]\".format(self.name, location, self.speed))\r\n\r\n def damaged(self, damage) : \r\n print(\"{0} : took {1} damage.\".format(self.name, damage))\r\n self.hp -= damage\r\n print(\"{0} : current HP is {1}.\".format(self.name, self.hp))\r\n if self.hp <=0 :\r\n print(\"{0} : has been destroyed\".format(self.name))\r\n\r\n\r\n\r\n\r\n# attack unit\r\nclass AttackUnit(Unit) :\r\n def __init__(self,name,hp,speed,damage) :\r\n Unit.__init__(self,name,hp,speed) \r\n self.damage = damage\r\n\r\n def attack(self, location) :\r\n print(\"{0} : attacking toward {1}. [attack power {2}]\"\\\r\n .format(self.name,location, self.damage))\r\n\r\n\r\n# flying unit\r\nclass Flyable : \r\n def __init__(self, flying_speed) :\r\n self.flying_speed = flying_speed\r\n\r\n def fly(self, name, location) :\r\n print(\"{0} : flying toward {1}. [speed {2}]\".format(name, location, self.flying_speed))\r\n\r\n\r\nclass FlyableAttackUnit(AttackUnit , Flyable) : \r\n def __init__(self,name,hp,damage,flying_speed) :\r\n AttackUnit.__init__(self, name, hp, 0, damage) ## ground speed is 0 for a flying unit\r\n Flyable.__init__(self, flying_speed)\r\n\r\n def move(self, location) : \r\n self.fly(self.name,location) ## this move differs from Unit's move; it is newly defined in the child class.\r\n\r\n\r\n\r\n\r\n# Marine\r\nclass Marine(AttackUnit) : \r\n def __init__(self) : \r\n AttackUnit.__init__(self,\"Marine\",40,1,5)\r\n\r\n def steampack(self) : ## stimpack: costs 10 HP, increases movement and attack speed.\r\n if self.hp>10 :\r\n self.hp -=10\r\n print(\"{0} : using steampack. (HP reduced by 10)\".format(self.name))\r\n else :\r\n print(\"{0} : not enough HP to use steampack\".format(self.name))\r\n\r\n\r\n# Tank has a siege mode: it cannot move, but its attack power increases. Assume siege mode on/off applies to all tanks equally.\r\nclass Tank(AttackUnit) :\r\n seize_developed = False # whether siege mode has been researched\r\n\r\n def __init__(self) : \r\n AttackUnit.__init__(self,\"Tank\",150,1,35)\r\n self.seize_mode = False\r\n\r\n def set_seize_mode(self) : \r\n if Tank.seize_developed == False : \r\n return # siege mode not researched yet, so just return.\r\n\r\n # not in siege mode -> switch to siege mode\r\n if self.seize_mode == False :\r\n print(\"{0} : switching to siege mode.\".format(self.name))\r\n self.damage *=2\r\n self.seize_mode = True\r\n\r\n # in siege mode -> leave siege mode\r\n\r\n else :\r\n print(\"{0} : leaving siege mode.\".format(self.name))\r\n self.damage /=2\r\n self.seize_mode = False\r\n\r\n\r\n\r\nclass Wraith(FlyableAttackUnit) :\r\n def __init__(self) :\r\n FlyableAttackUnit.__init__(self,\"Wraith\",80,20,5)\r\n self.clocked = False # cloaking mode is off\r\n\r\n\r\n def clocking(self) :\r\n\r\n if self.clocked == True : # cloaking is on, so turn it off\r\n print(\"{0} : deactivating cloaking mode.\".format(self.name))\r\n self.clocked =False\r\n \r\n else : # cloaking is off -> turn it on\r\n print(\"{0} : activating cloaking mode\".format(self.name))\r\n self.clocked = True\r\n\r\n\r\n\r\ndef game_start() :\r\n print(\"[Notice] Starting a new game.\")\r\n\r\ndef game_over() :\r\n print(\"Player : gg\")\r\n print(\"[Player] has left the game.\")\r\n\r\n\r\n\r\n# play an actual game\r\n\r\ngame_start()\r\n\r\n# create 3 Marines\r\nm1 = Marine()\r\nm2 = Marine()\r\nm3 = Marine()\r\n\r\n# create 2 Tanks\r\nt1 =Tank()\r\nt2 =Tank()\r\n\r\n# create 1 Wraith\r\nw1 = Wraith()\r\n\r\n\r\n# manage units in bulk (make a list)\r\nAttack_units = []\r\nAttack_units.append(m1)\r\nAttack_units.append(m2)\r\nAttack_units.append(m3)\r\nAttack_units.append(t1)\r\nAttack_units.append(t2)\r\nAttack_units.append(w1)\r\n\r\n# move all units\r\n\r\nfor unit in Attack_units :\r\n unit.move(\"1 o'clock\")\r\n\r\n# research tank siege mode\r\nTank.seize_developed = True\r\nprint(\"[Notice] Tank siege mode research is complete.\")\r\n\r\n# prepare attack mode ( Marine : stimpack, Tank : siege mode, Wraith : cloaking )\r\nfor unit in Attack_units : \r\n if isinstance(unit, Marine) : # check which class this object is an instance of\r\n unit.steampack()\r\n \r\n elif isinstance(unit, Tank) :\r\n unit.set_seize_mode()\r\n\r\n elif isinstance(unit,Wraith) :\r\n unit.clocking()\r\n\r\n\r\n# all units attack\r\nfor unit in Attack_units :\r\n unit.attack(\"1 o'clock\")\r\n\r\n# all units take damage\r\nfor unit in Attack_units :\r\n unit.damaged(randint(5,21)) # takes a random amount of damage (5~21)\r\n\r\n# game over\r\ngame_over()","sub_path":"나도코딩 6시간/21.1.30 6) 스타크래프트 전반전.py","file_name":"21.1.30 6) 스타크래프트 전반전.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"371490892","text":"'''\nPaige Eckstein\nApplied Discrete Mathematics\n4/5/17\nExample of Deferred Acceptance Algorithm\n(or Gale-Shapley Algorithm)\n'''\n\n#==============================================================================\n#=============================Base Entity Class================================\n#==============================================================================\n# Base Entity for both students and instructors\n# contains three fields:\n# name\n# priorities: list of instructors to be matched with, from most to least desired\n# match: holds current Instructor the student is matched with\n\n\nclass BaseEntity:\n def __init__(self, name, priorities):\n self.name = name\n self.priorities = priorities\n self.match = None\n\n#==============================================================================\n#==============================Student Class===================================\n#==============================================================================\n# Inherits from BaseEntity, use BaseEntity's initializer for name and priorities\n# matchIndex field used to iterate through list of priorities\n\n\nclass Student(BaseEntity):\n def __init__(self, name, priorities):\n BaseEntity.__init__(self, name, priorities)\n self.matchIndex = 0\n\n # goal is next instructor in priority list\n def nextMatch(self):\n goal = self.priorities[self.matchIndex]\n self.matchIndex += 1\n return goal\n\n#==============================================================================\n#==============================Instructor Class================================\n#==============================================================================\n# Inherits from BaseEntity, use BaseEntity's initializer for name and priorities\n# ranking works as reverse lookup to save time\n\n\nclass Instructor(BaseEntity):\n def __init__(self, name, priorities):\n BaseEntity.__init__(self, name, priorities)\n self.ranking = {}\n for rank in range(len(priorities)):\n self.ranking[priorities[rank]] = rank\n\n # returns true if the instructor does not currently have a match\n # or if the student currently asking to be matched is of higher priority\n # than the instructor's currently matched student\n def evaluateMatch(self, matcher):\n return self.match == None or self.ranking[matcher] < self.ranking[self.match]\n\n#==============================================================================\n#==============================Main functions==================================\n#==============================================================================\n# gets list of individuals with their ranked priority lists\n\n\ndef getInfo(type):\n entities = []\n totalEntities = int(\n input(\"How many \" + type + \"s will you be entering?\\n\"))\n print(\"Enter each \" + type + \" in the format Name : pref1, pref2, pref3,...,\")\n for _ in range(0, totalEntities):\n fullInfo = input().split(':')\n name = fullInfo[0].strip()\n if name:\n priorities = fullInfo[1].strip().split(',')\n for i in range(len(priorities)):\n priorities[i] = priorities[i].strip()\n entities.append((name, priorities))\n return entities\n\n# print student, instructor pairings\n\n\ndef printPairings(students):\n print()\n for student in students.values():\n 
print(student.name, 'is paired with', str(student.match))\n\n#==============================================================================\n#====================================Main======================================\n#==============================================================================\n\n\ndef main():\n # get list of student's along with their respective lists of priorities\n studentList = getInfo(\"student\")\n\n # dictionary to hold Student objects\n students = dict()\n\n # fill students dictionary with Student objects, where the student's name\n # is the key and a Student object is initialized as the value\n for student in studentList:\n students[student[0]] = Student(student[0], student[1])\n\n # unmatchedStudents initially are all of them, so set to all student names\n unmatchedStudents = list(students.keys())\n\n # get list of instructor's along with their respective lists of priorities\n instructorList = getInfo(\"instructor\")\n\n # dictionary to hold Instructor objects\n instructors = dict()\n\n # fill instructors dictionary with Instructor objects, where the instructor's\n # name is the key and an Instructor object is initialized as the value\n for instructor in instructorList:\n instructors[instructor[0]] = Instructor(instructor[0], instructor[1])\n\n print()\n\n # main part of algorithm\n while unmatchedStudents:\n # current student to match is the first student in the list of\n # unmatched students\n currentStudent = students[unmatchedStudents[0]]\n\n # current instructor to match is the instructor next in the current\n # student's priority list\n currentInstructor = instructors[currentStudent.nextMatch()]\n\n print(currentStudent.name, 'offers match with', currentInstructor.name)\n\n # evaluate match\n # successful match:\n if currentInstructor.evaluateMatch(currentStudent.name):\n print(' ', currentInstructor.name, 'accepts the match.')\n\n # if the current instructor already has a match with a student\n # unmatch them, and return the student to the unmatchedStudents list\n if currentInstructor.match:\n oldStudent = students[currentInstructor.match]\n oldStudent.match = None\n unmatchedStudents.append(oldStudent.name)\n\n # remove the newly matched student from unmatchedStudents\n unmatchedStudents.remove(currentStudent.name)\n\n # set each entities match to the appropriate name\n currentInstructor.match = currentStudent.name\n currentStudent.match = currentInstructor.name\n\n # unsuccessful match:\n else:\n print(' ', currentInstructor.name, 'rejects the match.')\n\n # print tentative matches after each round\n print()\n print(\"Tentative Matches:\")\n printPairings(students)\n print()\n\n # once there are no elements in unmatchedStudents, algorithm is complete\n # print out final matches\n print(\"Final Matches:\")\n printPairings(students)\n\n\nmain()\n","sub_path":"AppliedDiscreteMath/Deferred Acceptance Algorithm/daa3.py","file_name":"daa3.py","file_ext":"py","file_size_in_byte":6486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"135476408","text":"#!/usr/bin/env python3\n# A simple \"chat\" program to demonstrate the use of ZeroMQ interface\n# with the modem application.\n# Since the modem currently supports only fixed-length frames,\n# this program uses the first byte of a frame to indicate the\n# length of the payload and pads the rest with a constant byte.\n\nFRAME_LENGTH = 223\nPAD_BYTE = b'.'\n\nimport zmq, threading\nctx = zmq.Context()\n\nrx = 
ctx.socket(zmq.SUB)\nrx.connect(\"tcp://localhost:43300\")\nrx.setsockopt(zmq.SUBSCRIBE, b\"\")\n\ntx = ctx.socket(zmq.PUB)\ntx.connect(\"tcp://localhost:43301\")\n\ndef rx_loop():\n\twhile True:\n\t\trxframe = rx.recv()\n\t\tprint(rxframe[1 : 1+rxframe[0]].decode('utf-8','ignore'))\n\nrxthread = threading.Thread(target=rx_loop, daemon=True)\nrxthread.start()\n\nwhile True:\n\ttxtext = input()\n\t# Reserve space for the length byte\n\ttxpayload = txtext.encode('utf-8','ignore')\n\n\t# Truncate a too long payload, pad a too short one.\n\t# Also add the length byte.\n\tif len(txpayload) >= FRAME_LENGTH-1:\n\t\ttxframe = bytes([FRAME_LENGTH-1]) + txpayload[0:FRAME_LENGTH-1]\n\telse:\n\t\ttxframe = bytes([len(txpayload)]) + txpayload + \\\n\t\t\tPAD_BYTE * (FRAME_LENGTH-1-len(txpayload))\n\n\ttx.send(txframe)\n","sub_path":"example/zmq_example.py","file_name":"zmq_example.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"99136434","text":"#!/usr/local/bin/python\n\n\"\"\"\nThis script starts all the service classes from config.yml\nComplete configuration is passed through to each service\n\"\"\"\n#https://pythonspot.com/login-to-flask-app-with-google/\nimport signal\nimport shlex\nimport importlib\nimport subprocess\nimport time\nimport os\nimport pathlib\nimport urllib\nimport sys\nimport yaml\nimport argparse\nimport paho.mqtt.client as mqtt\nimport json\nimport random\nimport string\nimport logging\nimport asyncio\nimport os\n#from hbmqtt.broker import Broker\nfrom flask import Flask, redirect, url_for, cli, redirect\nfrom flask_dance.contrib.google import make_google_blueprint, google\nfrom subprocess import call, run\nimport AuthService\nimport WebService\nfrom dotenv import load_dotenv\nload_dotenv()\nfrom rasa.train import train\nfrom rasa.nlu.convert import convert_training_data\n \n# threads are used for external processes - mqtt, rasa, rasa action server, web server\nfrom ThreadHandler import ThreadHandler\nTHREAD_HANDLER = ThreadHandler()\n\nPARSER = argparse.ArgumentParser(description=\"Run Hermod voice suite\")\n\n\n\n# PARSER.add_argument('-sd', '--speakerdevice', type=str, default='',\n\t\t\t\t\t# help=\"Alsa device name for speaker\")\n \n# PARSER.add_argument('-md', '--microphonedevice', type=str, default='',\n\t\t\t\t\t# help=\"Alsa device name for microphone\")\n\nPARSER.add_argument('-m', '--mqttserver',action='store_true',\n\t\t\t\t\thelp=\"Run MQTT server\")\n \n \nPARSER.add_argument('-r', '--rasaserver', action='store_true',\n\t\t\t\t\thelp=\"Run RASA server\")\n \nPARSER.add_argument('-t', '--train', action='store_true',\n\t\t\t\t\thelp=\"Train RASA models when starting local RASA server\")\n \nPARSER.add_argument('-w', '--webserver', action='store_true',\n\t\t\t\t\thelp=\"Run hermod web server\")\n \nPARSER.add_argument('-a', '--actionserver', action='store_true',\n\t\t\t\t\thelp=\"Run local rasa_sdk action server\")\n \nPARSER.add_argument('-d', '--hermod', action='store_true', default=False,\n\t\t\t\t\thelp=\"Start hermod services\")\n \nPARSER.add_argument('-sm', '--satellite', action='store_true', default=False,\n\t\t\t\t\thelp=\"Only start hermod local audio and hotword services\") \n\nPARSER.add_argument('-nl', '--nolocalaudio', action='store_true', default=False,\n\t\t\t\t\thelp=\"Dont start hermod local audio or hotword service\") \n\n#cli.load_dotenv(path=os.path.dirname(__file__))\n\nARGS = PARSER.parse_args()\n#print(ARGS) \n\n\nF = 
open(os.path.join(os.path.dirname(__file__), 'config-all.yaml'), \"r\")\nCONFIG = yaml.load(F.read(), Loader=yaml.FullLoader)\n\n# F = open(os.path.join(os.path.dirname(__file__), 'secrets.yaml'), \"r\")\n# secrets = yaml.load(F.read(), Loader=yaml.FullLoader)\n# if not secrets: secrets = {}\n\nif ARGS.webserver > 0:\n # TODO dev mode rebuild web - (NEED docker rebuild with npm global watchify)\n # watchify index.js -v -o static/bundle.js\n THREAD_HANDLER.run(WebService.start_server,{'config':CONFIG})\n\n\n\n# start rasa action server\ndef start_rasa_action_server(run_event):\n print('START RASA ACTIONS SERVER')\n cmd = ['python','-m','rasa_sdk','--actions','actions','-vv'] \n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False, cwd=os.path.join(os.path.dirname(__file__),'../rasa'))\n while run_event.is_set():\n time.sleep(1)\n p.terminate()\n p.wait()\n \nif ARGS.actionserver > 0:\n THREAD_HANDLER.run(start_rasa_action_server)\n\n# start rasa server\ndef start_rasa_server(run_event):\n print('START RASA SERVER')\n if os.getenv('RASA_ACTIONS_URL') and len(os.getenv('RASA_ACTIONS_URL')) > 0:\n # ensure rasa endpoints file matches RASA_ACTIONS_URL env var\n endpoints_file = open(os.path.join(os.path.dirname(__file__), '../rasa/endpoints.yml'), \"r\")\n endpoints = yaml.load(endpoints_file.read(), Loader=yaml.FullLoader)\n print('ENDPOINTS')\n print(endpoints)\n endpoints['action_endpoint']={\"url\":os.getenv('RASA_ACTIONS_URL')}\n # write updates\n with open(os.path.join(os.path.dirname(__file__), '../rasa/endpoints.yml'),'w') as outfile:\n yaml.dump(endpoints,outfile, default_flow_style = False)\n print('ENDPOINTS WRITTEN')\n \n cmd = ['rasa','run','--enable-api'] \n # '--debug',,'--model','models'\n # p2 = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, cwd=os.path.join(os.path.dirname(__file__),'../rasa'), env={'RASA_ACTIONS_URL':os.getenv('RASA_ACTIONS_URL')})\n \n p2 = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False, cwd=os.path.join(os.path.dirname(__file__),'../rasa'))\n while run_event.is_set():\n time.sleep(1)\n p2.terminate()\n p2.wait()\n\ndef train_rasa():\n print('TRAIN RASA')\n \n cmd = ['npx chatito --format rasa data/']\n p = call(cmd, shell=True, cwd=os.path.join(os.path.dirname(__file__),'../rasa/chatito'))\n \n convert_training_data(data_file=os.path.join(os.path.dirname(__file__),'../rasa/chatito/rasa_dataset_training.json'), out_file=os.path.join(os.path.dirname(__file__),'../rasa/chatito/nlu.md'), output_format=\"md\", language=\"\")\n\n \n train(\n domain= os.path.join(os.path.dirname(__file__),'../rasa/domain.yml'),\n config= os.path.join(os.path.dirname(__file__),'../rasa/config.yml'),\n training_files= [os.path.join(os.path.dirname(__file__),'../rasa/data/nlu.md'),os.path.join(os.path.dirname(__file__),'../rasa/data/stories.md'),os.path.join(os.path.dirname(__file__),'../rasa/chatito/nlu.md')],\n output= os.path.join(os.path.dirname(__file__),'../rasa/models')\n )\n \n\n \nif not os.environ.get('RASA_ACTIONS_URL'):\n os.environ['RASA_ACTIONS_URL'] = 'http://localhost:5055/webhook'\nif not os.environ.get('DUCKLING_URL'):\n os.environ['DUCKLING_URL'] = 'http://localhost:8000'\n \nif ARGS.train:\n train_rasa()\n \nif ARGS.rasaserver and CONFIG['services'].get('RasaService',False):\n THREAD_HANDLER.run(start_rasa_server)\n \n# use recent version of mosquitto\ndef start_mqtt_server(run_event):\n print('START MQTT SERVER')\n # /app/mosquitto-1.6.9/src/\n cmd = ['mosquitto','-v','-c','/etc/mosquitto/mosquitto.conf'] \n p = 
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)\n while run_event.is_set():\n time.sleep(1)\n p.terminate()\n p.wait()\n\ndef start_secure_mqtt_server(run_event):\n print('START SECURE MQTT SERVER')\n cmd = ['mosquitto','-v','-c','/etc/mosquitto/mosquitto-ssl.conf'] \n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)\n while run_event.is_set():\n time.sleep(1)\n p.terminate()\n p.wait()\n \n# send HUP signal to mosquitto when password file is updated \ndef start_mqtt_auth_watcher(run_event):\n print('START MQTT WATCHER')\n #os.path.join(os.path.dirname(__file__),\n cmd = ['/app/src/mosquitto_watcher.sh']\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) # , cwd=os.path.join(os.path.dirname(__file__))\n while run_event.is_set():\n time.sleep(1)\n p.terminate()\n p.wait()\n\n \nif ARGS.mqttserver > 0:\n # ensure admin password \n if os.getenv('MQTT_USER') is not None:\n CONFIG['mqtt_user'] = os.getenv('MQTT_USER')\n if os.getenv('MQTT_PASSWORD') is not None:\n CONFIG['mqtt_password'] = os.getenv('MQTT_PASSWORD')\n print('PRESET ADMIN PASSWORD TO MOSQ DB')\n print(CONFIG)\n cmd = ['/usr/bin/mosquitto_passwd','-b','/etc/mosquitto/password',CONFIG['mqtt_user'],CONFIG['mqtt_password']] \n p = call(cmd)\n print('SET ADMIN PASSWORD TO MOSQ DB')\n cmd2 = ['/app/src/update_acl.sh' , CONFIG['mqtt_user']]\n p = call(cmd2, shell=True, cwd=os.path.join(os.path.dirname(__file__)))\n print('SET ADMIN USER IN ACL')\n if os.environ.get('SSL_CERTIFICATES_FOLDER') and os.path.isfile(os.environ.get('SSL_CERTIFICATES_FOLDER')+'/cert.pem') and os.path.isfile(os.environ.get('SSL_CERTIFICATES_FOLDER')+'/fullchain.pem') and os.path.isfile(os.environ.get('SSL_CERTIFICATES_FOLDER')+'/privkey.pem'):\n # use mosquitto conf template to rewrite mosquitto conf file including env SSL_CERTIFICATES_FOLDER\n cmd = ['/app/src/update_ssl.sh' + ' ' + os.environ.get('SSL_CERTIFICATES_FOLDER')]\n p = call(cmd, shell=True, cwd=os.path.join(os.path.dirname(__file__)))\n THREAD_HANDLER.run(start_secure_mqtt_server)\n else:\n THREAD_HANDLER.run(start_mqtt_server)\n THREAD_HANDLER.run(start_mqtt_auth_watcher)\n\t\n\t# # use hbmqtt\n\t# config = {\n\t\t# 'listeners': {\n\t\t\t# 'default': {\n\t\t\t\t# 'type': 'tcp',\n\t\t\t\t# 'bind': '0.0.0.0:1883',\n\t\t\t# },\n\t\t\t# 'ws-mqtt': {\n\t\t\t\t# 'bind': '127.0.0.1:8080',\n\t\t\t\t# 'type': 'ws',\n\t\t\t\t# 'max_connections': 30,\n\t\t\t# },\n\t\t# },\n\t\t# 'sys_interval': 10,\n\t\t# 'auth': {\n\t\t\t# 'allow-anonymous': True,\n\t\t\t# #'password-file': os.path.join(os.path.dirname(os.path.realpath(__file__)), \"passwd\"),\n\t\t\t# 'plugins': [\n\t\t\t\t# 'auth_file', 'auth_anonymous'\n\t\t\t# ]\n\t\t# },\n\t\t# 'topic-check': {\n\t\t\t# 'enabled': False\n\t\t# }\n\t# }\t\n\t\n\t# @asyncio.coroutine\n\t# def broker_coro():\n\t\t# broker = Broker(config)\n\t\t# yield from broker.start()\n\n\t# def start_mqtt_server(run_event):\n\t\t# print('START MQTT SERVER')\n\t\t# #asyncio.new_event_loop()\n\t\t# ioloop = asyncio.new_event_loop()\n\t\t# ioloop.run_until_complete(broker_coro())\n\t\t# ioloop.run_forever()\n\t\t\n\t# THREAD_HANDLER.run(start_mqtt_server)\n\n\n\nasync def async_start_hermod():\n # start hermod services as asyncio events in an event loop\n SERVICES = []\n print('START HERMOD SERVICES')\n MODULE_DIR = os.getcwd()\n sys.path.append(MODULE_DIR)\n \n # OVERRIDE CONFIG\n # admin mqtt connection\n if os.getenv('MQTT_HOSTNAME') is not None:\n CONFIG['mqtt_hostname'] = os.getenv('MQTT_HOSTNAME')\n # MQTT host from args\n # if 
len(ARGS.mqttserver_host) > 0 :\n # CONFIG['mqtt_hostname']= ARGS.mqttserver_host\n\n if os.getenv('MQTT_PORT') is not None:\n CONFIG['mqtt_port'] = int(os.getenv('MQTT_PORT'))\n if os.getenv('MQTT_USER') is not None:\n CONFIG['mqtt_user'] = os.getenv('MQTT_USER')\n if os.getenv('MQTT_PASSWORD') is not None:\n CONFIG['mqtt_password'] = os.getenv('MQTT_PASSWORD')\n \n \n if os.getenv('DEEPSPEECH_MODELS') is not None and 'DeepSpeechAsrService' in CONFIG['services']:\n CONFIG['services']['DeepSpeechAsrService']['model_path'] = os.getenv('DEEPSPEECH_MODELS')\n \n\n # disable deepspeech and enable IBM ASR\n \n if os.getenv('IBM_SPEECH_TO_TEXT_APIKEY',None) is not None and len(os.getenv('IBM_SPEECH_TO_TEXT_APIKEY','')) > 0 :\n print('ENABLE IBM ASR')\n #del CONFIG['services']['DeepspeechAsrService']\n CONFIG['services'].pop('DeepspeechAsrService',None)\n CONFIG['services']['IbmAsrService'] = {'vad_sensitivity':1 } #'language': os.environ.get('GOOGLE_APPLICATION_LANGUAGE','en-AU')}\n # print(CONFIG['services'])\n \n \n # disable deepspeech and enable google ASR\n if os.getenv('GOOGLE_ENABLE_ASR',False)==\"true\" and os.getenv('GOOGLE_APPLICATION_CREDENTIALS',None) is not None and os.path.isfile(os.getenv('GOOGLE_APPLICATION_CREDENTIALS')):\n print('ENABLE GOOGLE ASR')\n CONFIG['services'].pop('DeepspeechAsrService',None)\n CONFIG['services'].pop('IbmAsrService',None)\n #del CONFIG['services']['DeepspeechAsrService']\n #print(CONFIG)\n CONFIG['services']['GoogleAsrService'] = {'language': os.environ.get('GOOGLE_APPLICATION_LANGUAGE','en-AU')}\n \n print('CHECK GOOGLE TTS')\n print(os.getenv('GOOGLE_ENABLE_TTS',''))\n print(os.getenv('GOOGLE_APPLICATION_CREDENTIALS',''))\n if os.getenv('GOOGLE_ENABLE_TTS',False)==\"true\" and os.getenv('GOOGLE_APPLICATION_CREDENTIALS',None) is not None and len(os.getenv('GOOGLE_APPLICATION_CREDENTIALS','')) > 0 :\n print('ENABLE GOOGLE TTS')\n #del CONFIG['services']['DeepspeechAsrService']\n CONFIG['services'].pop('Pico2wavTtsService',None)\n CONFIG['services']['GoogleTtsService'] = { 'language': os.environ.get('GOOGLE_APPLICATION_LANGUAGE','en-AU'), 'cache':'/tmp/tts_cache'} #}\n # print(CONFIG['services'])\n \n \n if os.getenv('RASA_URL') and len(os.getenv('RASA_URL')) > 0:\n #print('SET RASA URL '+os.getenv('RASA_URL'))\n rasa_service = CONFIG['services'].get('RasaService',{})\n rasa_service['rasa_server'] = os.getenv('RASA_URL')\n #print(rasa_service)\n CONFIG['services']['RasaService'] = rasa_service \n \n # print(CONFIG['services'])\n # SET SOUND DEVICES FROM ENVIRONMENT VARS\n if os.getenv('SPEAKER_DEVICE') is not None and 'AudioService' in CONFIG['services']:\n CONFIG['services']['AudioService']['outputdevice'] = os.getenv('SPEAKER_DEVICE')\n if os.getenv('MICROPHONE_DEVICE') is not None and 'AudioService' in CONFIG['services']:\n CONFIG['services']['AudioService']['inputdevice'] = os.getenv('MICROPHONE_DEVICE')\n # print('audio override')\n # print(CONFIG['services'].get('AudioService'))\n # # OVERRIDE SOUND DEVICES FROM CLI ARGS\n # if len(ARGS.speakerdevice) > 0 and 'AudioService' in CONFIG['services']:\n # CONFIG['services']['AudioService']['outputdevice'] = ARGS.speakerdevice\n # if len(ARGS.microphonedevice) > 0 and 'AudioService' in CONFIG['services']:\n # CONFIG['services']['AudioService']['inputdevice'] = ARGS.microphonedevice\n \n # satellite mode\n if ARGS.satellite:\n services = {'AudioService': CONFIG['services']['AudioService'], 'PicovoiceHotwordService':CONFIG['services']['PicovoiceHotwordService']}\n CONFIG['services']= services\n 
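# Minimal sketch of the env-override pattern used throughout this function; the helper\n # name override_from_env is illustrative only - it is not part of the original flow\n # and is never called:\n def override_from_env(config, key, env_name, cast=str):\n value = os.getenv(env_name)\n if value is not None:\n config[key] = cast(value)\n return config\n # e.g. override_from_env(CONFIG, 'mqtt_port', 'MQTT_PORT', cast=int)\n 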
# no local audio/hotword\n if ARGS.nolocalaudio:\n if 'AudioService' in CONFIG['services']: del CONFIG['services']['AudioService']\n if 'PicovoiceHotwordService' in CONFIG['services']: del CONFIG['services']['PicovoiceHotwordService']\n \n # print('START SERVER 2')\n # print(CONFIG)\n \n while True:\n loop = asyncio.get_event_loop()\n # loop.set_debug(True)\n run_services = []\n for service in CONFIG['services']:\n # force dialog initialise if argument present\n full_path = os.path.join(MODULE_DIR, 'src',service + '.py')\n module_name = pathlib.Path(full_path).stem\n module = importlib.import_module(module_name)\n print(module_name)\n a = getattr(module, service)(CONFIG,loop)\n run_services.append(a.run())\n # extra event loop threads on init\n if hasattr(a,'also_run'):\n # print(a.also_run)\n for i in a.also_run:\n run_services.append(i())\n print('starting services')\n print(run_services)\n await asyncio.gather(*run_services, return_exceptions = True)\n \n # print('started services')\n #loop.run_until_complete()\n # print('ended services')\n\n\ndef start_hermod(run_event):\n #loop = asyncio.get_event_loop()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n #loop.set_exception_handler(handle_exception)\n while run_event.is_set():\n print('START HERMOD REQUEST ASYNC')\n asyncio.run(async_start_hermod())\n \n # May want to catch other signals too\n # signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)\n # for s in signals:\n # loop.add_signal_handler(\n # s, lambda s=s: asyncio.create_task(shutdown(s, loop)))\n # try:\n # loop.create_task(async_start_hermod())\n # loop.run_forever()\n # finally:\n # loop.close()\n # logging.info(\"Successfully shutdown the Hermod service.\") \n \n# https://www.roguelynn.com/words/asyncio-exception-handling/ \nasync def shutdown(loop, signal=None):\n print('HERMOD SHUTDOWN')\n \"\"\"Cleanup tasks tied to the service's shutdown.\"\"\"\n if signal:\n print(f\"Received exit signal {signal.name}...\")\n print(\"Closing database connections\")\n print(\"Nacking outstanding messages\")\n tasks = [t for t in asyncio.all_tasks() if t is not\n asyncio.current_task()]\n\n [task.cancel() for task in tasks]\n\n print(f\"Cancelling {len(tasks)} outstanding tasks\")\n await asyncio.gather(*tasks)\n print(f\"Flushing metrics\")\n loop.stop()\n \ndef handle_exception(loop, context):\n # context[\"message\"] will always be there; but context[\"exception\"] may not\n msg = context.get(\"exception\", context[\"message\"])\n print(f\"Caught exception: {msg}\")\n print(\"Shutting down...\")\n asyncio.create_task(shutdown(loop)) \n \nif ARGS.hermod:\n THREAD_HANDLER.run(start_hermod)\n \n# start all threads\nTHREAD_HANDLER.start_run_loop()\n\n","sub_path":"hermod-python/src/hermod.py","file_name":"hermod.py","file_ext":"py","file_size_in_byte":16890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"543156761","text":"import os\nimport sys\nsys.path.append(\"Model/PreProcessing\")\nsys.path.append(\"Model/Models\")\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(\n \"/Users/jerome/Documents/NYU/Capstone/DashOfData/Model/Models\"\n)\nfrom posData_preprocessing import get_posData\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# # NY\n# account_id = 1\n# city_id = 5128581\n# start_date = '2014-03-01'\n# end_date = '2018-05-01'\n\n# CT\naccount_id = 2\ncity_id = 
4843564\nstart_date = '2018-01-02'\nend_date = '2019-01-30'\n\nposData = get_posData(account_id, start_date, end_date)\n\ndef train_test_split(data, n_test):\n return data[:-n_test], data[-n_test:]\n\n# Target variable\ntarget_variable = pd.DataFrame(posData.guests)\n\n# days to forecast\nn_test = 7\n\n# Split target variable into training/test set\ntrain, test = train_test_split(target_variable, n_test)\n\n#Model 2\ny_hat_avg = test.copy()\ny_hat_avg['avg_forecast'] = train['guests'].mean()\nplt.figure(figsize=(12,8))\nplt.plot(test['guests'], label='Test')\nplt.plot(y_hat_avg['avg_forecast'], label='Average Forecast')\nplt.title(\"CT Simple Average\")\nplt.legend(loc='best')\nplt.show()\n\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nrms = sqrt(mean_squared_error(test.guests, y_hat_avg.avg_forecast))\nprint(rms)","sub_path":"Model/Models/simple_average_forecast.py","file_name":"simple_average_forecast.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"246018016","text":"def str2intlist(string, spliter=','):\n ''' String to integer list\n\n Input: '1, 2, 3, 4, 5, a, b, c, ...'\n Output: ([1, 2, 3, 4, 5, ...], ['a', 'b', 'c', ...])\n '''\n\n if not string: string = ''\n\n OK, FAIL = [], []\n L = [ x.strip() for x in string.split(spliter) ]\n for x in L:\n try:\n x = int(x)\n OK.append(x)\n except:\n FAIL.append(x)\n\n return OK, FAIL\n \n \n","sub_path":"lyweb/lib/ytool/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"164862276","text":"import AudioDude\nimport signal\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.cm as cm\nimport threading\nimport datetime as dt\nimport sys\nimport time\nimport scipy.signal\nimport scipy.io.wavfile as wavfile\nimport argparse\nimport os\nfrom PIL import Image\nfrom filtersuite import bpf\n\ndef print_data(in_data):\n data = np.frombuffer(in_data, dtype=np.int16)\n print(data)\n\nclass AudioDudeTester:\n def __init__(self):\n self.ad = AudioDude.AudioDude()\n self.data_lock = threading.Lock()\n self.sampling_frequency = 44100\n self.frames_per_buffer = 1024\n\n # Graphing\n self.ys = []\n self.fig = plt.figure(frameon=False)\n self.ax = self.fig.add_subplot(1, 1, 1)\n\n # Recording\n self.input_path = None\n self.output_path = None\n self.timeout = 10\n self.recorded_data = []\n\n # Filtering\n self.enable_filtering = False\n self.lowf = None\n self.highf = None\n self.f_order = None\n\n def print_mic_byte_stream(self):\n self.ad.start_mic_input_stream(num_channels=1, sampling_rate=self.sampling_frequency, num_frames_per_buffer=self.frames_per_buffer, callback=print_data)\n signal.pause()\n\n def graph_mic_byte_stream_animate(self, i, xs, ys):\n if self.ad.stream and len(self.ys):\n self.data_lock.acquire()\n data = self.ys\n self.data_lock.release()\n if self.enable_filtering:\n data = bpf(data, fs=self.sampling_frequency, lowf=self.lowf, highf=self.highf, order=self.f_order)\n self.ax.clear()\n self.ax.plot(data)\n self.ax.set_yticks(range(-255, 255, 500))\n\n def graph_mic_byte_stream_callback(self, in_data):\n self.data_lock.acquire()\n self.ys = np.frombuffer(in_data, dtype=np.int16).tolist()\n self.data_lock.release()\n\n def graph_mic_byte_stream(self):\n ani = animation.FuncAnimation(self.fig, 
self.graph_mic_byte_stream_animate, fargs=(None, self.ys), interval=1)\n self.ad.start_mic_input_stream(num_channels=1, sampling_rate=self.sampling_frequency, num_frames_per_buffer=self.frames_per_buffer, callback=self.graph_mic_byte_stream_callback)\n plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n plt.show()\n\n def record_mic_byte_stream_callback(self, in_data):\n data = np.frombuffer(in_data, dtype=np.int16).tolist()\n if self.enable_filtering:\n data = bpf(data, fs=self.sampling_frequency, lowf=self.lowf, highf=self.highf, order=self.f_order).tolist()\n self.recorded_data += data\n\n def record_mic_byte_stream(self):\n timeout = 10\n print(\"Entered recording mode with filepath='{0}' and timeout={1}s\".format(self.output_path, timeout))\n self.ad.start_mic_input_stream(num_channels=1, sampling_rate=self.sampling_frequency, num_frames_per_buffer=self.frames_per_buffer, callback=self.record_mic_byte_stream_callback)\n signal.alarm(timeout)\n input(\"Press enter to stop recording.\\n\")\n signal.alarm(0)\n self.ad.stop_mic_input_stream()\n data = np.array(self.recorded_data)\n data = data.astype(np.int16)\n wavfile.write(self.output_path, self.sampling_frequency, data)\n\n def play_wav_file(self, filepath):\n print(\"Entered playback mode with filepath='{0}'\\n\".format(self.input_path))\n self.ad.play_wav_file(filepath)\n\n def loopback(self):\n filepath = '_loopback.wav'\n print(\"Entered loopback mode\\n\")\n while True:\n timeout = 10\n self.ad.start_mic_input_stream(num_channels=1, sampling_rate=self.sampling_frequency, num_frames_per_buffer=self.frames_per_buffer, callback=self.record_mic_byte_stream_callback)\n signal.alarm(timeout)\n input(\"Recording mode... Press enter for playback mode.\")\n signal.alarm(0)\n self.ad.stop_mic_input_stream()\n data = np.array(self.recorded_data)\n data = data.astype(np.int16)\n wavfile.write(filepath, self.sampling_frequency, data)\n self.recorded_data = []\n\n print(\"Playback mode...\", end='')\n self.ad.play_wav_file(filepath)\n input(\"Press enter for recording mode\")\n\n def create_spectrogram(self, data, fs, use_logscale=False):\n f, t, Sxx = [None]*3\n\n if not use_logscale:\n nperseg = 512\n tbins = 1000\n noverlap = (len(data) - tbins*(nperseg))//(-(tbins - 1))\n t_res = (len(data)/fs)*((nperseg-noverlap)/(len(data)-noverlap))\n f_res = fs/(nperseg/2)\n\n f, t, Sxx = scipy.signal.spectrogram(data, fs, nperseg=nperseg, noverlap=noverlap)\n elif True:\n nperseg = 512\n tbins = 1000\n noverlap = (len(data) - tbins*(nperseg))//(-(tbins - 1))\n t_res = (len(data)/fs)*((nperseg-noverlap)/(len(data)-noverlap))\n f_res = fs/(nperseg/2)\n f, t, Sxx = scipy.signal.spectrogram(data, fs, nperseg=nperseg, noverlap=noverlap)\n\n fmin = 20\n fmax = 20000\n nf = 1000\n\n # The following is an excerpt from soundspec: https://github.com/FlorinAndrei/soundspec\n\n # generate an exponential distribution of frequencies\n # (as opposed to the linear distribution from FFT)\n b = fmin - 1\n a = np.log10(fmax - fmin + 1) / (nf - 1)\n freqs = np.empty(nf, int)\n for i in range(nf):\n freqs[i] = np.power(10, a * i) + b\n # list of frequencies, exponentially distributed:\n freqs = np.unique(freqs)\n\n # delete frequencies lower than fmin\n fnew = f[f >= fmin]\n cropsize = f.size - fnew.size\n f = fnew\n Sxx = np.delete(Sxx, np.s_[0:cropsize], axis=0)\n\n # delete frequencies higher than fmax\n fnew = f[f <= fmax]\n cropsize = f.size - fnew.size\n f = fnew\n if cropsize:\n Sxx = Sxx[:-cropsize, :]\n\n findex = []\n # find FFT 
frequencies closest to calculated exponential frequency distribution\n for i in range(freqs.size):\n f_ind = (np.abs(f - freqs[i])).argmin()\n findex.append(f_ind)\n\n # keep only frequencies closest to exponential distribution\n # this is usually a massive cropping of the initial FFT data\n fnew = []\n for i in findex:\n fnew.append(f[i])\n f = np.asarray(fnew)\n Sxxnew = Sxx[findex, :]\n Sxx = Sxxnew\n else:\n # https://www.kaggle.com/himanshurawlani/a-cnn-lstm-model\n window_size = 20\n step_size = 10\n eps = 1e-10\n nperseg = int(round(window_size * fs / 1e3))\n noverlap = int(round(step_size * fs / 1e3))\n\n f, t, Sxx = scipy.signal.spectrogram(data, fs=fs, window='hann', nperseg=nperseg, noverlap=noverlap, detrend=False)\n\n return (f, t, Sxx)\n\n def show_spectrogram(self, filepath, use_logscale=False, use_color=False):\n fs, data = wavfile.read(filepath)\n f, t, Sxx = self.create_spectrogram(data, fs, use_logscale)\n\n if use_logscale:\n plt.yscale('symlog')\n if use_color:\n plt.pcolormesh(t, f, np.log10(Sxx))\n else:\n plt.pcolormesh(t, f, np.log10(Sxx), cmap=cm.gray)\n\n plt.ylabel('f [Hz]')\n plt.xlabel('t [sec]')\n\n plt.show()\n\n def save_spectrogram_sequence(self, input_folder, output_folder, chunk_size_ms, window_size_ms, window_step_ms, use_logscale=False, use_color=False):\n audio_files = os.listdir(input_folder)\n audio_files = [f for f in audio_files if f.endswith('.wav')]\n bad_chunks = {}\n for f in audio_files:\n print(f)\n input_filename = os.path.basename(f).split('.')[0]\n new_output_folder = os.path.join(output_folder, input_filename)\n if not os.path.exists(new_output_folder):\n os.makedirs(new_output_folder)\n\n fs, data = wavfile.read(os.path.join(input_folder, f))\n\n # Segment audio into chunks\n chunk_size = round(chunk_size_ms * (fs/1000))\n chunk_leftover = len(data) % chunk_size\n chunks = [data[x:x+chunk_size] for x in range(0, len(data), chunk_size)]\n\n # Formulate sliding window\n window_size = round(window_size_ms * (fs/1000))\n window_step = round(window_step_ms * (fs/1000))\n\n # Create spectrograms using the sliding window\n n = 0\n for chunk in chunks:\n chunk_timestamp = n * chunk_size_ms\n if chunk is chunks[-1] and len(chunk) < chunk_size and len(chunks) > 1:\n chunk = data[-chunk_size:]\n chunk_timestamp = round(1000*len(data)/fs) - chunk_size_ms\n\n output_subfolder = os.path.join(new_output_folder, '%s_%d' % (input_filename, chunk_timestamp))\n if not os.path.exists(output_subfolder):\n os.makedirs(output_subfolder)\n\n window_leftover = chunk_size - ((chunk_size - window_size) % window_step)\n m = 0\n for x in range(0, len(chunk), window_step):\n window_data = chunk[x:x+window_size]\n output_file = os.path.join(output_subfolder, input_filename)\n image_path = '%s_%d_%d.png' % (output_file, chunk_timestamp, m * window_step_ms)\n\n if len(window_data) < window_size:\n break\n\n f, t, Sxx = self.create_spectrogram(window_data, fs, use_logscale=use_logscale)\n\n if not f.any() or not t.any() or not Sxx.any():\n if output_subfolder not in bad_chunks:\n print('Bad chunk: %s' % output_subfolder)\n bad_chunks[output_subfolder] = True\n\n if use_color:\n plt.pcolormesh(t, f, np.log10(Sxx))\n else:\n plt.pcolormesh(t, f, np.log10(Sxx), cmap=cm.gray)\n plt.axis('off')\n if use_logscale:\n plt.yscale('symlog')\n\n plt.tight_layout(0)\n plt.gcf().set_dpi(100)\n plt.gcf().set_figwidth(3.45)\n plt.gcf().set_figheight(2.57)\n plt.savefig(image_path, bbox_inches='tight', pad_inches=0, dpi=100)\n plt.cla()\n\n image = Image.open(image_path).convert('L')\n 
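# collapse the saved figure to a single 8-bit grayscale channel ('L') so every\n # spectrogram image has a uniform single-channel format for the network\n 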
image.save(image_path)\n\n m += 1\n n += 1\n\n bad_chunks_path = os.path.join(output_folder, 'bad_chunks.txt')\n with open(bad_chunks_path, 'w') as bcf:\n bcf.write('\\n'.join(bad_chunks.keys()))\n\ndef main():\n mode_choices = ['print','graph','record','play', 'loopback', 'spec', 'nn']\n default_filter_specs = '2000,12000,3'\n\n custom_formatter = lambda prog: argparse.RawTextHelpFormatter(prog, max_help_position=100)\n p = argparse.ArgumentParser(formatter_class=custom_formatter)\n\n p.add_argument('-m', '--mode', type=str, required=True, choices=mode_choices, metavar=\"/\".join(mode_choices), help=\"specify mode: \" + str(mode_choices))\n p.add_argument('-f', '--filter', type=str, nargs='?', const=default_filter_specs, metavar='LOW,HIGH,ORDER', help=\"enable bandpass filtering\")\n p.add_argument('-i', '--input', type=str, help=\"specify input path\")\n p.add_argument('-o', '--output', type=str, help=\"specify output path\")\n p.add_argument('--use-logscale', action='store_true', help=\"use logscale for spectrograms\")\n p.add_argument('--use-color', action='store_true', help=\"use color for spectrograms\")\n p.add_argument('--nn-prep', action='store_true', help=\"save spectrograms to disk for neural network training\")\n p.add_argument('--chunk-size', type=int, help=\"nn prep: specify audio chunk size in seconds\")\n p.add_argument('--window-size', type=int, help=\"nn prep: specify sliding window size in milliseconds\")\n p.add_argument('--window-step', type=int, help=\"nn prep: specify sliding window step size in milliseconds\")\n\n args = p.parse_args()\n tester = AudioDudeTester()\n\n if args.filter:\n filter_specs = list(map(int, args.filter.split(',')))\n if len(filter_specs) != 3:\n print(\"Must specify filter specs with LOW,HIGH,ORDER -- e.g. -f 20,20000,3\")\n return\n tester.lowf = filter_specs[0]\n tester.highf = filter_specs[1]\n tester.f_order = filter_specs[2]\n tester.enable_filtering = True\n print(\"\\nEnabled bandpass filtering: {0}hz-{1}hz, filter order {2}\".format(tester.lowf, tester.highf, tester.f_order))\n\n if args.input:\n tester.input_path = os.path.expanduser(args.input)\n if args.output:\n tester.output_path = os.path.expanduser(args.output)\n\n print(\"\")\n\n if args.mode == 'print':\n tester.print_mic_byte_stream()\n elif args.mode == 'graph':\n tester.graph_mic_byte_stream()\n elif args.mode == 'record':\n if not tester.output_path:\n print(\"For recording mode, must specify output filepath -- e.g. -m record -o recording.wav\")\n return\n tester.record_mic_byte_stream()\n elif args.mode == 'play':\n if not tester.input_path:\n print(\"For playback mode, must specify input filepath -- e.g. -m play -i recording.wav\")\n return\n tester.play_wav_file(tester.input_path) # Implement bandpass filtering for this mode\n elif args.mode == 'loopback':\n tester.loopback()\n elif args.mode == 'spec':\n if not tester.input_path:\n print(\"For spectrogram mode, must specify input filepath -- e.g. 
-m spec -i recording.wav\")\n return\n\n use_logscale = True if args.use_logscale else False\n use_color = True if args.use_color else False\n\n if args.nn_prep:\n if not os.path.isdir(tester.input_path):\n print(\"For spectrogram NN prep mode, the input path must be a directory\")\n return\n if not tester.output_path:\n print(\"For spectrogram NN prep mode, must specify the output folder to store the sequential histograms\")\n if not args.chunk_size or not args.window_size or not args.window_step:\n print(\"For spectrogram NN prep mode, must specify the chunk size (s), window size (ms), and window step (ms)\")\n return\n\n tester.save_spectrogram_sequence(tester.input_path, tester.output_path, chunk_size_ms=args.chunk_size, window_size_ms=args.window_size, window_step_ms=args.window_step, use_logscale=use_logscale, use_color=use_color)\n else:\n tester.show_spectrogram(tester.input_path, use_logscale=use_logscale, use_color=use_color)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"audio/arnet.py","file_name":"arnet.py","file_ext":"py","file_size_in_byte":15231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"558602151","text":"#Create a Person class which will have three properties:\n#Name\n#List of foods they like\n#List of foods they hate\n#In this class, create the method taste():\n#It will take in a food name as a string\n#Return {person_name} eats the {food_name}\n#If the food is in the person's like list, add 'and loves it!' to the end\n#If it is in the person's hate list, add 'and hates it!' to the end\n#If it is in neither list, simply add an exclamation mark to the end\n#A person can have an empty list for foods they hate and/or love\n#See the Resources tab for some helpful tutorials on Python classes!\nclass Person:\n def __init__(self,Name,like,hate):\n self.Name=Name\n self.like=like\n self.hate=hate\n def taste(self,food):\n if food in self.like:\n return self.Name+' eats the '+food+' and loves it!'\n elif food in self.hate:\n return self.Name+' eats the '+food+' and hates it!'\n else:\n return self.Name+' eats the '+food+'!'\np1 = Person('Sam', ['ice cream','Bhajiya'], ['carrots'])\nprint(p1.taste('ice cream'))\n#➞ 'Sam eats the ice cream and loves it!'\nprint(p1.taste('cheese'))\n#➞ 'Sam eats the cheese!'\nprint(p1.taste('carrots') )\n#➞ 'Sam eats the carrots and hates it!'","sub_path":"Edabit/Food_for_Everyone.py","file_name":"Food_for_Everyone.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"122290874","text":"import requests\nimport jwt\n\nfrom django.views import View\nfrom django.http import JsonResponse\n\nfrom users.models import User, Bookmark\nfrom resumes.models import Apply\nfrom jobpostings.models import JobPosting\nfrom my_settings import SECRET_KEY, ALGORITHM\nfrom utils import authorization\n\n\nclass KakaoLoginView(View):\n def post(self, request):\n access_token = request.headers.get(\"Authorization\")\n\n if not access_token :\n return JsonResponse({\"message\":\"INVALID_ACCESS_TOKEN\"}, status=401)\n\n headers = ({\"Authorization\": f\"Bearer {access_token}\"})\n url = \"https://kapi.kakao.com/v2/user/me\"\n response = requests.get(url, headers=headers)\n user = response.json()\n\n users, is_created = User.objects.get_or_create(kakao_api_id = user[\"id\"])\n if is_created:\n users.name = user[\"kakao_account\"][\"profile\"][\"nickname\"]\n users.email = user[\"kakao_account\"][\"email\"]\n 
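# these profile fields are written only when get_or_create has just created the row (is_created is True)\n 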
users.profile_image = user[\"kakao_account\"][\"profile\"][\"profile_image_url\"]\n users.save()\n\n user_info = {\n \"name\" : users.name,\n \"email\" : users.email,\n \"profile_image\": users.profile_image\n }\n\n encoded_jwt = jwt.encode({\"user_id\": users.id}, SECRET_KEY, algorithm=ALGORITHM)\n\n return JsonResponse({\"user_info\":user_info, \"token\":encoded_jwt}, status=200)\n\nclass BookMarkView(View):\n @authorization\n def post(self, request, jobposting_id):\n user = request.user\n job_posting = JobPosting.objects.get(id=jobposting_id)\n\n if not Bookmark.objects.filter(user=user, job_posting=job_posting).exists():\n Bookmark.objects.create(\n user = user,\n job_posting = job_posting\n )\n return JsonResponse({\"message\":\"SUCCESS\"}, status=201)\n\n Bookmark.objects.get(user=user, job_posting=job_posting).delete()\n\n return JsonResponse({\"message\":\"SUCCESS\"}, status=200)\n\nclass UserView(View):\n @authorization\n def get(self, request):\n applies = Apply.objects.select_related(\"job_posting\", \"job_posting__experience\",\n \"job_posting__company\", \"job_posting__company__region\",\n \"job_posting__company__region__country\", \"job_posting__job\")\\\n .prefetch_related(\"resume\")\\\n .filter(user=request.user)\n user_info = {\n \"id\" : request.user.id,\n \"name\" : request.user.name,\n \"email\" : request.user.email,\n \"profileImage\" : request.user.profile_image,\n \"applies\" : [{\n \"targetedPosting\" : {\n \"id\" : apply.job_posting.id,\n \"title\" : apply.job_posting.title,\n \"salary\" : apply.job_posting.salary,\n \"experience\" : apply.job_posting.experience.name,\n \"imageUrl\" : apply.job_posting.image_url,\n \"company\" : {\n \"id\" : apply.job_posting.company.id,\n \"name\" : apply.job_posting.company.name,\n \"region\" : apply.job_posting.company.region.name,\n \"country\" : apply.job_posting.company.region.country.name,\n },\n \"job\" : {\n \"id\" : apply.job_posting.job.id,\n \"name\" : apply.job_posting.job.name,\n },\n }\n }for apply in applies]\n }\n\n return JsonResponse({\"message\":\"SUCCESS\", \"result\" : user_info}, status=200)\n\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"432356361","text":"import networkx as nx\nimport graph1\nimport graph2\nimport graph3\nimport graph4\nimport graph5\n\n\ndef find_next_vertex(G):\n\n all_adjacent_to_colored = []\n\n for vertex in G.nodes:\n # print(vertex)\n if G.nodes[vertex]['color'] != 'never coloured':\n # print(G.nodes[vertex])\n for adjacent_to_colored in G.adj[vertex]:\n if G.nodes[adjacent_to_colored]['color'] == 'never coloured':\n all_adjacent_to_colored.append(adjacent_to_colored)\n\n if all_adjacent_to_colored == []:\n return False\n\n return min(all_adjacent_to_colored)\n\ndef find_smallest_color(G,i):\n\n available_colors = [x for x in range(1, len(G.nodes) + 1)]\n\n for neighbour in G.adj[i]:\n neighbour_color = G.nodes[neighbour]['color']\n if neighbour_color in available_colors:\n available_colors.remove(neighbour_color)\n\n return available_colors[0]\n\n\ndef greedy(G):\n\n kmax = []\n all_colored_status = []\n\n for vertex in G.nodes:\n all_colored_status.append(G.nodes[vertex]['color'])\n\n G.nodes[1]['color'] = 1\n\n while 'never coloured' in all_colored_status:\n\n next_vertex = find_next_vertex(G)\n\n if not next_vertex:\n break\n\n new_color = find_smallest_color(G, next_vertex)\n\n kmax.append(new_color)\n 
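# e.g. if the coloured neighbours of next_vertex already use colours {1, 2}, find_smallest_color returns 3\n 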
G.nodes[next_vertex]['color'] = new_color\n\n for vertex in G.nodes:\n all_colored_status.append(G.nodes[vertex]['color'])\n\n kmax = max(kmax)\n\n print()\n for i in G.nodes():\n print('vertex', i, ': color', G.nodes[i]['color'])\n print()\n print('The number of colors that Greedy computed is:', kmax)\n print()\n\n\n\nprint('Graph G1:')\nG=graph1.Graph()\nG.add_nodes_from(G.nodes(), visited = 'no')\n# print(find_next_vertex(G))\ngreedy(G)\n\n\nprint('Graph G2:')\nG=graph2.Graph()\nG.add_nodes_from(G.nodes(), visited = 'no')\ngreedy(G)\n\n\nprint('Graph G3:')\nG=graph3.Graph()\nG.add_nodes_from(G.nodes(), visited = 'no')\ngreedy(G)\n\n\nprint('Graph G4:')\nG=graph4.Graph()\nG.add_nodes_from(G.nodes(), visited = 'no')\ngreedy(G)\n\n\nprint('Graph G5:')\nG=graph5.Graph()\nG.add_nodes_from(G.nodes(), visited = 'no')\ngreedy(G)","sub_path":"Submission/greedy_col_variation.py","file_name":"greedy_col_variation.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"546571008","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\situations\\situation_liabilities.py\n# Compiled at: 2017-10-12 22:39:13\n# Size of source mod 2**32: 8474 bytes\nfrom interactions import ParticipantType\nfrom interactions.interaction_finisher import FinishingType\nfrom interactions.liability import Liability, ReplaceableLiability\nfrom sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, Tunable\nfrom situations.situation_sim_providers import SituationSimParticipantProviderMixin\nfrom situations.situation_types import SituationCallbackOption\nfrom situations.tunable import TunableSituationStart\nimport services\nAUTO_INVITE_LIABILTIY = 'AutoInviteLiability'\n\nclass AutoInviteLiability(Liability):\n\n def __init__(self, **kwargs):\n (super().__init__)(**kwargs)\n self._target_sim = None\n self._situation_id = None\n self._interaction = None\n\n def on_add(self, interaction):\n self._interaction = interaction\n self._target_sim = interaction.get_participant(ParticipantType.TargetSim)\n situation_manager = services.get_zone_situation_manager()\n self._situation_id = situation_manager.create_visit_situation(self._target_sim)\n situation_manager.bouncer._assign_instanced_sims_to_unfulfilled_requests()\n\n def release(self):\n if not self._target_sim.is_on_active_lot():\n situation_manager = services.get_zone_situation_manager()\n situation_manager.destroy_situation_by_id(self._situation_id)\n\n def should_transfer(self, continuation):\n return False\n\n\nclass CreateSituationLiability(Liability, HasTunableFactory, AutoFactoryInit):\n LIABILITY_TOKEN = 'CreateSituationLiability'\n FACTORY_TUNABLES = {'create_situation':TunableSituationStart(), \n 'cancel_interaction_on_situation_end':Tunable(description='\\n If enabled, we will cancel the interaction with this liability\\n whenever the created situation ends. 
Note: this will not merge well\\n with another liability that has the opposite setting.\\n ',\n tunable_type=bool,\n default=True)}\n\n def __init__(self, interaction, **kwargs):\n (super().__init__)(**kwargs)\n self._situation_ids = set()\n self._interaction = interaction\n\n def on_add(self, interaction):\n self._interaction = interaction\n\n def on_run(self):\n if not self._is_create_situation_already_running():\n self.create_situation((self._interaction.get_resolver()), situation_created_callback=(self.on_situation_created))()\n\n def release(self):\n self._interaction = None\n self.destroy_situations()\n\n def transfer(self, interaction):\n self._interaction = interaction\n\n def should_transfer(self, continuation):\n self.validate_situations()\n if not self._situation_ids:\n return False\n return True\n\n def merge(self, interaction, key, new_liability):\n new_liability._situation_ids.update(self._situation_ids)\n situation_manager = services.get_zone_situation_manager()\n for situation_id in self._situation_ids:\n situation_manager.unregister_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._situation_end_callback)\n situation_manager.register_for_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, new_liability._situation_end_callback)\n\n return new_liability\n\n def _is_create_situation_already_running(self):\n situation_manager = services.get_zone_situation_manager()\n for situation_id in self._situation_ids:\n if isinstance(situation_manager.get(situation_id), self.create_situation._tuned_values.situation):\n return True\n\n return False\n\n def validate_situations(self):\n situation_manager = services.get_zone_situation_manager()\n invalid_ids = set()\n for situation_id in self._situation_ids:\n if situation_manager.get(situation_id) is None:\n invalid_ids.add(situation_id)\n\n self._situation_ids.difference_update(invalid_ids)\n\n def destroy_situations(self):\n situation_manager = services.get_zone_situation_manager()\n for situation_id in self._situation_ids:\n situation_manager.unregister_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._situation_end_callback)\n situation_manager.destroy_situation_by_id(situation_id)\n\n def on_situation_created(self, situation_id):\n self._situation_ids.add(situation_id)\n situation_manager = services.get_zone_situation_manager()\n situation_manager.register_for_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._situation_end_callback)\n\n def _situation_end_callback(self, situation_id, callback_option, _):\n if callback_option == SituationCallbackOption.END_OF_SITUATION:\n if self.cancel_interaction_on_situation_end:\n if self._interaction is not None:\n self._interaction.cancel(FinishingType.SITUATIONS, 'Situation owned by liability was destroyed.')\n self._situation_ids.discard(situation_id)\n\n\nclass SituationSimParticipantProviderLiability(ReplaceableLiability, SituationSimParticipantProviderMixin):\n LIABILITY_TOKEN = 'SituationSimParticipantProviderLiability'\n\n def __init__(self, interaction=None, **__):\n (super().__init__)(**__)","sub_path":"Scripts/simulation/situations/situation_liabilities.py","file_name":"situation_liabilities.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"206405546","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nC# Bindings Tester\nCopyright (C) 2012-2018 Matthias Bolte \n\ntest_csharp_bindings.py: Tests the C# 
bindings\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; either version 2\nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nGeneral Public License for more details.\n\nYou should have received a copy of the GNU General Public\nLicense along with this program; if not, write to the\nFree Software Foundation, Inc., 59 Temple Place - Suite 330,\nBoston, MA 02111-1307, USA.\n\"\"\"\n\nimport sys\n\nif sys.hexversion < 0x3040000:\n print('Python >= 3.4 required')\n sys.exit(1)\n\nimport os\nimport shutil\nimport importlib.util\nimport importlib.machinery\n\ndef create_generators_module():\n generators_dir = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]\n\n if sys.hexversion < 0x3050000:\n generators_module = importlib.machinery.SourceFileLoader('generators', os.path.join(generators_dir, '__init__.py')).load_module()\n else:\n generators_spec = importlib.util.spec_from_file_location('generators', os.path.join(generators_dir, '__init__.py'))\n generators_module = importlib.util.module_from_spec(generators_spec)\n\n generators_spec.loader.exec_module(generators_module)\n\n sys.modules['generators'] = generators_module\n\nif 'generators' not in sys.modules:\n create_generators_module()\n\nfrom generators import common\n\nclass CSharpExamplesTester(common.Tester):\n def __init__(self, root_dir, extra_paths):\n common.Tester.__init__(self, 'csharp', '.cs', root_dir, extra_paths=extra_paths)\n\n def test(self, cookie, tmp_dir, path, extra):\n if extra:\n shutil.copy(path, tmp_dir)\n path = os.path.join(tmp_dir, os.path.split(path)[-1])\n\n output = path[:-3] + '.exe'\n\n args = ['mcs',\n '/warn:4',\n '/optimize',\n '/target:exe',\n '/sdk:2',\n '/out:' + output,\n '/reference:System.Drawing.dll',\n '/reference:System.Windows.Forms.dll',\n '/reference:' + os.path.join(tmp_dir, 'Tinkerforge.dll'),\n path]\n\n self.execute(cookie, args)\n\ndef test(root_dir):\n extra_paths = [os.path.join(root_dir, '../../weather-station/button_control/csharp/WeatherStationButton.cs'),\n os.path.join(root_dir, '../../weather-station/write_to_lcd/csharp/WeatherStation.cs'),\n os.path.join(root_dir, '../../hardware-hacking/remote_switch/csharp/RemoteSwitch.cs'),\n os.path.join(root_dir, '../../hardware-hacking/smoke_detector/csharp/SmokeDetector.cs')]\n\n return CSharpExamplesTester(root_dir, extra_paths).run()\n\nif __name__ == '__main__':\n common.dockerize('csharp', __file__)\n\n test(os.getcwd())\n","sub_path":"csharp/test_csharp_bindings.py","file_name":"test_csharp_bindings.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"152144845","text":"# Automatic stopwatch timer\n\nimport time\n\nprint('Press Enter to start timing, press ctrl + c to stop')\nwhile True:\n try:\n input()\n start_time = time.time() \n print('Start')\n while True:\n print('Elapsed', round(time.time() - start_time, 0), 'seconds', end = '\\n')\n time.sleep(1)\n except KeyboardInterrupt:\n print('Stopped')\n break\n","sub_path":"data_structure/2/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"296723328","text":"import numpy as np\nimport random\nimport 
tensorflow as tf\nfrom loader import load_data\n\ndef create_sample(reconstructed, simulated, center, indeces, rand=random, cl_min=1, cl_max=3):\n \"\"\"\n Randomly combines clusters from different events into one sample and returns data to be fed \n into the network:\n - X: numpy array (dim: 171x360) containing reco energy information of the clusters\n - ysort: tensor with center sim coordinates of each cluster from X (padded with -1 for constant dim: (cl_max,2))\n - ye: sim energy info of each cluster from X (padded with 0 for constant dim:(cl_max,11,11))\n \n Args: \n - reconstructed: numpy array with reco energy info of each cluster\n - simulated: numpy array with sim energy info of each cluster\n - center: numpy array with sim coordinates of the cluster center\n - indeces: all the indeces of the existing events \n - rand (default = random): random generator\n - cl_min: minimum number of clusters per sample \n - cl_max: maximum number of clusters per sample\n \"\"\"\n # Randomly choose events and the number of clusters and fill the arrays. \n index = rand.sample(indeces, rand.randint(cl_min,cl_max))\n # print(index)\n X = sum((reconstructed[ind[0]][ind[1]] for ind in index))\n y = np.asarray([(center[ind[0]][ind[1]]) for ind in index])\n ye = np.asarray([(simulated[ind[0]][ind[1]]) for ind in index])\n \n # Sort the clusters based on the distance from (0,0) coordinate of the image. \n dr = np.sqrt(1**2 + 1**2)\n ysort = y[np.argsort((np.sqrt(np.power(y[:,0],2) + np.power(y[:,1]+85,2))//dr + y[:,0]/360))] \n ye = ye[np.argsort((np.sqrt(np.power(y[:,0],2) + np.power(y[:,1]+85,2))//dr + y[:,0]/360))] \n\n return X, tf.pad(ysort, ((0, cl_max-len(index)), (0,0)), constant_values=-1), tf.pad(ye,((0,cl_max-len(index)),(0,0), (0,0)))\n\ndef save_sample(name, nevt=10000):\n \"\"\"\n Create and save data sample in .npy format.\n \n Args: \n - name: name of the file to save into \n - nevt (default = 10000): number of samples to save in one file\n \"\"\"\n reco, sim, center, indeces = load_data(0,2500)\n \n # Create numpy arrays to be filled. \n X = np.full((nevt, 171, 360), 0.)\n y = np.full((nevt, 3, 2), 0.)\n ye = np.full((nevt, 3, 11, 11), 0.)\n \n # Fill the arrays.\n for i in range(nevt):\n X[i], y[i], ye[i] = create_sample(reco, sim, center, indeces, cl_min=1, cl_max=3)\n # Normalize the center coordinates to (0,1). \n phi, eta = y[:,:,0], y[:,:,1]\n phi[phi!=-1]/=360\n eta[eta!=-1]+=85\n eta[eta!=-1]/=170\n y[:,:,0], y[:,:,1] = phi, eta\n X = X.reshape(nevt,171,360,1)\n \n # Save files. 
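Three parallel arrays are written below (X: input images, y: normalised centres, ye: sim-energy patches);\n # as an illustrative note, they can later be reloaded with np.load('X' + str(name) + '.npy') etc. -\n # np.save appends the .npy suffix itself.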
\n np.save('X' + str(name), X)\n np.save('y' + str(name), y)\n np.save('ye' + str(name), ye)","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"7732641","text":"# Расширенная версия, в которой вывод происходит после ввода\r\n# всех данных, а не после ввода каждого имени пользователя\r\nw = ''\r\nd = {}\r\nn = int(input())\r\nfor i in range(n):\r\n surname, name = input().split()\r\n if surname not in d.keys():\r\n d[surname] = 0\r\n d[surname] += 1\r\n if d[surname] == 1:\r\n w += (surname + ' ')\r\n else:\r\n w += ((surname + str(d[surname])) + ' ')\r\n#print(w) #- отладочный принт для вывода строки с логинами, разделенными пробелами\r\nfor login in w.split():\r\n print(login)\r\n\r\n\r\n","sub_path":"27-4(extented).py","file_name":"27-4(extented).py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"229058831","text":"'''\nfit_models uses sklearns GridSearchCV to perform an exhaustive search over specified\nparameter values for an estimator. GridSearchCV implements a “fit” and a “score”\nmethod. It also implements “predict”, “predict_proba”, “decision_function”,\n“transform” and “inverse_transform” if they are implemented in the estimator used.\n\nThe parameters of the estimator used to apply these methods are optimized by\ncross-validated grid-search over a parameter grid. It is computationally\nintensive and intended to be run on AWS servers.\n\neval_model implements a model directly in lieu of GridSearchCV to compare results.\n'''\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom model_parameters import RANDOM_FOREST\nfrom model_parameters import K_NEAREST\nfrom model_parameters import GRADIENT_BOOSTING\n\ndef fit_models(model_parameters, X, y):\n '''\n Process each of the models with GridSearchCV and return a dictionary where\n the key is the estimator abreviation and the value is a GridSearch object\n fit to the best parameters.\n\n This will be a serial for loop to start but my intention is to parallelize\n this operation to compute the fits as fast as possible.\n\n GridSearchCV is provided a list of scorers for flexibility, however, 'f1'\n is anticipated to be the best score given an expected imbalance in the\n y labels (~20% survived the sinking of the titanic)\n '''\n best_models_dict = {}\n for model_name, (model, param_grid) in model_parameters.items():\n gs = GridSearchCV(estimator=model,\n param_grid=param_grid,\n scoring=['accuracy','average_precision','f1','precision','recall','roc_auc'],\n fit_params=None, # deprecated\n n_jobs=1, # come back to this for parallelization\n iid=False, # I think I want mean loss across folds\n refit='f1', # fit to the best f1 score\n cv=None, # use the default 3-fold CV\n verbose=False, # run in AWS, don't need log\n pre_dispatch='2*n_jobs', # come back to this for parallelization\n error_score='raise', # raise errors\n return_train_score=True) # I want training scores\n gs.fit(X, y)\n best_models_dict[model_name] = gs\n\n return best_models_dict\n\ndef eval_model(model_name, X, y, splits=5):\n '''\n Split the data into K folds and perform cross validation on a model.\n Parameters for the model are set prior to calling this function.\n '''\n # 
build the model\n model = None\n if model_name == RANDOM_FOREST:\n model = get_rf_model()\n if model_name == K_NEAREST:\n model = get_knn_model()\n if model_name == GRADIENT_BOOSTING:\n model = get_gbc_model()\n model.fit(X, y)\n print(model.score(X, y))\n return model\n\ndef get_rf_model():\n model = RandomForestClassifier(n_estimators=30,\n criterion='entropy', # 'gini', 'entropy'\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.0,\n max_features='auto', # 'sqrt', 'log2', None, int, float\n max_leaf_nodes=None,\n min_impurity_decrease=0.0,\n min_impurity_split=None,\n bootstrap=True,\n oob_score=False,\n n_jobs=1,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None)\n return model\n\ndef get_knn_model():\n model = KNeighborsClassifier(n_neighbors=7,\n weights='distance', #'distance', 'uniform'\n algorithm='auto', # auto, ball_tree, kd_tree, brute\n leaf_size=30,\n p=2, # int (power metric for minkowski)\n metric='minkowski',\n metric_params=None,\n n_jobs=1)\n return model\n\ndef get_gbc_model():\n model = GradientBoostingClassifier(loss='deviance', # deviance or exponential (loss function to be optimized)\n learning_rate=1.20,\n n_estimators=300,\n subsample=1.0,\n criterion='mse',\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.0,\n max_depth=3,\n min_impurity_decrease=0.0,\n min_impurity_split=None,\n init=None,\n random_state=None,\n max_features=None,\n verbose=0,\n max_leaf_nodes=None,\n warm_start=False,\n presort='auto')\n return model\n","sub_path":"titanic/src/cross_val.py","file_name":"cross_val.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"596755906","text":"import os\nimport argparse\nimport shutil\nimport tempfile\nimport time\nimport pydevd_pycharm\n\nimport pandas as pd\n\nfrom azureml.core import Run\n\nfrom mev.prepare import Decoder\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input_name\", required=True)\nparser.add_argument(\"--output\", required=True)\nparser.add_argument(\"--id\", required=True)\nparser.add_argument(\n '--with_labels',\n default=False,\n type=lambda x: (str(x).lower() == 'true'),\n required=False)\nparser.add_argument(\"--nrows\", required=True)\nargs = parser.parse_args()\n\n# Attach PyCharm debugger\n# print(f\"Starting debugger at host == {os.environ.get('PYCHARM_DEBUG_HOST')} and port == {os.environ.get('PYCHARM_DEBUG_PORT')}\")\n# pydevd_pycharm.settrace(\n# os.environ.get(\"PYCHARM_DEBUG_HOST\"),\n# port=int(os.environ.get(\"PYCHARM_DEBUG_PORT\")),\n# stdoutToServer=False,\n# stderrToServer=False)\n# print('PyCharm debugger attached to prepare.py.')\n#\ncurrent_directory = os.getcwd()\ntemporary_prefix = f\"{current_directory}/\"\ntemporary_path = tempfile.mkdtemp(prefix=temporary_prefix)\n\ninput_dataset_filepaths = Run.get_context() \\\n .input_datasets[args.input_name] \\\n .download(target_path=temporary_path)\n\nmongo_connection_string = os.getenv(\"MONGO_CONNECTION_STRING\")\nmongo_database_name = os.getenv(\"MONGO_DATABASE_NAME\")\netherscan_api_key = os.getenv(\"ETHERSCAN_API_KEY\")\nmoralis_node = os.getenv(\"MORALIS_NODE\")\nalchemy_node = os.getenv(\"ALCHEMY_NODE\")\nsleep_time = float(os.getenv(\"SLEEP_TIME\"))\n\n\ndecoder = Decoder(\n mongo_connection_string,\n mongo_database_name,\n etherscan_api_key,\n sleep_time,\n node_url=[moralis_node, alchemy_node]\n)\n\nfilepath = 
sorted(input_dataset_filepaths)[int(args.id)]\n\nif (args.nrows is not None) and (args.nrows != 'None'):\n example_df = pd.read_csv(filepath, nrows=int(args.nrows))\nelse:\n example_df = pd.read_csv(filepath)\n\ntransactions = example_df.to_dict('records')\n\ntry:\n res = []\n for tx in transactions:\n t1 = time.time()\n try:\n tx_decoded = decoder.decode_tx(tx, with_labels=args.with_labels)\n except Exception as e:\n print(\"Exception during decoding:\")\n print(str(e))\n print(\"Appending empty transaction and continuing...\")\n tx_decoded = {\n 'events': [],\n 'call': {},\n 'transfers': [],\n 'balances': [],\n 'metadata': {},\n }\n\n if args.with_labels:\n tx_decoded['label_0'] = 0.0\n tx_decoded['label_1'] = 0.0\n\n res.append(tx_decoded)\n\n print(f\"Decoding time: {time.time() - t1}\")\nexcept Exception as e:\n print(\"Exception:\")\n print(str(e))\n print(\"Removing tree\")\n shutil.rmtree(temporary_path)\n\ntry:\n os.makedirs(args.output, exist_ok=True)\n res_df = pd.DataFrame(res)\n filepath = f\"{args.output}/decoded_{args.id}.csv\"\n res_df.to_csv(filepath, index=False)\n print(f\"Saved in: {filepath}\")\nexcept Exception as e:\n print(\"Exception:\")\n print(str(e))\n if os.path.exists(temporary_path):\n print(\"Exception:\")\n print(str(e))\n print(\"Removing tree\")\n shutil.rmtree(temporary_path)\nif os.path.exists(temporary_path):\n shutil.rmtree(temporary_path)\n","sub_path":"mev/azure/src/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"349480933","text":"\"\"\" definitions/constants \"\"\"\n\nsyslog_host= (\"dlaptop\", 5555)\n\nfps = 20\n\ngravity = (0, -750)\nbaloon_force = (300, 7900)\nmax_hints = 3\n\nfriction = 0.55\nfloor_level = -5\nkill_level = -1000\n\nwizard_mass = 200\nwizard_touch_impulse_x = 1000\nwizard_hand = (40, -20) # offset from center, where the wizard carries things\nwizard_max_speed = 400\nwizard_friction = 4 * 9 / 13\nwizard_release_impulse = (-100, 2200)\n\nshoot_force = 7700\n\nmintouchdist = 30\n\nmap_size = (1280, 720)\n\nskip_drop_time = 3 # how long to skip dropping new elements, before we know the result of a shot\ndrop_useless_chance = 0.000\ndrop_chance = 0.005\ndrop_zone = (0, 400)\nnum_elements_in_zone = (2, 8)\nexplode_when_nocomb = False # explode elements when they make an impossible combination\n\nleft_beam_fine_pos = 0\nbeam_speed = 20 # number of pixels per minute\n\n# constants\n\nNORMAL_LAYER = 1\nCARRIED_THINGS_LAYER = (1 << 1)\nSHOOTED_THINGS_LAYER = (1 << 2)\nVISUAL_EFFECTS_LAYER = (1 << 3)\nPLATFORMS_LAYER = (1 << 4)\n\nLEFT_BOUND = 1001\nRIGHT_BOUND = 1002\nBOTTOM_BOUND = 1003\n\nINTRO_TEXT = \"\"\"\nA long time ago, in the Middle Ages...\n\nThe Swiss Alchemists, obsessed with finding a way to breed dragons, brought to life theories about transmuting elements into one another.\n\nLacking a philosopher's stone, the Swiss Alchemists have secretly begun construction of the Large Element Collider, a powerful and complex experimental facility.\n\nNow the L.E.C. 
is constructed; help them breed a dragon from the base elements.\n\n\"\"\"\n","sub_path":"defs.py","file_name":"defs.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"585251604","text":"# coding: utf-8\nimport configparser\nimport os\nimport sys\nimport traceback\n\nimport redis\n\nfrom com.listen.tquant.service.stock.StockOneStopProcessor import StockOneStopProcessor\nfrom com.listen.tquant.dbservice.Service import DbService\nfrom com.listen.tquant.log.Logger import Logger\n\n\nimport logging\n\n\"\"\"\nOne-stop stock quote service, using multiple CPU cores:\n1. Load daily-K historical stock quotes into the DB\n2. Compute daily-K price-change percentages, store in the DB\n3. Compute stock moving averages, store in the DB\n4. Compute the average change of the moving averages, store in the DB\n5. done\n\"\"\"\n\nimport multiprocessing\nimport time\n\nlog_path = 'd:\\\\python_log\\\\one_step'\nlog_name = '\\\\list_tquant_stock_one_step_business_all.log'\nwhen = 'M'\ninterval = 1\nbackupCount = 10\nlevel = logging.INFO\n\ndef test(msg):\n i = 0\n while i <= 1000000:\n print(msg, '-', str(i))\n i += 1\n\ndef init(security_code, r, queue):\n StockOneStopProcessor.processing_single_security_code(security_code, r, queue)\n\nif __name__ == \"__main__\":\n\n config = configparser.ConfigParser()\n os.chdir('../config')\n config.read('redis.cfg')\n redis_section = config['redis']\n if redis_section:\n host = redis_section['redis.host']\n port = int(redis_section['redis.port'])\n db = redis_section['redis.db']\n queue = redis_section['redis.block.average.queue']\n pool = redis.ConnectionPool(host=host, port=port, db=db)\n r = redis.Redis(connection_pool=pool)\n else:\n raise FileNotFoundError('redis.cfg redis section not found!!!')\n\n cpu_count = multiprocessing.cpu_count()\n loop_size = 1\n dbService = DbService()\n logger = Logger(level, log_path, log_name, when, interval, backupCount)\n while True:\n worth_buying_codes = dbService.get_worth_buying_stock()\n size = len(worth_buying_codes)\n print('size', size, 'cpu_count', cpu_count)\n pool = multiprocessing.Pool(processes=cpu_count)\n security_codes = [worth_buying_codes[i][0] for i in range(len(worth_buying_codes))]\n for security_code in security_codes:\n try:\n result = pool.apply_async(init, args=(security_code, r, queue))\n except Exception:\n traceback.print_exc()\n pool.close()\n pool.join()\n print('loop_size', loop_size)\n loop_size += 1\n time.sleep(30)","sub_path":"com/listen/tquant/bin/RunStockOneStopProcessorUseMultiCoreCPU.py","file_name":"RunStockOneStopProcessorUseMultiCoreCPU.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"445431908","text":"from tkinter import *\r\n\r\n# necessary for all scripts using tkinter, should be the first thing done\r\nroot = Tk()\r\nroot.title(\"Simple Calculator\")\r\n\r\n# Variables\r\n#===========================================================================\r\n\r\ncurrent_total = 0\r\nis_total_displayed = False\r\nlast_function = \"\"\r\n\r\n# Development helper functions\r\n#===========================================================================\r\n\r\ndef output_dec(func):\r\n\tdef wrapper(*args):\r\n\t\tglobal current_total, is_total_displayed\r\n\t\tf = func\r\n\t\tfunc(*args)\r\n\t\tprint(f\"{f} was just called with {args}\")\r\n\t\tprint(f\"current total = {current_total}\")\r\n\t\tprint(f\"is_total_displayed = {is_total_displayed}\")\r\n\t\tprint(\"////////////////////////////////////////////\" \r\n\t\t\t+ \"///////////////////////////////////////////\")\r\n\treturn wrapper\t\r\n\r\n
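# output_dec wraps each calculator callback: after the wrapped function runs it logs\r\n# which function was called and the shared current_total / is_total_displayed state.\r\n# Illustrative use on a hypothetical callback (not part of the calculator):\r\n# @output_dec\r\n# def noop(): pass   # calling noop() would print the wrapper's trace\r\n\r\n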
# Functions\r\n#===========================================================================\r\n# Calculator button callbacks\r\n@output_dec\r\ndef insert_number(num):\r\n\tglobal is_total_displayed\r\n\tif is_total_displayed:\r\n\t\t_clear()\r\n\t\tentry.insert('end', num)\r\n\telse:\r\n\t\tentry.insert('end', num)\r\n\r\n@output_dec\r\ndef addition():\r\n\tglobal current_total, is_total_displayed, last_function\r\n\tif is_total_displayed:\r\n\t\t_clear()\r\n\telse:\r\n\t\tcurrent_total += int(entry.get())\r\n\t\t_clear()\r\n\tlast_function = \"+\"\r\n\r\n@output_dec\r\ndef subtraction():\r\n\tglobal current_total, is_total_displayed, last_function\r\n\tif is_total_displayed:\r\n\t\t_clear()\r\n\telse:\r\n\t\tcurrent_total -= int(entry.get())\r\n\t\t_clear()\r\n\tlast_function = \"-\"\r\n\r\n@output_dec\r\ndef multiplication():\r\n\tglobal current_total, is_total_displayed, last_function\r\n\tif is_total_displayed:\r\n\t\t_clear()\r\n\telse:\r\n\t\tcurrent_total *= int(entry.get())\r\n\t\t_clear()\r\n\tlast_function = \"*\"\r\n\r\n@output_dec\r\ndef division():\r\n\tglobal current_total, is_total_displayed, last_function\r\n\tif is_total_displayed:\r\n\t\t_clear()\r\n\telse:\r\n\t\tcurrent_total /= int(entry.get())\r\n\t\tcurrent_total = int(current_total)\r\n\t\t_clear()\r\n\tlast_function = \"/\"\r\n\r\n@output_dec\r\ndef display_total():\r\n\tglobal current_total, is_total_displayed\r\n\tif not is_total_displayed:\r\n\t\tif last_function == \"+\":\r\n\t\t\taddition()\r\n\t\telif last_function == \"-\":\r\n\t\t\tsubtraction()\r\n\t\telif last_function == \"*\":\r\n\t\t\tmultiplication()\r\n\t\telif last_function == \"/\":\r\n\t\t\tdivision()\r\n\t\telse:\r\n\t\t\traise ValueError(\"incorrect symbol for var last_function\")\r\n\t_clear()\r\n\tentry.insert(0, str(current_total))\r\n\tis_total_displayed = True\r\n\r\ndef _clear():\r\n\tglobal is_total_displayed\r\n\tentry.delete(0, 'end')\r\n\tis_total_displayed = False\r\n\r\n@output_dec\r\ndef reset():\r\n\tglobal current_total\r\n\t_clear()\r\n\tcurrent_total = 0\r\n\r\n\r\n# Init of an entry bar where the user can enter text\r\nentry = Entry(root, width=35, borderwidth=5)\r\nentry.grid(row=0,column=0,columnspan=3,padx=10,pady=2)\r\n\r\n# Define Buttons\r\nbutton_1 = Button(root, text=\"1\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"1\"))\r\nbutton_2 = Button(root, text=\"2\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"2\"))\r\nbutton_3 = Button(root, text=\"3\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"3\"))\r\nbutton_4 = Button(root, text=\"4\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"4\"))\r\nbutton_5 = Button(root, text=\"5\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"5\"))\r\nbutton_6 = Button(root, text=\"6\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"6\"))\r\nbutton_7 = Button(root, text=\"7\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"7\"))\r\nbutton_8 = Button(root, text=\"8\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"8\"))\r\nbutton_9 = Button(root, text=\"9\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"9\"))\r\nbutton_0 = Button(root, text=\"0\", padx=40, pady=20, borderwidth=3, \r\n\tcommand= lambda: insert_number(\"0\"))\r\n\r\nbutton_add = Button(root, text=\"+\", padx=40, pady=20, borderwidth=3, \r\n\tcommand=addition)\r\n
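# The operator buttons below share one pattern: a fixed label, per-glyph padding\r\n# tweaks so the columns stay aligned, and a direct command callback (the digit\r\n# buttons above use lambdas so the single insert_number handler serves all ten keys).\r\n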
padx=41.45, pady=20, borderwidth=3, \r\n\tcommand=subtraction)\r\nbutton_mult = Button(root, text=\"x\", padx=41, pady=20, borderwidth=3, \r\n\tcommand=multiplication)\r\nbutton_div = Button(root, text=\"÷\", padx=40, pady=20, borderwidth=3, \r\n\tcommand=division)\r\n\r\nbutton_equal = Button(root, text=\"=\", padx=88, pady=20, borderwidth=3, \r\n\tcommand=display_total)\r\nbutton_clear = Button(root, text=\"Clear\", padx=176, pady=20, borderwidth=3, \r\n\tcommand=reset)\r\n\r\n# Place Buttons on Screen\r\nbutton_1.grid(row=3,column=0)\r\nbutton_2.grid(row=3,column=1)\r\nbutton_3.grid(row=3,column=2)\r\nbutton_4.grid(row=2,column=0)\r\nbutton_5.grid(row=2,column=1)\r\nbutton_6.grid(row=2,column=2)\r\nbutton_7.grid(row=1,column=0)\r\nbutton_8.grid(row=1,column=1)\r\nbutton_9.grid(row=1,column=2)\r\nbutton_0.grid(row=4,column=0)\r\n\r\nbutton_add.grid(row=4,column=4)\r\nbutton_sub.grid(row=3,column=4)\r\nbutton_mult.grid(row=2, column=4)\r\nbutton_div.grid(row=1, column=4)\r\n\r\nbutton_equal.grid(row=4, column=1, columnspan=2)\r\nbutton_clear.grid(row=5, column=0, columnspan=5)\r\n\r\n#===========================================================================\r\n\r\n# necessary for all tk scripts; it's tk's method to keep running a loop,\r\n# constantly updating things like where the mouse is on the window\r\nroot.mainloop()","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"17256116","text":"\"\"\"This is a sample file for hw1. \nIt contains the function that should be submitted,\nexcept all it does is output a random value out of the\npossible values that are allowed.\n- Dr. Licato\"\"\"\n\nimport random\nimport re\nimport collections\nfrom hashlib import new\n\nkeyWords = ['OR','AND','NOT','IF']\ntoReturn = \"\"\n\n\n\"\"\"F: A string which is an S-expression of a PC formula. You can assume:\nThe operators used are IF,AND,OR,and NOT. \nAll operators (except for NOT) have two arguments\nAll propositional symbols will be lower-case alphanumeric strings\nAll formulae are well-formed (UNDERGRADUATE STUDENTS ONLY)\n\nReturns either a string or integer: \n“E” – If the formula was not well-formed (GRADUATE STUDENTS ONLY)\n“T” – If the formula is a tautology,\n“U” – If the formula is unsatisfiable, else:\ni – An integer showing the number of rows that are satisfiable\n\"\"\"\n\ndef proveFormula(F):\n\ttoReturn = \"\"\n\twff = checkFormula(F)\n\tif wff == \"E\":\n\t\ttoReturn = wff\n\telse:\n\t\toperands = getOperandCount(F)\n\t\toperandsCount = len(operands)\n\t\titerationCount = 2**(len(operands))\n\t\tcount = 0\n\t\tc=0\n\t\toperandsVal =[]\n\t\t\"\"\" Iterate over the truth table; the number of rows is 2^(operand count), computed above. 
Here we denote false as 0 and True as 1\"\"\"\n\t\twhile(count !=iterationCount):\n\t\t\toperandsVal=dec_to_bin(operandsCount,count)\n\t\t\tdictVal = dict(zip(operands,operandsVal))\n\t\t\tresult = getValueofFormula(F,dictVal)\n\t\t\tif int(result) ==0:\n\t\t\t\tc=c+1\n\t\t\tcount = count +1\n\t\tif c ==0:\n\t\t\ttoReturn =\"T\"\n\t\telif c= 1]\n\tuniqueList = [e for e in uniqueList if e not in (\"OR\",\"AND\",\"IF\",\"NOT\",\"(\",\")\")]\n\treturn uniqueList\n\n\"\"\" To get the value of the formula by putting value according to truth table\"\"\"\ndef getValueofFormula(F,dictVal):\n\ttokens = [i for i in re.split(r'([\\(\\)\\,])|(\\\"[^\\\"]*\\\")|\\s',F) if i]\n\tnewtokens = tokens\n\tfor n,i in enumerate(newtokens):\n\t\tif i == \"(\" or i ==\")\" or i in keyWords:\n\t\t\tcontinue\n\t\telse:\n\t\t\tif i in dictVal:\n\t\t\t\tnewtokens[n] = dictVal.get(i)\n\t\n\tval = getOperations(newtokens)\n\treturn val\n\n\"\"\" Depending upon truth value get the final true/false from the formula. This function iterate the formula until it gets the result.\"\"\"\ndef getOperations(newtokens):\n\t\n\tdef callOR(x,y):\n\t\tz=0\n\t\tif x==0 and y ==0:\n\t\t\tz=0\n\t\telif x==0 and y==1:\n\t\t\tz=1\n\t\telif x==1 and y==0:\n\t\t\tz=1\n\t\telif x==1 and y==1:\n\t\t\tz=1\n\t\treturn z\n\tdef callAND(x,y):\n\t\tz=0\n\t\tif x==0 and y ==0:\n\t\t\tz=0\n\t\telif x==0 and y==1:\n\t\t\tz=0\n\t\telif x==1 and y==0:\n\t\t\tz=0\n\t\telif x==1 and y==1:\n\t\t\tz=1\n\t\treturn z\n\tdef callIF(x,y):\n\t\tz=0\n\t\tif x==0 and y ==0:\n\t\t\tz=1\n\t\telif x==0 and y==1:\n\t\t\tz=1\n\t\telif x==1 and y==0:\n\t\t\tz=0\n\t\telif x==1 and y==1:\n\t\t\tz=1\n\t\treturn z\n\t\n\tdef callNOT(x):\n\t\tz=0\n\t\tif x==0:\n\t\t\tz=1\n\t\telif x==1:\n\t\t\tz=0\n\t\treturn z\n\t\n\twhile(len(newtokens) >1):\n\t\tfor (j,k) in enumerate(newtokens):\n\t\t\tif k in keyWords:\n\t\t\t\tif k==\"NOT\":\n\t\t\t\t\tif newtokens[j+1] != \"(\":\n\t\t\t\t\t\tp=callNOT(int(newtokens[j+1]))\n\t\t\t\t\t\tnewtokens[j-1]=p\n\t\t\t\t\t\tdel(newtokens[j:j+3])\n\t\t\t\telse:\n\t\t\t\t\tif newtokens[j+1] != \"(\" and newtokens[j+2] !=\"(\":\n\t\t\t\t\t\tif k == \"OR\":\n\t\t\t\t\t\t\tnewtokens[j-1]=callOR(int(newtokens[j+1]),int(newtokens[j+2]))\n\t\t\t\t\t\t\tdel(newtokens[j:j+4])\n\t\t\t\t\t\n\t\t\t\t\t\telif k ==\"AND\":\n\t\t\t\t\t\t\tnewtokens[j-1]=callAND(int(newtokens[j+1]),int(newtokens[j+2]))\n\t\t\t\t\t\t\tdel(newtokens[j:j+4])\n\t\t\t\t\t\telif k == \"IF\":\n\t\t\t\t\t\t\tnewtokens[j-1]=callIF(int(newtokens[j+1]),int(newtokens[j+2]))\n\t\t\t\t\t\t\tdel(newtokens[j:j+4])\n\t\n\treturn newtokens.pop(0)\n\n\"\"\" This to convert decimal number to binary. 
The bit count in binary depending upon the number of literal in the formula\"\"\"\ndef dec_to_bin(operandsCount,n):\n\tbits = []\n\tbits.append(str(0 if n%2 == 0 else 1))\n\twhile n > 1:\n\t\tn = n // 2\n\t\tbits.append(str(0 if n%2 == 0 else 1))\n\tbits.reverse()\n\twhile (len(bits) List[int]:\n \"\"\"\nGiven an array of integers A sorted in non-decreasing order, return an array of the squares of each number, also in sorted non-decreasing order.\n\nExample 1:\n\nInput: [-4,-1,0,3,10]\nOutput: [0,1,9,16,100]\nExample 2:\n\nInput: [-7,-3,2,3,11]\nOutput: [4,9,9,49,121]\n\n\nNote:\n\n1 <= A.length <= 10000\n-10000 <= A[i] <= 10000\nA is sorted in non-decreasing order.\n\"\"\"\n if not A:\n return []\n\n L = len(A)\n l, r, i = 0, L-1, L-1\n ret = [0] * L\n\n while l <= r:\n lSquare = A[l] * A[l]\n rSquare = A[r] * A[r]\n if lSquare > rSquare:\n ret[i] = lSquare\n l += 1\n else:\n ret[i] = rSquare\n r -= 1\n i -= 1\n\n return ret\n\n def testSortedSquares(self):\n self.assertEqual([0,1,9,16,100], self.sortedSquares([-4,-1,0,3,10]))\n self.assertEqual([4,9,9,49,121], self.sortedSquares([-7,-3,2,3,11]))\n","sub_path":"src/main/python/squares_of_a_sorted_array.py","file_name":"squares_of_a_sorted_array.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"233634961","text":"from django.urls import path\nfrom basic_app import views\n\n# Template tagging\napp_name = \"basic_app\"\n\nurlpatterns = [\n path(route=\"relative/\", view=views.relative, name=\"relative\"),\n path(route=\"other/\", view=views.other, name=\"other\"),\n]\n","sub_path":"Python3/Udemy/Python and Django Full Stack Web Developer Bootcamp/17 - DJANGO_LEVEL_FOUR/learning_templates/basic_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"479932852","text":"import os\nimport time\nimport datetime\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Verify matplotlib code\n# import matplotlib.pyplot as plt\n# plt.stackplot(range(4), [2, 2, 2, 2], [1, 2, 3, 4], labels=['a', 'b'])\n# plt.show()\n\ndef filter_test_result(test_file):\n\n data_result = []\n\n with open(test_file, 'r') as fh:\n for line in fh:\n try:\n i = line.find('--PerformanceTesting--:')\n if i == -1:\n continue\n \n data = line[i + len('--PerformanceTesting--:'):]\n times = data.split(',')\n if len(times) != 4:\n continue\n \n # start time\n st_datetm = datetime.datetime.fromtimestamp( float(times[0].strip()[3:]))\n # st_datetm = float(times[0].strip()[len('st')+1:])\n\n # end time\n # st_endtime = datetime.datetime.fromtimestamp( float(times[1].strip()[3:]) / 1e3)\n st_endtime = float(times[1].strip()[len('en')+1:])\n\n # during\n st_during = float(times[2].strip()[len('during') + 1:])\n\n # avg\n st_avg = float(times[3].strip()[len('avg') + 1:])\n\n data_result.append((st_datetm, st_endtime, st_during, st_avg))\n except:\n continue\n\n return data_result\n\n\ndef show_result_graph(data_result):\n\n x_data_st = []\n y_data_total = []\n y_data_avg = []\n y_cnt = []\n\n for t in data_result:\n\n st = t[0]\n x = '%s-%d-%d' % (st.date(), st.hour, st.minute)\n\n if not x_data_st or x_data_st[-1] != x:\n x_data_st.append(x)\n y_data_total.append(t[2])\n y_data_avg.append(t[3])\n y_cnt.append(1)\n else:\n y_data_total[-1] += t[2]\n y_data_avg[-1] = (y_data_avg[-1] * y_cnt[-1] + t[3]) / (y_cnt[-1] + 1)\n y_cnt[-1] += 1\n\n\n # y_data_avg 
= list(map(lambda x: x * 100, y_data_avg))\n    #plt.bar(x=x_data_st, height=y_cnt, label='100times', color='steelblue', alpha=0.8)\n    plt.bar(x=x_data_st, height=y_data_avg, label='avg-per-validation', color='indianred', alpha=0.8)\n\n    #for x, y in enumerate(y_cnt):\n    #    plt.text(x, y + 100, '%s' % y, ha='center', va='bottom')\n\n    for x, y in enumerate(y_data_avg):\n        plt.text(x, y + 100, '{:.4f}'.format(y), ha='center', va='bottom')\n\n    plt.title('avg and total of token validation spent')\n    plt.xlabel('time')\n    plt.ylabel('seconds')\n    \n    plt.legend()\n    plt.xticks(range(len(x_data_st)), x_data_st)\n    plt.show()\n\ndef show():\n\n    # data\n    x_data = ['2012', '2013', '2014', '2015', '2016', '2017', '2018']\n    y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]\n    y_data2 = [52000, 54200, 51500,58300, 56800, 59500, 62700]\n    \n    # draw\n    plt.bar(x=x_data, height=y_data, label='C language ', color='steelblue', alpha=0.8)\n    plt.bar(x=x_data, height=y_data2, label='Java language', color='indianred', alpha=0.8)\n    \n    # show data, v-align h-align\n    for x, y in enumerate(y_data):\n        plt.text(x, y , '%s' % y, ha='center', va='bottom')\n    for x, y in enumerate(y_data2):\n        plt.text(x, y , '%s' % y, ha='center', va='top')\n    \n    # title\n    plt.title(\"Java and android book\")\n    \n    # name of x-axis, y-axis\n    plt.xlabel(\"year\")\n    plt.ylabel(\"amount\")\n\n    try: \n        # show graph\n        plt.legend()\n        plt.xticks(range(len(x_data)), x_data)\n        plt.show()\n    except Exception:\n        pass  # sys.exc_clear() existed only in Python 2; nothing to clear on Python 3\n\n\nif __name__ == '__main__':\n\n    show()\n\n    data_result = filter_test_result('./DataAnalyze/dps.log')\n\n    show_result_graph(data_result)","sub_path":"Python35/DataAnalyze/parse_test_reslt.py","file_name":"parse_test_reslt.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"535575894","text":"from django.contrib import admin\nfrom django.urls import path, include\n\n\nfrom . 
import views\nimport hr.views\n\nurlpatterns = [\n path('testZone', views.testZone, name='testZone'),\n path('businessReport', views.businessReport, name='businessReport'),\n path('writeBySomeoneElse', views.writeBySomeoneElse, name='writeBySomeoneElse'),\n path('businessReportForm//', views.businessReportForm, name='businessReportForm'),\n path('select2', include('django_select2.urls')),\n path('create//', views.ExternUserCreateView.as_view(), name='create_externUser'),\n path('create2//', views.NiziPartnerCreateView.as_view(), name='create_niziPartner'),\n path('createContact', views.createContact, name='createContact'),\n path('insertContactRecord', views.insertContactRecord, name='insertContactRecord'),\n path('deleteContactsUser', views.chooseUserToDeleteHisContacts, name='deleteContactsUser'),\n path('deleteContactsForEver', views.deleteContactsForEver, name='deleteContactsForEver'),\n path('addContactsGroupToUserForm', views.addContactsGroupToUserForm, name='addContactsGroupToUserForm'),\n path('addContactsGroupToUser', views.addContactsGroupToUser, name='addContactsGroupToUser'),\n]","sub_path":"hr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"465475839","text":"#!/usr/bin/env python3\n# Copyright 2020-2021 Efabless Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Original code by M. 
Shalan, Translated to Python by Donn\n\nimport io\nimport re\nimport math\nimport click\nimport textwrap\n\n\n@click.command()\n@click.option(\"--fanout\", required=True)\n@click.option(\"--clk-net\", required=True)\n@click.option(\"--root-clkbuf\", required=True, help=\"Clock tree root buffer type\")\n@click.option(\"--clkbuf\", required=True, help=\"Clock tree branching buffer type\")\n@click.option(\n \"--clkbuf-input-pin\", required=True, help=\"Name of input pin in clock buffers\"\n)\n@click.option(\n \"--clkbuf-output-pin\", required=True, help=\"Name of output pin in clock buffers\"\n)\n@click.option(\"--clk-port\", required=True, help=\"Name of clock pin in storage elements\")\n@click.option(\"-o\", \"--output\", required=True, help=\"Name of output netlist\")\n@click.argument(\"input_netlist\")\ndef cli(\n fanout,\n clk_net,\n clkbuf,\n root_clkbuf,\n clkbuf_input_pin,\n clkbuf_output_pin,\n clk_port,\n output,\n input_netlist,\n):\n \"\"\"\n Shalan's Simple Clock Tree Synthesizer\n\n Made to work on Yosys netlists.\n \"\"\"\n fanout = int(fanout)\n\n netlist_str = open(input_netlist).read()\n clock_port_rx = re.compile(rf\"{re.escape(clk_port)}\\(\\s*{re.escape(clk_net)}\\s*\\)\")\n leaves = len(clock_port_rx.findall(netlist_str))\n levels = math.ceil(math.log(leaves, fanout))\n\n buffers = [0] * (levels + 1)\n buffers[0] = leaves\n\n verilog_wires = []\n verilog_cells = []\n\n cell_count = 0\n for level in range(1, levels):\n level_before = level - 1\n instance = f\"_CTS_buf_{level}_\"\n input_wire = f\"clk_{level}_\"\n output_wire = f\"clk_{level_before}_\"\n\n # obviously translated c-style for loop\n i = 0\n while i < leaves:\n next_level_fanout = fanout ** (level + 1)\n ii = i // next_level_fanout * next_level_fanout\n cell_name = f\"{instance}{i}\"\n verilog_cells.append(\n textwrap.dedent(\n f\"\"\"\n {clkbuf} {cell_name} (\n \\t.{clkbuf_input_pin}({input_wire}{ii}),\n \\t.{clkbuf_output_pin}({output_wire}{i})\n );\"\"\"\n )\n )\n verilog_wires.append(f\"wire {output_wire}{i};\")\n buffers[level] += 1\n cell_count += 1\n\n i += fanout**level\n\n root_net = f\"clk_{levels - 1}_0\"\n verilog_cells.append(\n textwrap.dedent(\n f\"\"\"\n {root_clkbuf} _CTS_root(\n \\t.{clkbuf_input_pin}({clk_net}),\n \\t.{clkbuf_output_pin}({root_net})\n );\"\"\"\n )\n )\n verilog_wires.append(f\"wire clk_{levels - 1}_0;\\n\")\n\n with io.StringIO() as sio:\n state = 0\n ff_count = 0\n for line in netlist_str.split(\"\\n\"):\n if state == 0:\n if \"wire\" in line:\n state = 1\n print(line, file=sio)\n elif state == 1:\n if \"wire\" not in line and \"input\" not in line and \"output\" not in line:\n state = 2\n print(\"\\n// CTS added wires:\", file=sio)\n print(\"\\n\".join(verilog_wires), file=sio)\n print(\"\\n// CTS added buffers:\", file=sio)\n print(\"\\n\".join(verilog_cells), file=sio)\n print(line, file=sio)\n else:\n print(line, file=sio)\n elif state == 2:\n clk_port = clock_port_rx.search(line)\n if clk_port is not None:\n clk_wire_name = f\"clk_0_{ff_count // fanout * fanout}\"\n line_replaced = re.sub(re.escape(clk_net), clk_wire_name, line)\n ff_count += 1\n print(line_replaced, file=sio)\n else:\n print(line, file=sio)\n with open(output, \"w\") as f:\n f.write(sio.getvalue())\n\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"scripts/simple_cts.py","file_name":"simple_cts.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"147872959","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Thu Apr 1 10:45:01 2021\n\n@author: daniel omola\n\"\"\"\n\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\n\ndef hist(data,col=\"close_spx\"):\n    \"\"\"\n\t####################################################################################\n\t#\n\t#\t\t\t\tPlot histogram on specified column\n\t#\n\t####################################################################################\n\t\"\"\"\n    fig = px.histogram(data, x=col)\n    fig.update_layout(\n        autosize=False,\n        width=600,\n        height=400,)\n    fig.show()\n    \n\ndef plot_loss_tresshold(df):\n    \"\"\"\n\t####################################################################################\n\t#\n\t#\t\t\t\tPlot time series of the prediction error and a threshold line\n\t#\n\t####################################################################################\n\t\"\"\"\n    fig = go.Figure()\n    fig.add_trace(go.Scatter(\n        x=df.index,\n        y=df.loss,\n        mode='lines',\n        name='error'))\n    \n    fig.add_trace(go.Scatter(\n        x=df.index,\n        y=df.threshold,\n        mode='lines',\n        name='threshold'))\n    fig.show()\n    \n\ndef plot_anomalies(df,is_spx=True, secondary_y = 'close_vix' ):\n    \"\"\"\n\t####################################################################################\n\t#\n\t#\t\tPlot : points identified as anomaly and close price of S&P500 and VIX\n\t#\n\t####################################################################################\n\t\"\"\"\n    col = 'close'\n    anomalies = df.copy()\n    anomalies.close = anomalies[[col,'anomaly']].apply(lambda r : r[0] if r[1]==True else 0,axis=1)\n    anomalies=anomalies[anomalies.anomaly==True]\n\n    # Create figure with secondary y-axis\n    fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n\n    fig.add_trace(go.Scatter(\n        x=df.index,\n        y=df[col],\n        mode='lines',\n        name='close'))\n\n\n\n    fig.add_trace(go.Scatter(\n        x=anomalies.index,\n        y=anomalies[col],\n        mode='markers',\n        name='anomaly'))\n    fig.add_trace(go.Scatter(\n        x=df.index,\n        y=df[secondary_y],\n        mode='lines',\n        marker_color='lightgrey',\n        name=secondary_y),\n        secondary_y=True,)\n    \n    # Set x-axis title\n    fig.update_xaxes(title_text=\"xaxis title\")\n\n    # Set y-axes titles\n    if secondary_y == 'close_vix':\n        fig.update_yaxes(title_text=\"SP500 close\", secondary_y=False)\n        fig.update_yaxes(title_text=\"VIX close\", secondary_y=True)\n    else:\n        fig.update_yaxes(title_text=\"VIX close\", secondary_y=False)\n        fig.update_yaxes(title_text=\"SP500 close\", secondary_y=True)\n    fig.show()","sub_path":"LSTM_Anomaly_Detection/mypackage/mypackage/ploter.py","file_name":"ploter.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"111321440","text":"import pygame\r\nimport sys\r\nimport math\r\n\r\npygame.init()\r\n\r\ntry:\r\n    clock = pygame.time.Clock()\r\n    \r\n    RED = (255, 0, 0)\r\n    GREEN = (0, 255, 0)\r\n    BLUE = (0, 0, 255)\r\n\r\n    MAX_X = 1024\r\n    MAX_Y = 720\r\n    \r\n    window = pygame.display.set_mode((MAX_X, MAX_Y))\r\n    \r\n    #ComicSans = pygame.font.SysFont(\"Comic Sans MS\", 144)\r\n    #text = ComicSans.render(\"Hello world!\", True, GREEN, RED)\r\n\r\n    #rectArea = pygame.Rect(200, 600, 150, 75)\r\n\r\n    lineStartX = 50\r\n    lineStartY = 670\r\n    lineStartPos = (lineStartX, lineStartY)\r\n    lineLength = 400\r\n    lineAngle = 0\r\n    \r\n    while True:\r\n        \r\n        window.fill(RED)\r\n\r\n        pygame.draw.circle(window, GREEN, lineStartPos, lineLength)\r\n\r\n        lineEndX = lineStartX + lineLength * 
math.cos(math.radians(lineAngle))\r\n        lineEndY = lineStartY + lineLength * math.sin(math.radians(lineAngle))\r\n        lineEndPos = (lineEndX, lineEndY)\r\n        pygame.draw.line(window, BLUE, lineStartPos, lineEndPos, 5)\r\n\r\n        #pygame.draw.rect(window, GREEN, rectArea)\r\n\r\n        pygame.display.update()\r\n        clock.tick(30)\r\n        \r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                sys.exit(1)\r\n            elif event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_ESCAPE:\r\n                    sys.exit(1)\r\n                elif event.key == pygame.K_DOWN:\r\n                    if lineAngle < 0:\r\n                        lineAngle += 5\r\n                elif event.key == pygame.K_UP:\r\n                    if lineAngle > -90:\r\n                        lineAngle -= 5\r\n\r\nexcept BaseException as error:\r\n    pygame.quit()\r\n    raise error\r\n","sub_path":"game3.py","file_name":"game3.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"410886323","text":"### Run on Python 3.4 ###\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QAxContainer import *\nfrom PyQt5.QtCore import *\nimport sys, time\nimport pandas as pd\nfrom pymongo import MongoClient\n\n# Kiwoom requires that you take at least 3.8 seconds before another request,\n# (it's their new policy)\nTR_REQ_TIME_INTERVAL = 3.8\n\n# connection information for MongoDB\nUSER = \"minestoned\"\nPW = \"moneyisnoweverythingdawg\"\nIP = \"45.55.86.183\"\nDB = \"stock\"\n\ndef timeit(method):\n    \"\"\"method timing decorator\"\"\"\n    def timed(*args, **kwargs):\n        ts = time.time()\n        result = method(*args, **kwargs)\n        te = time.time()\n        print(\"Initialization took \" + str(te-ts) + \" seconds\")\n        return result\n    return timed\n\n\nclass Kiwoom(QAxWidget):\n    \"\"\"Kiwoom class: connects to Kiwoom OpenAPI\n    requests OHLCV, Buy, Sell data\"\"\"\n\n    def __init__(self):\n        super().__init__()\n        self._create_kiwoom_instance()\n        self._set_signal_slots()\n\n    def prepare_data(self):\n        # prepares data for database insertion\n        self.data = {\"ohlcv\": [], \"buy\": [], \"sell\": []}\n\n    def _add_data(self, type, data):\n        # adds data to front of each self.data list\n        # for date sorting purposes\n        self.data[type].insert(0, data)\n\n    def _create_kiwoom_instance(self):\n        self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\")\n\n    def _set_signal_slots(self):\n        self.OnEventConnect.connect(self._event_connect)\n        self.OnReceiveTrData.connect(self._receive_tr_data)\n\n    def comm_connect(self):\n        self.dynamicCall(\"CommConnect()\")\n        self.login_event_loop = QEventLoop()\n        self.login_event_loop.exec_()\n\n    def _event_connect(self, err_code):\n        if err_code == 0:\n            print(\"connected\")\n        else:\n            print(\"disconnected\")\n\n        self.login_event_loop.exit()\n\n    def get_code_list_by_market(self, market):\n        # returns full code list of each market:\n        # 0: kospi, 10: kosdaq, 8: etf\n        code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market)\n        code_list = code_list.split(';')\n        return code_list[:-1]\n\n    def get_master_code_name(self, code):\n        # returns company name of each code you input\n        code_name = self.dynamicCall(\"GetMasterCodeName(QString)\", code)\n        return code_name\n\n    def set_input_value(self, id, value):\n        # setting input before request calls\n        self.dynamicCall(\"SetInputValue(QString, QString)\", id, value)\n\n    def comm_rq_data(self, rqname, trcode, next, screen_no):\n        # commits all set inputs and requests data\n        self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", rqname, trcode, next, screen_no)\n        self.tr_event_loop = QEventLoop()\n        self.tr_event_loop.exec_()\n\n    def _comm_get_data(self, code, real_type, 
field_name, index, item_name):\n        ret = self.dynamicCall(\"CommGetData(QString, QString, QString, int, QString)\", code,\n                               real_type, field_name, index, item_name)\n        return ret.strip()\n\n    def _get_repeat_cnt(self, trcode, rqname):\n        # returns how many requests you have left till the end\n        ret = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", trcode, rqname)\n        return ret\n\n    def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):\n        if next == '2':\n            self.remained_data = True\n        else:\n            self.remained_data = False\n\n        if rqname == \"opt10081_req\":\n            self._opt10081(rqname, trcode)\n        elif rqname == \"opt10059_req\":\n            self._opt10059(rqname, trcode)\n\n        try:\n            self.tr_event_loop.exit()\n        except AttributeError:\n            pass\n\n    def _opt10081(self, rqname, trcode):\n        data_cnt = self._get_repeat_cnt(trcode, rqname)\n\n        for i in range(data_cnt):\n            date = self._comm_get_data(trcode, \"\", rqname, i, \"일자\")\n            open = self._comm_get_data(trcode, \"\", rqname, i, \"시가\")\n            high = self._comm_get_data(trcode, \"\", rqname, i, \"고가\")\n            low = self._comm_get_data(trcode, \"\", rqname, i, \"저가\")\n            close = self._comm_get_data(trcode, \"\", rqname, i, \"현재가\")\n            volume = self._comm_get_data(trcode, \"\", rqname, i, \"거래량\")\n\n            update_data = {\"date\": int(date), \\\n                           \"open\": int(open), \\\n                           \"high\": int(high), \\\n                           \"low\": int(low), \\\n                           \"close\": int(close), \\\n                           \"volume\": int(volume)}\n\n            self._add_data(\"ohlcv\", update_data)\n\n    def _opt10059(self, rqname, trcode):\n        data_cnt = self._get_repeat_cnt(trcode, rqname)\n\n        for i in range(data_cnt):\n            date = self._comm_get_data(trcode, \"\", rqname, i, \"일자\")\n            individual = self._comm_get_data(trcode, \"\", rqname, i, \"개인투자자\")\n            for_retail = self._comm_get_data(trcode, \"\", rqname, i, \"외국인투자자\")\n            institution = self._comm_get_data(trcode, \"\", rqname, i, \"기관계\")\n            financial = self._comm_get_data(trcode, \"\", rqname, i, \"금융투자\")\n            insurance = self._comm_get_data(trcode, \"\", rqname, i, \"보험\")\n            trust = self._comm_get_data(trcode, \"\", rqname, i, \"투신\")\n            etc_finance = self._comm_get_data(trcode, \"\", rqname, i, \"기타금융\")\n            bank = self._comm_get_data(trcode, \"\", rqname, i, \"은행\")\n            pension = self._comm_get_data(trcode, \"\", rqname, i, \"연기금등\")\n            private = self._comm_get_data(trcode, \"\", rqname, i, \"사모펀드\")\n            nation = self._comm_get_data(trcode, \"\", rqname, i, \"국가\")\n            etc_corporate = self._comm_get_data(trcode, \"\", rqname, i, \"기타법인\")\n            foreign = self._comm_get_data(trcode, \"\", rqname, i, \"내외국인\")\n\n            update_data = {\"date\": int(date), \\\n                           \"individual\": int(individual), \\\n                           \"foreign_retail\": int(for_retail), \\\n                           \"institution\": int(institution), \\\n                           \"financial\": int(financial), \\\n                           \"insurance\": int(insurance), \\\n                           \"trust\": int(trust), \\\n                           \"etc_finance\": int(etc_finance), \\\n                           \"bank\": int(bank), \\\n                           \"pension\": int(pension), \\\n                           \"private\": int(private), \\\n                           \"nation\": int(nation), \\\n                           \"etc_corporate\": int(etc_corporate), \\\n                           \"foreign\": int(foreign)}\n\n            self._add_data(self.buysell_state, update_data)\n\n\nclass DataCollector:\n\n    def __init__(self, user, password, ip_address, db_name):\n        self.db_name = db_name\n        self.mongo = MongoClient(\"mongodb://{0}:{1}@{2}/{3}\".format(user, password, ip_address, db_name))\n\n    def set_collection(self, collection):\n        # NOTE: the 'collection' argument is currently ignored; the 'data' collection is always used\n        self.collection = self.mongo[self.db_name].data\n\n    def _get_total_stock_num(self, kiwoom):\n        total_stock_list = list()\n        for market_type in [0, 10]:\n            stock_list = 
kiwoom.get_code_list_by_market(str(market_type))\n            total_stock_list += stock_list\n        return len(total_stock_list)\n\n    def initialize_db(self, kiwoom):\n        total_stock_num = self._get_total_stock_num(kiwoom)\n        code_looped = 0\n\n        # get code list (0: KOSPI, 10: KOSDAQ)\n        for market_type in [0, 10]:\n            if market_type == 0:\n                market = \"kospi\"\n            elif market_type == 10:\n                market = \"kosdaq\"\n            code_list = kiwoom.get_code_list_by_market(str(market_type))\n\n            for code in code_list:\n                self._initialize_data(code, market, kiwoom)\n                code_looped += 1\n                stocks_left = str(total_stock_num - code_looped)\n                print(stocks_left + \" stocks left to initialize\")\n                print(\"---------------------------------------------------\")\n\n    @timeit\n    def _initialize_data(self, code, market, kiwoom):\n        global TR_REQ_TIME_INTERVAL\n\n        name = kiwoom.get_master_code_name(code)\n        time.sleep(TR_REQ_TIME_INTERVAL)\n        print(code + \": \" + name + \" data initializing\")\n        kiwoom.prepare_data()\n\n        # request opt10081 TR\n        kiwoom.set_input_value(\"종목코드\", code)\n        kiwoom.set_input_value(\"기준일자\", time.strftime('%Y%m%d'))\n        kiwoom.set_input_value(\"수정주가구분\", 1)\n        kiwoom.comm_rq_data(\"opt10081_req\", \"opt10081\", 0, \"0101\")\n        time.sleep(TR_REQ_TIME_INTERVAL)\n\n        while kiwoom.remained_data == True:\n            kiwoom.set_input_value(\"종목코드\", code)\n            kiwoom.set_input_value(\"기준일자\", time.strftime('%Y%m%d'))\n            kiwoom.set_input_value(\"수정주가구분\", 1)\n            kiwoom.comm_rq_data(\"opt10081_req\", \"opt10081\", 2, \"0101\")\n            time.sleep(TR_REQ_TIME_INTERVAL)\n        print(\"OHLCV data initialized, ready for DB\")\n\n        for buysell in [1, 2]:\n            if buysell == 1:\n                kiwoom.buysell_state = \"buy\"\n            elif buysell == 2:\n                kiwoom.buysell_state = \"sell\"\n\n            # request opt10059 TR\n            kiwoom.set_input_value(\"일자\", time.strftime('%Y%m%d'))\n            kiwoom.set_input_value(\"종목코드\", code)\n            kiwoom.set_input_value(\"금액수량구분\", 2)\n            kiwoom.set_input_value(\"매매구분\", buysell)\n            kiwoom.set_input_value(\"단위구분\", 1)\n            kiwoom.comm_rq_data(\"opt10059_req\", \"opt10059\", 0, \"0101\")\n            time.sleep(TR_REQ_TIME_INTERVAL)\n\n            while kiwoom.remained_data == True:\n                kiwoom.set_input_value(\"일자\", time.strftime('%Y%m%d'))\n                kiwoom.set_input_value(\"종목코드\", code)\n                kiwoom.set_input_value(\"금액수량구분\", 2)\n                kiwoom.set_input_value(\"매매구분\", buysell)\n                kiwoom.set_input_value(\"단위구분\", 1)\n                kiwoom.comm_rq_data(\"opt10059_req\", \"opt10059\", 2, \"0101\")\n                time.sleep(TR_REQ_TIME_INTERVAL)\n            if buysell == 1:\n                print(\"BUY data initialized, ready for DB\")\n            elif buysell == 2:\n                print(\"SELL data initialized, ready for DB\")\n\n        db_initializer = {\"code\": code, \"name\": name, \"market\": market, \"ohlcv\": kiwoom.data[\"ohlcv\"], \"buy\": kiwoom.data[\"buy\"], \"sell\": kiwoom.data[\"sell\"]}\n        self.collection.insert_one(db_initializer)\n        print(code + \": \" + name + \" data successfully initialized\")\n\n\nif __name__ == \"__main__\":\n    print(sys.argv)\n    app = QApplication(sys.argv)\n    kiwoom = Kiwoom()\n    kiwoom.comm_connect()\n\n    # dc = DataCollector(USER, PW, IP, DB)\n    # dc.set_collection(\"stock\")\n    # dc.initialize_db(kiwoom)\n","sub_path":"gobble/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"345389376","text":"#!/usr/bin/env python\nimport tensorflow as tf\nfrom data import load_dict,BUCKET,pad_sequences,process_label_pattern\nimport os\nimport modeling\nclass SeqModel(object):\n    def __init__(self, args, logger,hvd=False):\n        self.args=args\n        self.logger = logger\n        
self.hvd=hvd\n        self.optimizer = args.optimizer\n        self.num_hidden_layers = args.layer_depth\n        self.embedding_dim = args.embedding_dim\n        self.bucket = BUCKET\n        self.ngram_dim=args.ngram_dim\n        self.tag2id, self.id2tag = load_dict(args.tag_to_id_path,args.encoding)\n        self.num_tags = 3 # len(self.tag2id)\n        self.word2id, self.id2word = load_dict(args.word_to_id_path,args.encoding)\n        self.logger.info(\"tag2id size: %d\" % self.num_tags)\n        self.logger.info(\"word2id size: %d\" % len(self.word2id))\n        self.lambda1 = args.lambda1\n        self.lambda2 = args.lambda2\n        self.lambda3 = args.lambda3\n        self.lambda4 = args.lambda4\n    def build_bert_graph(self,bert_base_dir):\n        self.add_placeholders()\n        self.bert_layer_op(bert_base_dir)\n        self.softmax_pred_op()\n        self.loss_op()\n        self.trainstep_op()\n        self.init_op()\n\n    def build_cnn_ngram_graph(self,num_filters,filter_width=3,embeddings=None,bigram=None,trigram=None):\n        self.add_placeholders_ngram()\n        self.lookup_layer_ngram(embeddings,bigram,trigram)\n        self.cnn_layer_ngram(num_filters,filter_width)\n        self.softmax_pred_op()\n        self.loss_op()\n        self.trainstep_op()\n        self.init_op()\n\n    def trainstep_op(self,clip_norm=1):\n        with tf.variable_scope(\"train_step\"):\n            self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n            learning_rate = self.lr_pl\n            if self.optimizer == 'Adam':\n                optim = tf.train.AdamOptimizer(learning_rate=learning_rate)\n            elif self.optimizer == 'Adadelta':\n                optim = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)\n            elif self.optimizer == 'Adagrad':\n                optim = tf.train.AdagradOptimizer(learning_rate=learning_rate)\n            elif self.optimizer == 'RMSProp':\n                optim = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n            elif self.optimizer == 'Momentum':\n                optim = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)\n            elif self.optimizer == 'SGD':\n                optim = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n            else:\n                optim = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n            if self.args.use_hvd:\n                self.logger.info(\"==== use DistributedOptimizer\")\n                optim= self.hvd.DistributedOptimizer(optim)\n            tvars=tf.trainable_variables()\n            grads_and_vars = optim.compute_gradients(self.loss,tvars)\n            grads,_ = tf.clip_by_global_norm([k for k,v in grads_and_vars], clip_norm=clip_norm)\n            self.train_op = optim.apply_gradients(zip(grads,tvars), global_step=self.global_step)\n            #self.train_op = optim.apply_gradients(grads_and_vars, global_step=self.global_step)\n    def init_op(self):\n        self.init_op = tf.global_variables_initializer()\n\n    def add_placeholders_ngram(self):\n        self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name=\"word_ids\")\n        self.labels = tf.placeholder(tf.int32, shape=[None, None], name=\"labels\")\n        self.sequence_lengths = tf.placeholder(tf.int32, shape=[None], name=\"sequence_lengths\")\n        self.dropout_pl = tf.placeholder(dtype=tf.float32, shape=[], name=\"dropout\")\n        self.lr_pl = tf.placeholder(dtype=tf.float32, shape=[], name=\"lr\")\n        self.before_bigram_ids = tf.placeholder(tf.int32, shape=[None, None], name=\"before_bigram_ids\")\n        self.after_bigram_ids = tf.placeholder(tf.int32, shape=[None, None], name=\"after_bigram_ids\")\n        self.before_trigram_ids = tf.placeholder(tf.int32, shape=[None, None], name=\"before_trigram_ids\")\n        self.after_trigram_ids = tf.placeholder(tf.int32, shape=[None, None], name=\"after_trigram_ids\")\n    \n    def add_placeholders(self):\n        self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name=\"word_ids\")\n        self.labels = tf.placeholder(tf.int32, 
shape=[None, None], name=\"labels\")\n self.seg_ids = tf.placeholder(tf.int32, shape=[None, None], name=\"seg_ids\")\n self.sequence_lengths = tf.placeholder(tf.int32, shape=[None], name=\"sequence_lengths\")\n self.dropout_pl = tf.placeholder(dtype=tf.float32, shape=[], name=\"dropout\")\n self.lr_pl = tf.placeholder(dtype=tf.float32, shape=[], name=\"lr\")\n self.querys = tf.placeholder(tf.int32, shape=[None, None], name=\"labels_ori\")\n self.cls = tf.placeholder(tf.int32, shape=[None], name=\"cls_label\")\n\n def get_feed_dict(self, seqs, labels, segments, lr, dropout, ngrams=None):\n word_ids, seq_len_list = pad_sequences(seqs, pad_mark=0)\n seg_ids, _ = pad_sequences(segments, pad_mark=0)\n feed_dict = {self.word_ids: word_ids,\n self.sequence_lengths: seq_len_list,\n self.seg_ids: seg_ids}\n if labels is not None:\n labels_, query_, cls_ = process_label_pattern(labels)\n feed_dict[self.labels] = labels_\n feed_dict[self.querys] = query_\n feed_dict[self.cls] = cls_\n if lr is not None:\n feed_dict[self.lr_pl] = lr\n if dropout is not None:\n feed_dict[self.dropout_pl] = dropout\n if ngrams is not None:\n before_bigrams_, _ = pad_sequences(ngrams[0], pad_mark=0)\n feed_dict[self.before_bigram_ids] = before_bigrams_\n after_bigrams_, _ = pad_sequences(ngrams[1], pad_mark=0)\n feed_dict[self.after_bigram_ids] = after_bigrams_\n before_trigrams_, _ = pad_sequences(ngrams[2], pad_mark=0)\n feed_dict[self.before_trigram_ids] = before_trigrams_\n after_trigrams_, _ = pad_sequences(ngrams[3], pad_mark=0)\n feed_dict[self.after_trigram_ids] = after_trigrams_\n return feed_dict, seq_len_list, len(word_ids), len(word_ids[0])\n\n\n def bert_layer_op(self,bert_base_dir):\n bert_config_path = os.path.join(bert_base_dir, \"bert_config.json\") \n self.bert_config = modeling.BertConfig.from_json_file(bert_config_path)\n self.bert_config.num_hidden_layers = self.num_hidden_layers\n self.input_mask = tf.sequence_mask(self.sequence_lengths, dtype=tf.int32)\n self.logger.info(self.bert_config.to_json_string())\n self.model = modeling.BertModel(config=self.bert_config, dropout_rate=self.dropout_pl, \n input_ids=self.word_ids, input_mask=self.input_mask, token_type_ids=self.seg_ids,\n use_one_hot_embeddings=False)\n if self.args.mode == \"train\":\n checkpoint_file = os.path.join(bert_base_dir, \"bert_model.ckpt\")\n if checkpoint_file:\n assignment_map, initialized_variable_names = modeling.get_assigment_map_from_checkpoint(\n tf.trainable_variables(), checkpoint_file)\n tf.train.init_from_checkpoint(checkpoint_file, assignment_map)\n self.logger.info(\"load checkpoint_file successfully!!\") \n \n self.embedding_output = self.model.get_embedding_output()\n self.sequence_outputs = self.model.get_sequence_output()\n\n with tf.variable_scope(\"bert-encoder\"):\n out_shape = self.sequence_outputs.shape.as_list()\n last_channel_size = out_shape[-1]\n\n o_w = tf.get_variable(\"logits-w\", shape=[last_channel_size, self.num_tags], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n o_b = tf.get_variable(\"logits-b\", initializer=tf.constant(0.0, shape=[self.num_tags]))\n output_reshape = tf.reshape(self.sequence_outputs, [-1, last_channel_size])\n pred = tf.nn.xw_plus_b(output_reshape, o_w, o_b)\n s = tf.shape(self.sequence_outputs)\n logits = tf.reshape(pred, [-1, s[1], self.num_tags])\n self.logits = logits\n tf.add_to_collection(\"logits\", self.logits)\n\n cls_input_reshape = self.sequence_outputs[:, 0, :]\n # cls_input_reshape = tf.reshape(cls_output, [-1, last_channel_size])\n c_w = 
tf.get_variable(\"classes-w\", shape=[last_channel_size, 2], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n c_b = tf.get_variable(\"classes-b\", initializer=tf.constant(0.0, shape=[2]))\n classes = tf.nn.xw_plus_b(cls_input_reshape, c_w, c_b)\n self.classes = classes\n tf.add_to_collection(\"classes\", self.classes)\n\n\n def lookup_layer_ngram(self,embeddings=None,bigram=None,trigram=None):\n feature_embs = []\n with tf.variable_scope(\"words\"):\n if embeddings:\n _word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"word_embeddings\")\n else:\n _word_embeddings = tf.get_variable('word_embeddings', shape=[len(self.word2id), self.embedding_dim],\n initializer=tf.contrib.layers.xavier_initializer())\n\n if bigram:\n _bigram_embeddings = tf.Variable(bigram, dtype=tf.float32, name=\"bigram_embeddings\")\n else:\n _bigram_embeddings = tf.get_variable('bigram_embeddings', shape=[self.bucket, self.ngram_dim],\n initializer=tf.contrib.layers.xavier_initializer())\n if trigram:\n _trigram_embeddings = tf.Variable(trigram, dtype=tf.float32, name=\"trigram_embeddings\")\n else:\n _trigram_embeddings = tf.get_variable('trigram_embeddings', shape=[self.bucket, self.ngram_dim],\n initializer=tf.contrib.layers.xavier_initializer())\n\n # word embedding\n self.word_embeddings = tf.nn.embedding_lookup(params=_word_embeddings, ids=self.word_ids,\n name=\"word_embeddings\")\n feature_embs.append(self.word_embeddings)\n # bigram\n before_bigram_embeddings = tf.nn.embedding_lookup(params=_bigram_embeddings,\n ids=self.before_bigram_ids)\n after_bigram_embeddings = tf.nn.embedding_lookup(params=_bigram_embeddings, ids=self.after_bigram_ids)\n feature_embs.append(before_bigram_embeddings)\n feature_embs.append(after_bigram_embeddings)\n # trigram\n before_trigram_embeddings = tf.nn.embedding_lookup(params=_trigram_embeddings,\n ids=self.before_trigram_ids)\n after_trigram_embeddings = tf.nn.embedding_lookup(params=_trigram_embeddings,\n ids=self.after_trigram_ids)\n feature_embs.append(before_trigram_embeddings)\n feature_embs.append(after_trigram_embeddings)\n self.embs_len = len(feature_embs)\n \n feature_concat = tf.concat(feature_embs, -1)\n self.logger.info(\"feature_concat shape: {}\".format(feature_concat.shape))\n zero_one_mask = tf.sequence_mask(self.sequence_lengths, dtype=tf.float32)\n zero_one_pad = tf.expand_dims(zero_one_mask, -1)\n no_zero_embeddings = tf.multiply(feature_concat, zero_one_pad) \n self.zero_one_pad = tf.expand_dims(zero_one_pad, 1)\n \n self.uni_bi_tri_embeddings = tf.nn.dropout(no_zero_embeddings, 1-self.dropout_pl)\n self.logger.info(\"no_zero_embeddings shape: {}\".format(no_zero_embeddings.shape))\n self.logger.info(\"uni_bi_tri_embeddings shape: {}\".format(self.uni_bi_tri_embeddings.shape))\n self.input_len = (self.embs_len - 1) * self.ngram_dim + self.embedding_dim\n\n def cnn_layer_ngram(self,num_filters,filter_width=3):\n self.logger.info(\"cnn_layer_op beigin\")\n with tf.variable_scope(\"cnn\"):\n initial_num_filters = num_filters\n self.logger.info(\"uni_bi_tri_embeddings shape: {}\".format(self.uni_bi_tri_embeddings.shape))\n input_feats_expanded = tf.expand_dims(self.uni_bi_tri_embeddings, 1)\n\n last_channel_size = self.input_len\n lastoutput = input_feats_expanded\n features = []\n self.conv = []\n features.append(input_feats_expanded)\n self.conv.append(())\n for i in range(0, self.num_hidden_layers):\n if i == 0:\n width = 1\n else:\n width = filter_width\n\n with tf.variable_scope(\"layer-%d\" % i):\n filter_shape = [1, 
width, last_channel_size, initial_num_filters]\n w = tf.get_variable(\"w\", shape=filter_shape, initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(\"b\", initializer=tf.constant(0.0, shape=[initial_num_filters]))\n self.logger.info(\"lastoutput shape: {}\".format(lastoutput.shape))\n self.logger.info(\"w shape: {}\".format(w.shape))\n conv_op = tf.nn.conv2d(lastoutput, w, strides=[1,1,1,1], padding=\"SAME\",\n use_cudnn_on_gpu=True, data_format='NHWC', name=\"conv-layer-%d\" % i)\n conv_b = tf.nn.bias_add(conv_op, b)\n conv_relu = tf.nn.relu(conv_b)\n no_zero_conv_relu = tf.multiply(conv_relu, self.zero_one_pad)\n # update\n lastoutput = no_zero_conv_relu\n self.conv.append((conv_b, w, b))\n features.append(no_zero_conv_relu)\n last_channel_size = initial_num_filters\n\n '''\n # dense cnn\n lastoutput = tf.concat([lastoutput, conv_relu], 3) # [batch, 1, length, num_filer + last_output_channel_num]\n last_channel_size += initial_num_filters\n '''\n # self.cnn_output = lastoutput\n self.features = features\n self.cnn_output = tf.concat(features, 3)\n last_channel_size = self.input_len + initial_num_filters * self.num_hidden_layers\n\n o_w = tf.get_variable(\"logits-w\", shape=[last_channel_size, self.num_tags], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n o_b = tf.get_variable(\"logits-b\", initializer=tf.constant(0.0, shape=[self.num_tags]))\n output_squeeze = tf.squeeze(self.cnn_output, [1])\n output_reshape = tf.reshape(output_squeeze, [-1, last_channel_size])\n self.logger.info(\"output_reshape shape: {}\".format(output_reshape.shape))\n self.logger.info(\"o_w shape: {}\".format(o_w.shape))\n self.logger.info(\"o_b shape: {}\".format(o_b.shape))\n pred = tf.nn.xw_plus_b(output_reshape, o_w, o_b)\n s = tf.shape(output_squeeze)\n logits = tf.reshape(pred, [-1, s[1], self.num_tags])\n self.logits = logits\n tf.add_to_collection(\"logits\", self.logits)\n self.logger.info(\"cnn_layer_op end\")\n\n def softmax_pred_op(self):\n self.softmax = tf.nn.softmax(self.logits)\n self.labels_softmax_ = tf.argmax(self.logits, axis=-1)\n self.labels_softmax_ = tf.cast(self.labels_softmax_, tf.int32)\n tf.add_to_collection(\"label_softmax\",self.labels_softmax_)\n\n def loss_op(self):\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits,labels=self.labels)\n mask = tf.sequence_mask(self.sequence_lengths)\n losses = tf.boolean_mask(losses, mask)\n self.loss1 = tf.reduce_mean(losses)\n # loss2 desc consim\n softmax_true = tf.argmax(self.logits, axis=-1)\n softmax_false = tf.zeros_like(softmax_true)\n labels_softmax_ = tf.where(mask, softmax_true, softmax_false)\n labels_pred_ = tf.cast(labels_softmax_, dtype=tf.bool)\n labels_gold_ = tf.cast(self.labels, dtype=tf.bool)\n\n labels_softmax_ = tf.expand_dims(tf.cast(labels_pred_,dtype=tf.float32),-1)\n labels_softmax_ori = tf.expand_dims(tf.cast(labels_gold_,dtype=tf.float32),-1)\n\n input_desc = tf.multiply(labels_softmax_ori,self.sequence_outputs)\n pred_desc = tf.multiply(labels_softmax_,self.sequence_outputs)\n \n input_desc = tf.reduce_sum(input_desc,axis=1) # batch*seq_len*hidden_size --> batch*hidden_size\n pred_desc = tf.reduce_sum(pred_desc,axis=1)\n\n input_elem = tf.sqrt(tf.reduce_sum(tf.multiply(input_desc,input_desc),-1)) # batch\n pred_elem = tf.sqrt(tf.reduce_sum(tf.multiply(pred_desc,pred_desc), -1))\n\n desc_cos_matrix = tf.multiply(input_desc,pred_desc)\n desc_cos_vec = tf.reduce_sum(desc_cos_matrix,axis=-1) # batch\n\n desc_cos_score = tf.div(desc_cos_vec, input_elem 
* pred_elem + 1e-8) # batch\n # desc_cos_vec_sum = tf.reduce_sum(desc_cos_score,axis=-1)\n loss2 = tf.log(2/(desc_cos_score+1+1e-8))\n self.loss2 = tf.reduce_mean(loss2)\n\n # loss4\n tmp_one = tf.ones_like(self.labels,dtype=tf.int32)\n tmp_zero = tf.zeros_like(self.labels,dtype=tf.int32)\n labels_diff = tf.where(tf.equal(labels_gold_, labels_pred_), tmp_zero, tmp_one) # batch*seq_len\n cls_true = tf.ones_like(self.cls,dtype=tf.int32)\n cls_false = tf.zeros_like(self.cls,dtype=tf.int32)\n cls_bool = tf.cast(tf.reduce_sum(labels_diff, axis=-1), dtype=tf.bool)\n self.cls_pair = tf.where(cls_bool, cls_false, cls_true) # >1:0,=0:1\n label_name = tf.expand_dims(tf.cast(self.querys, dtype=tf.float32), -1)\n query_name = tf.reduce_mean(tf.multiply(label_name, self.sequence_outputs), axis=1)\n\n query_desc = tf.reduce_mean(tf.multiply(labels_softmax_,self.sequence_outputs), axis=1)\n\n cls_input = tf.concat([query_name, query_desc], 1) # batch*2hidden\n out_shape = self.sequence_outputs.shape.as_list()\n last_channel_size = out_shape[-1]\n # pair cls\n p_w = tf.get_variable(\"pair-w\", shape=[2*last_channel_size, 2], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n p_b = tf.get_variable(\"pair-b\", initializer=tf.constant(0.0, shape=[2]))\n self.paires = tf.nn.xw_plus_b(cls_input, p_w, p_b)\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.paires, labels=self.cls_pair)\n self.loss4 = tf.reduce_mean(losses)\n\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.classes, labels=self.cls)\n self.loss3 = tf.reduce_mean(losses)\n\n self.loss = self.lambda1 * self.loss1 + self.lambda2 * self.loss2 + self.lambda3 * self.loss3 + self.lambda4 * self.loss4\n\n\n\n\n\n\n","sub_path":"seq2seq/seq_model.py","file_name":"seq_model.py","file_ext":"py","file_size_in_byte":19233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"234456113","text":"\"\"\"\nFile: weather_master.py\nAuthor: Wade Chao\nDependencies: Application.py\n-----------------------\nThis program should implement a console program\nthat asks weather data from user to compute the\naverage, highest, lowest, cold days among the inputs.\n\"\"\"\nfrom Application import Application\n\n\nclass WeatherMaster(Application):\n\n\tdef __init__(self):\n\t\tself.__APP_NAME = 'Weather Master 4.0'\n\t\tsuper().__init__(self.__APP_NAME)\n\t\tself.__QUIT_COMMAND = -100\n\t\tself.__LOW_TEMPERATURE_ALARM = 16\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0\n\n\tdef _start_up(self):\n\t\tself.__set_temperatures()\n\t\tself.__reset()\n\n\tdef __set_temperatures(self):\n\t\t\"\"\"\n\t\tSet data, get the highest/lowest value and calculate the total and steps\n\t\t\"\"\"\n\t\twhile True:\n\t\t\ttemp = self.__get_correct_input()\n\t\t\tif temp != self.__QUIT_COMMAND:\n\t\t\t\tself.__total += temp\n\t\t\t\tself.__steps += 1\n\t\t\t\tif temp > self.__highest:\n\t\t\t\t\tself.__highest = temp\n\t\t\t\tif temp < self.__lowest:\n\t\t\t\t\tself.__lowest = temp\n\t\t\t\tif temp < self.__LOW_TEMPERATURE_ALARM:\n\t\t\t\t\tself.__cold_days += 1\n\t\t\telse:\n\t\t\t\tself.__print_ans()\n\t\t\t\tbreak\n\n\tdef __print_ans(self):\n\t\t\"\"\"\n\t\tpre-condition: data are set and user enter __QUIT_COMMAND\n\t\tpost-condition: print the answer of the calculation\n\t\t\"\"\"\n\t\tif self.__steps == 0:\n\t\t\tprint('============================')\n\t\t\tprint('No temperatures were 
entered')\r\n\t\t\tprint('============================')\r\n\t\telse:\r\n\t\t\tprint('============================')\r\n\t\t\tprint(f'Highest temperature: {self.__highest}')\r\n\t\t\tprint(f'Lowest temperature: {self.__lowest}')\r\n\t\t\tprint(f'Average: {self.__total / self.__steps}')\r\n\t\t\tprint(f'{self.__cold_days} cold day(s)')\r\n\t\t\tprint('============================')\r\n\r\n\tdef __reset(self):\r\n\t\t\"\"\"\r\n\t\tReset all records and get ready for another calculation\r\n\t\t\"\"\"\r\n\t\tself.__highest = -float('inf')\r\n\t\tself.__lowest = float('inf')\r\n\t\tself.__total = 0\r\n\t\tself.__steps = 0\r\n\t\tself.__cold_days = 0\r\n\r\n\tdef __get_correct_input(self):\r\n\t\t\"\"\"\r\n\t\tTo make sure the input is a number\r\n\t\t:return: user input number\r\n\t\t\"\"\"\r\n\t\twhile True:\r\n\t\t\ttemp = input(f'Next Temperature: (or {self.__QUIT_COMMAND} to quit)? ')\r\n\t\t\ttry:\r\n\t\t\t\treturn int(temp)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint('The input is not valid')\r\n\r\n\r\ndef main():\r\n\tWeatherMaster().start()\r\n\r\n\r\n# DO NOT EDIT CODE BELOW THIS LINE #\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"weather_master/weather_master.py","file_name":"weather_master.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"432462828","text":"#config.py\n\nimport os\n\n# grabs the working directory\nbasedir = os.path.abspath( os.path.dirname(__file__) )\n\nDATABASE = 'flasktaskr.db'\nWTF_CSRF_ENABLED = True\nSECRET_KEY = 'my_precious'\nDEBUG = False\n# defines the full path for the database\nDATABASE_PATH = os.path.join(basedir,DATABASE)\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"413495920","text":"#!/usr/bin/env python3\n\nimport cv2\nfrom cv_bridge import CvBridge\nimport os\nimport yaml\nimport numpy as np\n\nimport rospy\nfrom duckietown.dtros import DTROS, NodeType, TopicType\nfrom image_processing.ground_projection_geometry import Point, GroundProjectionGeometry\nfrom image_processing.rectification import Rectify\nfrom image_geometry import PinholeCameraModel\n\nfrom sensor_msgs.msg import CameraInfo, CompressedImage\nfrom geometry_msgs.msg import Point as PointMsg\nfrom duckietown_msgs.msg import Segment, SegmentList\n\n\nclass GroundProjectionNode(DTROS):\n    \"\"\"\n    This node projects the line segments detected in the image to the ground plane and in the robot's reference frame.\n    In this way it enables lane localization in the 2D ground plane. This projection is performed using the homography\n    matrix obtained from the extrinsic calibration procedure.\n\n    Args:\n        node_name (:obj:`str`): a unique, descriptive name for the node that ROS will use\n\n    Subscribers:\n        ~camera_info (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera. Needed for rectifying the segments.\n        ~lineseglist_in (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in pixel space from unrectified images\n\n    Publishers:\n        ~lineseglist_out (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in the ground plane relative to the robot origin\n        ~debug/ground_projection_image/compressed (:obj:`sensor_msgs.msg.CompressedImage`): Debug image that shows the robot relative to the projected segments. 
Useful to check if the extrinsic calibration is accurate.\n \"\"\"\n\n def __init__(self, node_name):\n # Initialize the DTROS parent class\n super(GroundProjectionNode, self).__init__(\n node_name=node_name,\n node_type=NodeType.PERCEPTION\n )\n\n self.bridge = CvBridge()\n self.ground_projector = None\n self.rectifier = None\n self.homography = self.load_extrinsics()\n self.first_processing_done = False\n self.camera_info_received = False\n\n # subscribers\n self.sub_camera_info = rospy.Subscriber(\"~camera_info\", CameraInfo, self.cb_camera_info, queue_size=1)\n self.sub_lineseglist_ = rospy.Subscriber(\"~lineseglist_in\", SegmentList, self.lineseglist_cb, queue_size=1)\n\n # publishers\n self.pub_lineseglist = rospy.Publisher(\"~lineseglist_out\",\n SegmentList, queue_size=1, dt_topic_type=TopicType.PERCEPTION)\n self.pub_debug_img = rospy.Publisher(\"~debug/ground_projection_image/compressed\",\n CompressedImage, queue_size=1, dt_topic_type=TopicType.DEBUG)\n\n self.bridge = CvBridge()\n\n self.debug_img_bg = None\n\n # Seems to be never used:\n # self.service_homog_ = rospy.Service(\"~estimate_homography\", EstimateHomography, self.estimate_homography_cb)\n # self.service_gnd_coord_ = rospy.Service(\"~get_ground_coordinate\", GetGroundCoord, self.get_ground_coordinate_cb)\n # self.service_img_coord_ = rospy.Service(\"~get_image_coordinate\", GetImageCoord, self.get_image_coordinate_cb)\n\n def cb_camera_info(self, msg):\n \"\"\"\n Initializes a :py:class:`image_processing.GroundProjectionGeometry` object and a\n :py:class:`image_processing.Rectify` object for image rectification\n\n Args:\n msg (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera.\n\n \"\"\"\n if not self.camera_info_received:\n self.rectifier = Rectify(msg)\n self.ground_projector = GroundProjectionGeometry(im_width=msg.width,\n im_height=msg.height,\n homography=np.array(self.homography).reshape((3, 3)))\n self.camera_info_received=True\n\n def pixel_msg_to_ground_msg(self, point_msg):\n \"\"\"\n Creates a :py:class:`ground_projection.Point` object from a normalized point message from an unrectified\n image. It converts it to pixel coordinates and rectifies it. Then projects it to the ground plane and\n converts it to a ROS Point message.\n\n Args:\n point_msg (:obj:`geometry_msgs.msg.Point`): Normalized point coordinates from an unrectified image.\n\n Returns:\n :obj:`geometry_msgs.msg.Point`: Point coordinates in the ground reference frame.\n\n \"\"\"\n # normalized coordinates to pixel:\n norm_pt = Point.from_message(point_msg)\n pixel = self.ground_projector.vector2pixel(norm_pt)\n # rectify\n rect = self.rectifier.rectify_point(pixel)\n # convert to Point\n rect_pt = Point.from_message(rect)\n # project on ground\n ground_pt = self.ground_projector.pixel2ground(rect_pt)\n # point to message\n ground_pt_msg = PointMsg()\n ground_pt_msg.x = ground_pt.x\n ground_pt_msg.y = ground_pt.y\n ground_pt_msg.z = ground_pt.z\n\n return ground_pt_msg\n\n def lineseglist_cb(self, seglist_msg):\n \"\"\"\n Projects a list of line segments on the ground reference frame point by point by\n calling :py:meth:`pixel_msg_to_ground_msg`. 
Then publishes the projected list of segments.\n\n Args:\n seglist_msg (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in pixel space from unrectified images\n\n \"\"\"\n if self.camera_info_received:\n seglist_out = SegmentList()\n seglist_out.header = seglist_msg.header\n for received_segment in seglist_msg.segments:\n new_segment = Segment()\n new_segment.points[0] = self.pixel_msg_to_ground_msg(received_segment.pixels_normalized[0])\n new_segment.points[1] = self.pixel_msg_to_ground_msg(received_segment.pixels_normalized[1])\n new_segment.color = received_segment.color\n # TODO what about normal and points\n seglist_out.segments.append(new_segment)\n\n rospy.sleep(0.15)\n self.pub_lineseglist.publish(seglist_out)\n\n if not self.first_processing_done:\n self.log('First projected segments published.')\n self.first_processing_done = True\n\n if self.pub_debug_img.get_num_connections() > 0:\n debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(self.debug_image(seglist_out))\n debug_image_msg.header = seglist_out.header\n self.pub_debug_img.publish(debug_image_msg)\n else:\n self.log('Waiting for a CameraInfo message', 'warn')\n\n # def get_ground_coordinate_cb(self, req):\n # return GetGroundCoordResponse(self.pixel_msg_to_ground_msg(req.uv))\n #\n # def get_image_coordinate_cb(self, req):\n # return GetImageCoordResponse(self.gpg.ground2pixel(req.gp))\n #\n # def estimate_homography_cb(self, req):\n # rospy.loginfo(\"Estimating homography\")\n # rospy.loginfo(\"Waiting for raw image\")\n # img_msg = rospy.wait_for_message(\"/\" + self.robot_name + \"/camera_node/image/raw\", Image)\n # rospy.loginfo(\"Got raw image\")\n # try:\n # cv_image = self.bridge.imgmsg_to_cv2(img_msg, desired_encoding=\"bgr8\")\n # except CvBridgeError as e:\n # rospy.logerr(e)\n # self.gp.estimate_homography(cv_image)\n # rospy.loginfo(\"wrote homography\")\n # return EstimateHomographyResponse()\n\n def load_extrinsics(self):\n \"\"\"\n Loads the homography matrix from the extrinsic calibration file.\n\n Returns:\n :obj:`numpy array`: the loaded homography matrix\n\n \"\"\"\n # load intrinsic calibration\n cali_file_folder = '/data/config/calibrations/camera_extrinsic/'\n #cali_file_folder = '/packages/ground_projection/src/data/config/calibrations/camera_extrinsic/'\n cali_file = cali_file_folder + rospy.get_namespace().strip(\"/\") + \".yaml\"\n\n # Locate calibration yaml file or use the default otherwise\n if not os.path.isfile(cali_file):\n self.log(\"Can't find calibration file: %s.\\n Using default calibration instead.\"\n % cali_file, 'warn')\n cali_file = (cali_file_folder + \"default.yaml\")\n\n # Shutdown if no calibration file not found\n if not os.path.isfile(cali_file):\n msg = 'Found no calibration file ... aborting'\n self.log(msg, 'err')\n rospy.signal_shutdown(msg)\n\n try:\n with open(cali_file,'r') as stream:\n calib_data = yaml.load(stream)\n except yaml.YAMLError:\n msg = 'Error in parsing calibration file %s ... 
aborting' % cali_file\n self.log(msg, 'err')\n rospy.signal_shutdown(msg)\n\n return calib_data['homography']\n\n def debug_image(self, seg_list):\n \"\"\"\n Generates a debug image with all the projected segments plotted with respect to the robot's origin.\n\n Args:\n seg_list (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in the ground plane relative to the robot origin\n\n Returns:\n :obj:`numpy array`: an OpenCV image\n\n \"\"\"\n # dimensions of the image are 1m x 1m so, 1px = 2.5mm\n # the origin is at x=200 and y=300\n\n # if that's the first call, generate the background\n if self.debug_img_bg is None:\n\n # initialize gray image\n self.debug_img_bg = np.ones((400, 400, 3), np.uint8) * 128\n\n # draw vertical lines of the grid\n for vline in np.arange(40,361,40):\n cv2.line(self.debug_img_bg,\n pt1=(vline, 20),\n pt2=(vline, 300),\n color=(255, 255, 0),\n thickness=1)\n\n # draw the coordinates\n cv2.putText(self.debug_img_bg, \"-20cm\", (120-25, 300+15), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 0), 1)\n cv2.putText(self.debug_img_bg, \" 0cm\", (200-25, 300+15), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 0), 1)\n cv2.putText(self.debug_img_bg, \"+20cm\", (280-25, 300+15), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 0), 1)\n\n # draw horizontal lines of the grid\n for hline in np.arange(20, 301, 40):\n cv2.line(self.debug_img_bg,\n pt1=(40, hline),\n pt2=(360, hline),\n color=(255, 255, 0),\n thickness=1)\n\n # draw the coordinates\n cv2.putText(self.debug_img_bg, \"20cm\", (2, 220+3), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 0), 1)\n cv2.putText(self.debug_img_bg, \" 0cm\", (2, 300+3), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 0), 1)\n\n # draw robot marker at the center\n cv2.line(self.debug_img_bg,\n pt1=(200 + 0, 300 - 20),\n pt2=(200 + 0, 300 + 0),\n color=(255, 0, 0),\n thickness=1)\n\n cv2.line(self.debug_img_bg,\n pt1=(200 + 20, 300 - 20),\n pt2=(200 + 0, 300 + 0),\n color=(255, 0, 0),\n thickness=1)\n\n cv2.line(self.debug_img_bg,\n pt1=(200 - 20, 300 - 20),\n pt2=(200 + 0, 300 + 0),\n color=(255, 0, 0),\n thickness=1)\n\n # map segment color variables to BGR colors\n color_map = {Segment.WHITE: (255, 255, 255),\n Segment.RED: (0, 0, 255),\n Segment.YELLOW: (0, 255, 255)}\n\n image = self.debug_img_bg.copy()\n\n # plot every segment if both ends are in the scope of the image (within 50cm from the origin)\n for segment in seg_list.segments:\n if not np.any(np.abs([segment.points[0].x, segment.points[0].y,\n segment.points[1].x, segment.points[1].y]) > 0.50):\n cv2.line(image,\n pt1=(int(segment.points[0].y * -400) + 200, int(segment.points[0].x * -400) + 300),\n pt2=(int(segment.points[1].y * -400) + 200, int(segment.points[1].x * -400) + 300),\n color=color_map.get(segment.color, (0, 0, 0)),\n thickness=1)\n\n return image\n\n\nif __name__ == '__main__':\n ground_projection_node = GroundProjectionNode(node_name='ground_projection')\n rospy.spin()\n\n","sub_path":"packages/ground_projection/src/ground_projection_node.py","file_name":"ground_projection_node.py","file_ext":"py","file_size_in_byte":12573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"176339438","text":"from .base import *\nfrom django.core.exceptions import ImproperlyConfigured\nfrom os import environ\nimport logging\n\nDEBUG = False\n\n\ndef get_env_setting(setting):\n \"\"\" Get the environment setting or return exception \"\"\"\n try:\n return environ[setting]\n except KeyError:\n error_msg = \"Set the %s env variable\" % setting\n raise 
ImproperlyConfigured(error_msg)\n\nALLOWED_HOSTS = ['.olivino.com', '.olivino.com.']\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'n%0)^irzangk6&kq_)23y#ge#8@$)9&*%q-#&@41!8*p_-!@@b'\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': u'olivino',\n 'HOST': u'rds2.dyna-code.com',\n 'USER': u'olivino',\n 'PASSWORD': u'76eldorado',\n 'PORT': 3306}\n}\n\nWAGTAILSEARCH_BACKENDS = {\n 'default': {\n 'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',\n 'INDEX': 'olivino'\n }\n}\n\n\nINSTALLED_APPS += (\n 'raven.contrib.django.raven_compat',\n)\n\nMIDDLEWARE_CLASSES += (\n 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',\n)\n\nRAVEN_CONFIG = {\n 'dsn': 'https://81c20024b1094bafad0cec7c85f5661b:3856b6fd3b3242a59588c95049d4fdbf@sentry.io/115733',\n}\n\n# Use Redis as the cache backend for extra performance\n# (requires the django-redis-cache package):\n# http://wagtail.readthedocs.org/en/latest/howto/performance.html#cache\n\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.cache.RedisCache',\n 'LOCATION': '127.0.0.1:6379',\n 'KEY_PREFIX': 'olivino',\n 'OPTIONS': {\n 'CLIENT_CLASS': 'redis_cache.client.DefaultClient',\n }\n }\n}\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n 'format': '[%(asctime)s][%(levelname)s] %(name)s '\n '%(filename)s:%(funcName)s:%(lineno)d | %(message)s',\n 'datefmt': '%H:%M:%S',\n }\n },\n 'handlers': {\n 'console': {\n 'level':'DEBUG',\n 'class':'logging.handlers.RotatingFileHandler',\n 'filename': '/var/log/olivino/olivino.log',\n 'maxBytes': 1024*1024*5, # 5 MB\n 'backupCount': 5,\n 'formatter':'console',\n },\n 'sentry': {\n 'level': 'WARNING', # To capture more than ERROR, change to WARNING, INFO, etc.\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'request_handler': {\n 'level':'DEBUG',\n 'class':'logging.handlers.RotatingFileHandler',\n 'filename': '/var/log/olivino/django_request.log',\n 'maxBytes': 1024*1024*5, # 5 MB\n 'backupCount': 5,\n 'formatter':'console',\n },\n },\n 'loggers': {\n '': {\n 'level': 'DEBUG',\n 'handlers': ['console', 'sentry'],\n 'propagate': False\n },\n 'sentry': {\n 'level': 'INFO',\n 'class': 'raven.contrib.django.handlers.SentryHandler',\n },\n 'django.request': {\n 'handlers': ['request_handler'],\n 'level': 'DEBUG',\n 'propagate': False\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n }\n}\n\nSENTRY_CELERY_LOGLEVEL = logging.INFO\n","sub_path":"olivino/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"368292692","text":"from itertools import combinations\n\n# Dictionary with compatible blood types:\n# A = A+ | Z = A- | B = B+ | Y = B- | C = AB+ | X = AB- | O = O+ | W = O-\nb_comp = {\"A\": [\"A\", \"C\"],\n \"Z\": [\"A\", \"Z\", \"C\", \"X\"],\n \"B\": [\"B\", \"C\"],\n \"Y\": [\"B\", \"Y\", \"C\", \"X\"],\n \"C\": [\"C\"],\n \"X\": [\"C\", \"X\"],\n \"O\": [\"A\", \"B\", \"C\", \"O\"],\n \"W\": [\"A\", \"Z\", \"B\", \"Y\", \"C\", \"X\", \"O\", \"W\"]}\nlast_patient = \"\"\ntotal = 0\noutput = False\n\n\n# Checks blood compatibility between 
two persons\ndef check_compatibility(patient1_blood, patient2_blood):\n for blood in b_comp[patient1_blood]:\n if blood == patient2_blood:\n return True\n return False\n\n\n# Gets the list of all compatible persons\ndef get_compatibility(combination):\n patient1 = combination[0]\n patient2 = combination[1]\n\n p1_blood = patient1[len(patient1) - 1]\n p2_blood = patient2[len(patient2) - 1]\n\n global last_patient\n if check_compatibility(p1_blood, p2_blood):\n if last_patient != patient1:\n last_patient = patient1\n if output:\n print(\"\\nPatient\", patient1, \"can donate to:\")\n if output:\n print(patient2)\n\n\n# Gets all combinations without repetition of the given persons\ndef get_combinations(persons, out):\n global total\n global output\n if out:\n output = True\n for comb in combinations(persons, 2):\n total = total + 1\n get_compatibility(comb)\n print(\"\\nTotal combinations:\", total)\n","sub_path":"bloodcomp.py","file_name":"bloodcomp.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"535538611","text":"from django.shortcuts import render,redirect,get_object_or_404\nfrom .forms import MovieForm,ReviewForm\nfrom .models import Movie,Review\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\ndef index(request):\n movies = Movie.objects.all()\n context = {\n 'movies':movies\n }\n return render(request,'movies/index.html',context)\n\ndef detail(request,movie_id):\n movie = get_object_or_404(Movie,id=movie_id)\n reviews = Review.objects.filter(movie_id=movie.pk)\n review_form = ReviewForm()\n context = {\n 'reviews':reviews,\n 'movie':movie,\n 'review_form':review_form,\n }\n return render(request,'movies/detail.html',context)\n\n@login_required\ndef review_create(request,movie_id):\n movie = get_object_or_404(Movie,id=movie_id)\n if request.method == 'POST':\n review_form = ReviewForm(request.POST)\n if review_form.is_valid():\n review = review_form.save(commit=False)\n review.movie_id = movie\n # review.user_id = request.user\n review.save()\n return redirect('movies:detail',movie_id)\n\ndef review_delete(request,movie_id,review_id):\n review = Review.objects.get(id=review_id)\n review.delete()\n return redirect('movies:detail',movie_id)\n\n\n@login_required\ndef like(request,movie_id):\n movie = get_object_or_404(Movie,pk=movie_id)\n if request.user in movie.like_users.all():\n movie.like_users.remove(request.user)\n else:\n movie.like_users.add(request.user)\n return redirect('movies:detail',movie_id)\n","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"145033365","text":"import numpy as np\nimport numba as nb\nimport pandas as pd\nimport awkward as awk\nimport re\n\nfrom utils.NumbaFuncs import get_bin_indices, event_to_object_var, interpolate\nfrom utils.Geometry import RadToCart2D, CartToRad2D\n\n@nb.vectorize([nb.float32(nb.float32,nb.float32,nb.float32,nb.float32,nb.float32),\n nb.float64(nb.float64,nb.float64,nb.float64,nb.float64,nb.float64)])\ndef jer_formula(x, p0, p1, p2, p3):\n return np.sqrt(p0*np.abs(p0)/(x*x)+p1*p1*np.power(x,p3)+p2*p2)\n\ndef met_shift(ev, unclust_energy):\n @nb.njit\n def met_shift_numba(met, mephi, jpt, jptshift, jphi, jstarts, jstops):\n jpx_old, jpy_old = RadToCart2D(jpt, jphi)\n jpx_new, jpy_new = RadToCart2D(jptshift, jphi)\n\n mex, mey = RadToCart2D(met[:], 
mephi[:])\n for iev, (start, stop) in enumerate(zip(jstarts, jstops)):\n for iob in range(start, stop):\n if jpt[iob] > unclust_energy:\n mex[iev] += jpx_old[iob]\n mey[iev] += jpy_old[iob]\n if jptshift[iob] > unclust_energy:\n mex[iev] -= jpx_new[iob]\n mey[iev] -= jpy_new[iob]\n\n return CartToRad2D(mex, mey)\n return met_shift_numba(\n ev.MET_ptJESOnly, ev.MET_phiJESOnly, ev.Jet_ptJESOnly.content,\n ev.Jet_pt.content, ev.Jet_phi.content,\n ev.Jet_pt.starts, ev.Jet_pt.stops,\n )\n\nclass JecVariations(object):\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n self.jesuncs = read_table(\n self.jes_unc_file,\n underflow_cols=[\"eta_low\"], overflow_cols=[\"eta_high\"],\n csv=True,\n )\n\n self.jersfs = read_table(\n self.jer_sf_file,\n underflow_cols=[\"eta_low\"], overflow_cols=[\"eta_high\"],\n )\n self.jers = read_table(\n self.jer_file,\n underflow_cols=[\"eta_low\", \"rho_low\"],\n overflow_cols=[\"eta_high\", \"rho_high\"],\n )\n\n def begin(self, event):\n np.random.seed(123456)\n\n # Regex the variations\n comp_jes_regex = re.compile(self.jes_regex)\n variations = []\n for v in event.attribute_variation_sources:\n match = comp_jes_regex.search(v)\n if match:\n vari = match.group(\"source\")\n if vari not in variations:\n variations.append(vari)\n\n self.jesuncs = self.jesuncs.loc[\n self.jesuncs[\"source\"].isin(variations)\n ]\n self.jesuncs[\"pt\"] = self.jesuncs[\"pt\"].apply(lambda x: list(eval(x)))\n self.jesuncs[\"corr_up\"] = self.jesuncs[\"corr_up\"].apply(lambda x: list(eval(x)))\n self.jesuncs[\"corr_down\"] = self.jesuncs[\"corr_down\"].apply(lambda x: list(eval(x)))\n self.jes_sources = self.jesuncs[\"source\"].unique()\n event.JetSources = self.jes_sources\n\n def event(self, event):\n self.do_jet_pt_resolution(event)\n self.do_jer_correction(event)\n for source in self.jes_sources:\n self.do_jes_correction(event, source)\n\n def do_jet_pt_resolution(self, event):\n indices = get_bin_indices(\n [event.Jet_eta.content,\n event_to_object_var(event.fixedGridRhoFastjetAll, event.Jet_pt.starts, event.Jet_pt.stops)],\n [self.jers[\"eta_low\"].values, self.jers[\"rho_low\"].values],\n [self.jers[\"eta_high\"].values, self.jers[\"rho_high\"].values],\n 1,\n )[:,0]\n df = self.jers.iloc[indices]\n params = df[[\"param0\", \"param1\", \"param2\", \"param3\"]].values\n ptbounds = df[[\"pt_low\", \"pt_high\"]].values\n event.Jet_ptResolution = awk.JaggedArray(\n event.Jet_pt.starts, event.Jet_pt.stops,\n jer_formula(\n np.minimum(np.maximum(event.Jet_pt.content, ptbounds[:,0]), ptbounds[:,1]),\n params[:,0], params[:,1], params[:,2], params[:,3],\n ),\n )\n\n def do_jer_correction(self, event):\n indices = get_bin_indices(\n [event.Jet_eta.content],\n [self.jersfs[\"eta_low\"].values],\n [self.jersfs[\"eta_high\"].values],\n 1,\n )[:,0]\n ressfs = self.jersfs.iloc[indices][[\"corr\", \"corr_up\", \"corr_down\"]].values\n jersfs = np.ones_like(event.Jet_pt.content, dtype=np.float32)\n jersfs_up = np.ones_like(event.Jet_pt.content, dtype=np.float32)\n jersfs_down = np.ones_like(event.Jet_pt.content, dtype=np.float32)\n\n # match gen jets\n gidx = event.Jet_genJetIdx\n gsize = event.GenJet_pt.counts\n mask = (gidx>=0) & (gidx 0:\n command = elements[0]\n if not command in COMMANDS:\n print('Invalid command: {}'.format(command))\n return\n else:\n opcode = COMMANDS[command]\n if len(elements) > 1:\n operand = elements[1]\n else:\n operand = ''\n operand_repr = operand if operand else '0'*OPERAND_LENGTH\n if operand_repr in CONSTS:\n operand_repr = 
CONSTS[operand_repr]\n command_number = bin(i)[2:].zfill(OPERAND_LENGTH)\n result_commands.append('\"{}\" & \"{}\", -- {} : {} {}'.format(opcode, operand_repr, command_number, command, operand))\n return '\\n'.join(result_commands)\n\n\ndef main():\n print(compile(sys.argv[1]))\n\nif __name__ == '__main__':\n main()\n","sub_path":"POTSP/source/Lab7/compiler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"648389962","text":"\"\"\"Functions for partitions.\n\nFor distributed training, a graph is partitioned and partitions are stored in files\norganized as follows:\n\n```\ndata_root_dir/\n |-- part_conf.json # partition configuration file in JSON\n |-- node_map # partition id of each node stored in a numpy array\n |-- edge_map # partition id of each edge stored in a numpy array\n |-- part0/ # data for partition 0\n |-- node_feats # node features stored in binary format\n |-- edge_feats # edge features stored in binary format\n |-- graph # graph structure of this partition stored in binary format\n |-- part1/ # data for partition 1\n |-- node_feats\n |-- edge_feats\n |-- graph\n```\n\nThe partition configuration file stores the file locations. For the above example,\nthe configuration file will look like the following:\n\n```\n{\n \"graph_name\" : \"test\",\n \"part_method\" : \"metis\",\n \"num_parts\" : 2,\n \"halo_hops\" : 1,\n \"node_map\" : \"data_root_dir/node_map.npy\",\n \"edge_map\" : \"data_root_dir/edge_map.npy\"\n \"num_nodes\" : 1000000,\n \"num_edges\" : 52000000,\n \"part-0\" : {\n \"node_feats\" : \"data_root_dir/part0/node_feats.dgl\",\n \"edge_feats\" : \"data_root_dir/part0/edge_feats.dgl\",\n \"part_graph\" : \"data_root_dir/part0/graph.dgl\",\n },\n \"part-1\" : {\n \"node_feats\" : \"data_root_dir/part1/node_feats.dgl\",\n \"edge_feats\" : \"data_root_dir/part1/edge_feats.dgl\",\n \"part_graph\" : \"data_root_dir/part1/graph.dgl\",\n },\n}\n```\n\nHere are the definition of the fields in the partition configuration file:\n * `graph_name` is the name of the graph given by a user.\n * `part_method` is the method used to assign nodes to partitions.\n Currently, it supports \"random\" and \"metis\".\n * `num_parts` is the number of partitions.\n * `halo_hops` is the number of HALO nodes we want to include in a partition.\n * `node_map` is the node assignment map, which tells the partition Id a node is assigned to.\n * `edge_map` is the edge assignment map, which tells the partition Id an edge is assigned to.\n * `num_nodes` is the number of nodes in the global graph.\n * `num_edges` is the number of edges in the global graph.\n * `part-*` stores the data of a partition.\n\nNodes in each partition is *relabeled* to always start with zero. We call the node\nID in the original graph, *global ID*, while the relabeled ID in each partition,\n*local ID*. Each partition graph has an integer node data tensor stored under name\n`dgl.NID` and each value is the node's global ID. 
Similarly, edges are relabeled too\nand the mapping from local ID to global ID is stored as an integer edge data tensor\nunder name `dgl.EID`.\n\nNote that each partition can contain *HALO* nodes and edges, those belonging to\nother partitions but are included in this partition for integrity or efficiency concerns.\nWe call nodes and edges that truly belong to one partition *local nodes/edges*, while\nthe rest \"HALO nodes/edges\".\n\nNode and edge features are split and stored together with each graph partition.\nWe do not store features of HALO nodes and edges.\n\nTwo useful functions in this module:\n * :func:`~dgl.distributed.load_partition` loads one partition and the metadata into memory.\n * :func:`~dgl.distributed.partition_graph` partitions a graph into files organized as above.\n\n\"\"\"\n\nimport json\nimport os\nimport time\nimport numpy as np\n\nfrom .. import backend as F\nfrom ..base import NID, EID\nfrom ..random import choice as random_choice\nfrom ..data.utils import load_graphs, save_graphs, load_tensors, save_tensors\nfrom ..transform import metis_partition_assignment, partition_graph_with_halo\nfrom .graph_partition_book import GraphPartitionBook, RangePartitionBook\n\ndef load_partition(conf_file, part_id):\n    ''' Load data of a partition from the data path in the DistGraph server.\n\n    A partition's data includes the graph structure of the partition, a dict of node tensors,\n    a dict of edge tensors and some metadata. The partition may contain the HALO nodes,\n    which are the nodes replicated from other partitions. However, the dict of node tensors\n    only contains the node data that belongs to the local partition. Similarly, the edge tensors\n    only contain the edge data that belongs to the local partition. The metadata includes\n    the information of the global graph (not the local partition), which includes the number\n    of nodes, the number of edges as well as the node assignment of the global graph.\n\n    The function currently loads data through the normal filesystem interface. 
In the future,\n we need to support loading data from other storage such as S3 and HDFS.\n\n Parameters\n ----------\n conf_file : str\n The path of the partition config file.\n part_id : int\n The partition Id.\n\n Returns\n -------\n DGLGraph\n The graph partition structure.\n dict of tensors\n All node features.\n dict of tensors\n All edge features.\n GraphPartitionBook\n The global partition information.\n str\n The graph name\n '''\n with open(conf_file) as conf_f:\n part_metadata = json.load(conf_f)\n assert 'part-{}'.format(part_id) in part_metadata, \"part-{} does not exist\".format(part_id)\n part_files = part_metadata['part-{}'.format(part_id)]\n assert 'node_feats' in part_files, \"the partition does not contain node features.\"\n assert 'edge_feats' in part_files, \"the partition does not contain edge feature.\"\n assert 'part_graph' in part_files, \"the partition does not contain graph structure.\"\n node_feats = load_tensors(part_files['node_feats'])\n edge_feats = load_tensors(part_files['edge_feats'])\n graph = load_graphs(part_files['part_graph'])[0][0]\n\n assert NID in graph.ndata, \"the partition graph should contain node mapping to global node Id\"\n assert EID in graph.edata, \"the partition graph should contain edge mapping to global edge Id\"\n\n gpb, graph_name = load_partition_book(conf_file, part_id, graph)\n return graph, node_feats, edge_feats, gpb, graph_name\n\ndef load_partition_book(conf_file, part_id, graph=None):\n ''' Load a graph partition book from the partition config file.\n\n Parameters\n ----------\n conf_file : str\n The path of the partition config file.\n part_id : int\n The partition Id.\n graph : DGLGraph\n The graph structure\n\n Returns\n -------\n GraphPartitionBook\n The global partition information.\n str\n The graph name\n '''\n with open(conf_file) as conf_f:\n part_metadata = json.load(conf_f)\n assert 'num_parts' in part_metadata, 'num_parts does not exist.'\n num_parts = part_metadata['num_parts']\n assert 'num_nodes' in part_metadata, \"cannot get the number of nodes of the global graph.\"\n assert 'num_edges' in part_metadata, \"cannot get the number of edges of the global graph.\"\n assert 'node_map' in part_metadata, \"cannot get the node map.\"\n assert 'edge_map' in part_metadata, \"cannot get the edge map.\"\n assert 'graph_name' in part_metadata, \"cannot get the graph name\"\n\n # If this is a range partitioning, node_map actually stores a list, whose elements\n # indicate the boundary of range partitioning. 
Otherwise, node_map stores a filename\n    # that contains the node map in a NumPy array.\n    is_range_part = isinstance(part_metadata['node_map'], list)\n    node_map = part_metadata['node_map'] if is_range_part else np.load(part_metadata['node_map'])\n    edge_map = part_metadata['edge_map'] if is_range_part else np.load(part_metadata['edge_map'])\n    assert isinstance(node_map, list) == isinstance(edge_map, list), \\\n        \"The node map and edge map need to have the same format\"\n\n    if is_range_part:\n        return RangePartitionBook(part_id, num_parts, np.array(node_map),\n                                  np.array(edge_map)), part_metadata['graph_name']\n    else:\n        return GraphPartitionBook(part_id, num_parts, node_map, edge_map,\n                                  graph), part_metadata['graph_name']\n\ndef partition_graph(g, graph_name, num_parts, out_path, num_hops=1, part_method=\"metis\",\n                    reshuffle=True, balance_ntypes=None, balance_edges=False):\n    ''' Partition a graph for distributed training and store the partitions on files.\n\n    The partitioning occurs in three steps: 1) run a partition algorithm (e.g., Metis) to\n    assign nodes to partitions; 2) construct partition graph structure based on\n    the node assignment; 3) split the node features and edge features based on\n    the partition result.\n\n    The partitioned data is stored in multiple files.\n\n    First, the metadata of the original graph and the partitioning is stored in a JSON file\n    named after `graph_name`. This JSON file contains the information of the original graph\n    as well as the file names that store each partition.\n\n    If we don't reshuffle node Ids to ensure that all nodes in a partition fall into\n    a contiguous Id range, the node assignment is stored in a separate numpy file.\n\n    All node features in a partition are stored in a file with DGL format. The node features are\n    stored in a dictionary, in which the key is the node data name and the value is a tensor.\n\n    All edge features in a partition are stored in a file with DGL format. The edge features are\n    stored in a dictionary, in which the key is the edge data name and the value is a tensor.\n\n    The graph structure of a partition is stored in a file with the DGLGraph format. The DGLGraph\n    contains the mapping of node/edge Ids to the Ids in the global graph. The mappings can be\n    accessed with `part.ndata[dgl.NID]` and `part.edata[dgl.EID]`, where `part` is the partition\n    graph structure. In addition to the mapping, the partition graph contains node data\n    (\"inner_node\" and \"orig_id\") and edge data (\"inner_edge\").\n\n    * \"inner_node\" indicates whether a node belongs to a partition.\n    * \"inner_edge\" indicates whether an edge belongs to a partition.\n    * \"orig_id\" exists when reshuffle=True. It indicates the original node Ids in the original\n    graph before reshuffling.\n\n    When performing Metis partitioning, we can put some constraints on the partitioning.\n    Currently, it supports two constraints to balance the partitioning. By default, Metis\n    always tries to balance the number of nodes in each partition.\n\n    * `balance_ntypes` balances the number of nodes of different types in each partition.\n    * `balance_edges` balances the number of edges in each partition.\n\n    To balance the node types, a user needs to pass a vector of N elements to indicate\n    the type of each node. 
N is the number of nodes in the input graph.\n\n Parameters\n ----------\n g : DGLGraph\n The input graph to partition\n graph_name : str\n The name of the graph.\n num_parts : int\n The number of partitions\n num_hops : int\n The number of hops of HALO nodes we construct on a partition graph structure.\n part_method : str\n The partition method. It supports \"random\" and \"metis\".\n out_path : str\n The path to store the files for all partitioned data.\n reshuffle : bool\n Reshuffle nodes and edges so that nodes and edges in a partition are in\n contiguous Id range.\n balance_ntypes : tensor\n Node type of each node\n balance_edges : bool\n Indicate whether to balance the edges.\n '''\n if num_parts == 1:\n parts = {0: g}\n node_parts = F.zeros((g.number_of_nodes(),), F.int64, F.cpu())\n g.ndata[NID] = F.arange(0, g.number_of_nodes())\n g.edata[EID] = F.arange(0, g.number_of_edges())\n g.ndata['inner_node'] = F.ones((g.number_of_nodes(),), F.int8, F.cpu())\n g.edata['inner_edge'] = F.ones((g.number_of_edges(),), F.int8, F.cpu())\n if reshuffle:\n g.ndata['orig_id'] = F.arange(0, g.number_of_nodes())\n g.edata['orig_id'] = F.arange(0, g.number_of_edges())\n elif part_method == 'metis':\n node_parts = metis_partition_assignment(g, num_parts, balance_ntypes=balance_ntypes,\n balance_edges=balance_edges)\n parts = partition_graph_with_halo(g, node_parts, num_hops, reshuffle=reshuffle)\n elif part_method == 'random':\n node_parts = random_choice(num_parts, g.number_of_nodes())\n parts = partition_graph_with_halo(g, node_parts, num_hops, reshuffle=reshuffle)\n else:\n raise Exception('Unknown partitioning method: ' + part_method)\n\n # Let's calculate edge assignment.\n if not reshuffle:\n start = time.time()\n # We only optimize for reshuffled case. So it's fine to use int64 here.\n edge_parts = np.zeros((g.number_of_edges(),), dtype=np.int64) - 1\n for part_id in parts:\n part = parts[part_id]\n # To get the edges in the input graph, we should use original node Ids.\n local_edges = F.boolean_mask(part.edata[EID], part.edata['inner_edge'])\n edge_parts[F.asnumpy(local_edges)] = part_id\n print('Calculate edge assignment: {:.3f} seconds'.format(time.time() - start))\n\n os.makedirs(out_path, mode=0o775, exist_ok=True)\n tot_num_inner_edges = 0\n out_path = os.path.abspath(out_path)\n\n # Without reshuffling, we have to store the entire node/edge mapping in a file.\n if not reshuffle:\n node_part_file = os.path.join(out_path, \"node_map\")\n edge_part_file = os.path.join(out_path, \"edge_map\")\n np.save(node_part_file, F.asnumpy(node_parts), allow_pickle=False)\n np.save(edge_part_file, edge_parts, allow_pickle=False)\n node_map_val = node_part_file + \".npy\"\n edge_map_val = edge_part_file + \".npy\"\n else:\n # With reshuffling, we can ensure that all nodes and edges are reshuffled\n # and are in contiguous Id space.\n if num_parts > 1:\n node_map_val = [F.as_scalar(F.sum(F.astype(parts[i].ndata['inner_node'], F.int64),\n 0)) for i in parts]\n node_map_val = np.cumsum(node_map_val).tolist()\n assert node_map_val[-1] == g.number_of_nodes()\n edge_map_val = [F.as_scalar(F.sum(F.astype(parts[i].edata['inner_edge'], F.int64),\n 0)) for i in parts]\n edge_map_val = np.cumsum(edge_map_val).tolist()\n assert edge_map_val[-1] == g.number_of_edges()\n else:\n node_map_val = [g.number_of_nodes()]\n edge_map_val = [g.number_of_edges()]\n\n start = time.time()\n part_metadata = {'graph_name': graph_name,\n 'num_nodes': g.number_of_nodes(),\n 'num_edges': g.number_of_edges(),\n 'part_method': 
part_method,\n 'num_parts': num_parts,\n 'halo_hops': num_hops,\n 'node_map': node_map_val,\n 'edge_map': edge_map_val}\n for part_id in range(num_parts):\n part = parts[part_id]\n\n # Get the node/edge features of each partition.\n node_feats = {}\n edge_feats = {}\n if num_parts > 1:\n # To get the edges in the input graph, we should use original node Ids.\n ndata_name = 'orig_id' if reshuffle else NID\n edata_name = 'orig_id' if reshuffle else EID\n local_nodes = F.boolean_mask(part.ndata[ndata_name], part.ndata['inner_node'])\n local_edges = F.boolean_mask(part.edata[edata_name], part.edata['inner_edge'])\n print('part {} has {} nodes and {} edges.'.format(\n part_id, part.number_of_nodes(), part.number_of_edges()))\n print('{} nodes and {} edges are inside the partition'.format(\n len(local_nodes), len(local_edges)))\n tot_num_inner_edges += len(local_edges)\n for name in g.ndata:\n if name in [NID, 'inner_node']:\n continue\n node_feats[name] = F.gather_row(g.ndata[name], local_nodes)\n for name in g.edata:\n if name in [EID, 'inner_edge']:\n continue\n edge_feats[name] = F.gather_row(g.edata[name], local_edges)\n else:\n for name in g.ndata:\n if name in [NID, 'inner_node']:\n continue\n node_feats[name] = g.ndata[name]\n for name in g.edata:\n if name in [EID, 'inner_edge']:\n continue\n edge_feats[name] = g.edata[name]\n\n part_dir = os.path.join(out_path, \"part\" + str(part_id))\n node_feat_file = os.path.join(part_dir, \"node_feat.dgl\")\n edge_feat_file = os.path.join(part_dir, \"edge_feat.dgl\")\n part_graph_file = os.path.join(part_dir, \"graph.dgl\")\n part_metadata['part-{}'.format(part_id)] = {'node_feats': node_feat_file,\n 'edge_feats': edge_feat_file,\n 'part_graph': part_graph_file}\n os.makedirs(part_dir, mode=0o775, exist_ok=True)\n save_tensors(node_feat_file, node_feats)\n save_tensors(edge_feat_file, edge_feats)\n save_graphs(part_graph_file, [part])\n\n with open('{}/{}.json'.format(out_path, graph_name), 'w') as outfile:\n json.dump(part_metadata, outfile, sort_keys=True, indent=4)\n print('Save partitions: {:.3f} seconds'.format(time.time() - start))\n\n num_cuts = g.number_of_edges() - tot_num_inner_edges\n if num_parts == 1:\n num_cuts = 0\n print('There are {} edges in the graph and {} edge cuts for {} partitions.'.format(\n g.number_of_edges(), num_cuts, num_parts))\n","sub_path":"python/dgl/distributed/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":17790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"604886699","text":"import tensorflow as tf\nimport numpy as np\n\n_IMAGE_SIZE = 32\n_IMAGE_CHANNELS = 3\n_NUM_CLASSES = 10\n_RESHAPE_SIZE = 4*4*128\n_NUM_CHANNELS = 3\n_SAVE_PATH = \"./tensorboard/cifar-10/\"\n\n\ndef variable_with_weight_decay( name, shape, stddev, wd):\n dtype = tf.float32\n var = variable_on_cpu( name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\ndef variable_on_cpu(name, shape, initializer):\n \n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var \n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 
1],padding='SAME')\n\ndef conv7d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 8, 8, 1],padding='VALID')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n \ndef max_pool_3x3(x):\n return tf.nn.max_pool(x, ksize=[1, 3, 3, 1],\n strides=[1, 3, 3, 1], padding='SAME')\n\ndef setup_model_sda(noisy_cs_measurements, image_shape):\n \n num_measurement= noisy_cs_measurements.get_shape()[1].value\n \n layer_width=700\n \n var_list = []\n \n W_fc03 = variable_with_weight_decay('weights003', shape=[num_measurement, layer_width], stddev=5e-2, wd=None)\n b_fc03 = variable_on_cpu('biases003', [layer_width], tf.constant_initializer(0.0))\n\n h_fc03 = tf.nn.relu(tf.matmul(noisy_cs_measurements, W_fc03)+b_fc03)\n \n var_list.append(W_fc03)\n var_list.append(b_fc03)\n \n W_fc04 = variable_with_weight_decay('weights004', shape=[layer_width, layer_width], stddev=5e-2, wd=None)\n b_fc04 = variable_on_cpu('biases004', [layer_width], tf.constant_initializer(0.0))\n \n h_fc04 = tf.nn.relu(tf.matmul(h_fc03, W_fc04)+b_fc04)\n \n var_list.append(W_fc04)\n var_list.append(b_fc04)\n \n W_fc05 = variable_with_weight_decay('weights005', shape=[layer_width, image_shape], stddev=5e-2, wd=None)\n b_fc05 = variable_on_cpu('biases005', [image_shape], tf.constant_initializer(0.0))\n \n y_est = tf.nn.sigmoid(tf.matmul(h_fc04, W_fc05) + b_fc05)\n \n var_list.append(W_fc05)\n var_list.append(b_fc05)\n \n MM= tf.reduce_mean(tf.abs(W_fc05)) + tf.reduce_mean(tf.abs(b_fc05))\n \n return y_est, MM, var_list\n\ndef setup_model_dr2net(noisy_cs_measurements, image_shape):\n \n num_measurement= noisy_cs_measurements.get_shape()[1].value\n \n print(num_measurement)\n \n width=int(np.sqrt(image_shape))\n \n var_list =[]\n \n W_fc02 = weight_variable([num_measurement, image_shape])\n b_fc02 = bias_variable([image_shape])\n \n h_fc02 = tf.nn.relu(tf.matmul(noisy_cs_measurements, W_fc02)+b_fc02)\n \n \n var_list.append(W_fc02)\n var_list.append(b_fc02)\n \n x_image = tf.reshape(h_fc02, [-1, width, width, 1])\n \n #x_image = gaussian_noise_layer(x_image, .2)\n \n \n #x_image = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), x_image)\n \n \n # Convolutional layer 2\n W_conv1 = weight_variable([11, 11, 1, 64])\n b_conv1 = bias_variable([64])\n \n var_list.append(W_conv1)\n var_list.append(b_conv1)\n \n \n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_conv1_norm = tf.nn.lrn(h_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n \n W_conv2 = weight_variable([1, 1, 64, 32])\n b_conv2 = bias_variable([32])\n \n var_list.append(W_conv2)\n var_list.append(b_conv2)\n \n h_conv2 = tf.nn.relu(conv2d(h_conv1_norm, W_conv2) + b_conv2)\n h_conv2_norm = tf.nn.lrn(h_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n \n W_conv3 = weight_variable([7, 7, 32, 1])\n b_conv3 = bias_variable([1])\n \n var_list.append(W_conv3)\n var_list.append(b_conv3)\n \n h_conv3 = (conv2d(h_conv2_norm, W_conv3) + b_conv3)\n \n h_conv3_add= x_image + h_conv3\n \n \n \n W_conv4 = weight_variable([11, 11, 1, 64])\n b_conv4 = bias_variable([64])\n \n var_list.append(W_conv4)\n var_list.append(b_conv4)\n \n h_conv4 = tf.nn.relu(conv2d(h_conv3_add, W_conv4) + b_conv4)\n h_conv4_norm = tf.nn.lrn(h_conv4, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n \n W_conv5 = weight_variable([1, 1, 64, 32])\n b_conv5 = bias_variable([32])\n \n var_list.append(W_conv5)\n var_list.append(b_conv5)\n \n h_conv5 = tf.nn.relu(conv2d(h_conv4_norm, W_conv5) + b_conv5)\n h_conv5_norm = tf.nn.lrn(h_conv5, 4, 
bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n \n W_conv6 = weight_variable([7, 7, 32, 1])\n b_conv6 = bias_variable([1])\n \n var_list.append(W_conv6)\n var_list.append(b_conv6)\n \n y = (conv2d(h_conv5_norm, W_conv6) + b_conv6) + h_conv3_add\n \n y_est = tf.reshape(y, [-1, 32*32])\n \n MM= tf.reduce_mean(tf.abs(W_fc02)) + tf.reduce_mean(tf.abs(b_fc02)) \n\n\n return y_est, MM,var_list","sub_path":"code_tmp/vae_model.py","file_name":"vae_model.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"263782394","text":"from lexicon_convert import lexicon_convert\r\nfrom stats import stats, stats_dash\r\nfrom reweight import reweight, reweight_dash\r\nfrom bayes import posterior, combine, save_readable\r\n\r\nimport pickle\r\n\r\nGE = 'german'\r\nPC = 'GermanPC'\r\nWS = 'GermanSentiWS'\r\nSP = 'GermanSentiSpin'\r\n\r\nfull_lex = lexicon_convert('../../data/merged_lex_hash.pm')\r\n\r\n# Remove SentiSpin\r\nlex = {}\r\nfor item, vals in full_lex.items():\r\n if SP in vals:\r\n if len(vals) == 1:\r\n continue\r\n else:\r\n del vals[SP]\r\n lex[item] = vals\r\n\r\nrms = stats(lex, (GE, PC, WS))\r\nreweight(lex, rms)\r\nnew = stats_dash(lex, (GE, WS))\r\nreweight_dash(lex, new)\r\n\r\ninf = posterior(lex, (GE, PC, WS), tol=10**-9,\r\n bounds = ((0.001,10),(0.001,5),(0.001,5),(0.001,5)),\r\n initial = (0.6, 0.3, 0.3, 0.3))\r\n\r\ndeviation = {'prior':inf['x'][0],\r\n GE:inf['x'][1],\r\n PC:inf['x'][2],\r\n WS:inf['x'][3]}\r\n\r\nnew_lex, weight = combine(lex, deviation)\r\n\r\nwith open('../../data/sentimerge_nospin.pk', 'wb') as f:\r\n pickle.dump((new_lex, weight), f)\r\n\r\nsave_readable(new_lex, weight, '../../data/sentimerge_nospin.txt')","sub_path":"src/main/merge_nospin.py","file_name":"merge_nospin.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"317142508","text":"import pygame\nfrom pygame.sprite import Sprite\n\n\nclass Ship(Sprite):\n def __init__(self, game_settings, screen):\n \"\"\"\n Initialize the ship and set its starting position\n \"\"\"\n super(Ship, self).__init__()\n self.screen = screen\n self.game_settings = game_settings\n\n # Load the ship image and get its rect\n self.image = pygame.image.load('images/m_falcon.png')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n # Start each new ship at the bottom center of the screen\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom - 5\n\n # Store decimal value of ship's center\n self.center = float(self.rect.centerx)\n self.bottom = float(self.rect.bottom)\n\n # Movement flag\n self.moving_right = False\n self.moving_left = False\n self.moving_forward = False\n self.moving_backward = False\n\n # Set movement parameters\n self.moving_foward_parameter = self.screen_rect.bottom - 75\n self.moving_backward_parameter = self.screen_rect.bottom - 5\n\n def update(self):\n # Update ship position based on movement flag\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.center += self.game_settings.ship_speed_factor\n\n if self.moving_left and self.rect.left > 0:\n self.center -= self.game_settings.ship_speed_factor\n\n if self.moving_forward and self.rect.bottom > self.moving_foward_parameter:\n self.bottom -= self.game_settings.ship_speed_factor\n\n if self.moving_backward and self.rect.bottom < self.moving_backward_parameter:\n self.bottom += 
self.game_settings.ship_speed_factor\n\n        # Update rect object from self.center\n        self.rect.centerx = self.center\n        self.rect.bottom = self.bottom\n\n    def center_ship(self):\n        self.center = self.screen_rect.centerx\n\n    def blitme(self):\n        \"\"\"\n        Draw the ship at its current location\n        \"\"\"\n        self.screen.blit(self.image, self.rect)\n","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"528272225","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport hashlib\r\nimport logging\r\nfrom builtins import int\r\n\r\n#Configuration file class\r\nclass CConfig:\r\n    #Username\r\n    UserName = None\r\n    #Password\r\n    Password = None\r\n    #Whether to order an overtime meal\r\n    Dinner = 1\r\n    #Whether to take the shuttle bus\r\n    Bus = 0\r\n    #Overtime reason\r\n    Reason = \"\"\r\n    #Skip login\r\n    bSkip = False\r\n    #Software version\r\n    Version = \"\"\r\n    #Whether the config file has been modified\r\n    bFileChange = False\r\n    \r\n    #File config class\r\n    _cCfgFile = None\r\n    #Encryption algorithm class\r\n    _cDes = None\r\n    #Temporary member variable: whether to update the encrypted file\r\n    bUpdate = False\r\n\r\n    #Function name: CConfig::__init__\r\n    #Function purpose: constructor\r\n    #Function parameter: cCfgFile : config file info\r\n    #Function parameter: des : encryption/decryption class\r\n    def __init__(self, cCfgFile, des):\r\n        self._cCfgFile = cCfgFile\r\n        self._cDes = des\r\n    \r\n    #Function name: CConfig::ReadFile\r\n    #Function purpose: read the config file\r\n    #Function returns: 0 success, 1 failed to open the file, 2 decryption failed, 3 invalid argument\r\n    #Function parameter: none\r\n    #Function parameter: none\r\n    def ReadFile(self):\r\n        #Software version\r\n        self.Version = self._cCfgFile.ReadVersionFile()\r\n        if (None == self.Version):\r\n            logging.error(\"打开版本信息失败\")\r\n            return 1\r\n        \r\n        FileText = self._cCfgFile.ReadTextFile()\r\n        if None == FileText:\r\n            logging.error(\"打开配置文件失败\")\r\n            return 1\r\n        \r\n        #V2 decryption\r\n        if (\"V3\" != FileText[0:2]):\r\n            nRet = self.V2Decrypt(FileText)\r\n        else:\r\n            nRet = self.V3Decrypt(FileText)\r\n        \r\n        return nRet\r\n\r\n    #Function name: CConfig::WriteFile\r\n    #Function purpose: write the config file\r\n    #Function returns: 0 success, 1 failed to open the file, 2 encryption failed, 3 invalid argument\r\n    #Function parameter: none\r\n    #Function parameter: none\r\n    def WriteFile(self):\r\n        #Data buffer\r\n        szTemp = \"\"\r\n        \r\n        #Compute the MD5 of the username\r\n        Md5Value = self.CalcMD5(self.UserName)\r\n        if (32 != len(Md5Value)):\r\n            logging.error(\"数据加密失败:MD5值长度不正确\") \r\n        \r\n        #Encode the user data into a string\r\n        Data = self.UserName + '-' + self.Password + '-'\r\n        if self.Dinner:\r\n            Data += '1-'\r\n        else:\r\n            Data += '0-'\r\n        \r\n        if self.Bus:\r\n            Data += '1-'\r\n        else:\r\n            Data += '0-'\r\n        Data += self.Reason\r\n        \r\n        #Skip-login field\r\n        szSkip = '1' if self.bSkip else '0'\r\n        Data = Data + '-' + szSkip\r\n        \r\n        try: \r\n            EncryptData = self._cDes.Encrypt(Data, Md5Value)\r\n            if (None == EncryptData):\r\n                logging.error(\"数据加密失败:导入的MD5值不正确\")\r\n                return 2\r\n        except:\r\n            logging.error(\"数据加密失败:未知原因\")\r\n            return 2\r\n        \r\n        #Assemble the output\r\n        szTemp = 'V3' + '032' + Md5Value\r\n        EncryptDataLen = len(EncryptData)\r\n        if (EncryptDataLen < 10):\r\n            szTemp = szTemp + '00' + str(EncryptDataLen)\r\n        elif (EncryptDataLen < 100):\r\n            szTemp = szTemp + '0' + str(EncryptDataLen)\r\n        elif (EncryptDataLen < 1000):\r\n            szTemp = szTemp + str(EncryptDataLen)\r\n        #Data exceeds the expected length\r\n        else:\r\n            logging.error(\"数据超过预期长度,请精简加班理由\")\r\n            return 3\r\n        szTemp += EncryptData.decode()\r\n        \r\n        #Write to the file\r\n        if (False == self._cCfgFile.WriteTextFile(szTemp)):\r\n            return 1\r\n        \r\n        return 0\r\n\r\n    #Function name: CConfig::CalcMD5\r\n    #Function purpose: compute an MD5 hash\r\n    #Function returns: the MD5 value\r\n    #Function parameter: Data : the data to hash\r\n    def CalcMD5(self, Data):\r\n        #Compute the MD5 of the data\r\n        cMD5 = hashlib.md5(Data.encode(encoding='utf-8'))\r\n        #cMD5.update(Data)\r\n        Md5Value = cMD5.hexdigest()\r\n        return Md5Value\r\n\r\n    #Function name: CConfig::V2Decrypt\r\n    #Function purpose: V2 decryption\r\n    #Function returns: the decrypted message\r\n    #Function parameter: FileText: the file contents that were read\r\n    def V2Decrypt(self, FileText):\r\n        #Split the string\r\n        EncryptData = FileText.split(' ')\r\n        if (3 > len(EncryptData)):\r\n            logging.error(\"数据解密失败:文件内容未找到有效分割点\")\r\n            return 2\r\n        #Read the MD5 of the username\r\n        UserMD5 = EncryptData[0]\r\n        \r\n        #Decrypt the data\r\n        try:\r\n            DecryptData = self._cDes.Decrypt(EncryptData[1], UserMD5)\r\n            if (None == DecryptData):\r\n                logging.error(\"数据解密失败:导入的MD5值不正确\")\r\n                return 2\r\n        except:\r\n            logging.error(\"数据解密失败:未知原因\")\r\n            return 2\r\n        \r\n        #Split the decrypted data\r\n        Couple = DecryptData.split('-')\r\n        if 5 > len(Couple):\r\n            logging.error(\"数据解密失败:解密后的数据不符合要求\")\r\n            return 2\r\n        self.UserName = Couple[0]\r\n        self.Password = Couple[1]\r\n        if '1' == Couple[2]:\r\n            self.Dinner = True\r\n        elif '0' == Couple[2]:\r\n            self.Dinner = False\r\n        else:\r\n            return 3\r\n        \r\n        if '1' == Couple[3]:\r\n            self.Bus = True\r\n        elif '0' == Couple[3]:\r\n            self.Bus = False\r\n        else:\r\n            return 3\r\n        self.Reason = Couple[4]\r\n        #Verify the username\r\n        Md5Value = self.CalcMD5(self.UserName)\r\n        if (Md5Value != EncryptData[0]):\r\n            logging.error(\"数据解密失败:MD5值校验不通过\")\r\n            return 3\r\n        \r\n        #Whether to skip login\r\n        if ('1' == EncryptData[2]):\r\n            self.bSkip = True\r\n        \r\n        self.bUpdate = True\r\n        return 0\r\n\r\n    #Function name: CConfig::V3Decrypt\r\n    #Function purpose: V3 decryption\r\n    #Function returns: the decrypted message\r\n    #Function parameter: FileText: the file contents that were read\r\n    def V3Decrypt(self, FileText):\r\n        #Data buffer\r\n        szDataTemp = FileText\r\n        #Length string buffer\r\n        szLenTemp = \"\"\r\n        #Length of the following data\r\n        DataLen = 0\r\n        #MD5\r\n        szMD5 = \"\"\r\n        \r\n        #Read the MD5 data length\r\n        szDataTemp = szDataTemp[2: ]\r\n        szLenTemp = szDataTemp[0:3]\r\n        DataLen = int(szLenTemp)\r\n        if (32 != DataLen):\r\n            logging.error(\"数据解密失败:MD5长度不正确\")\r\n            return 2\r\n        #Read the MD5\r\n        szDataTemp = szDataTemp[3: ]\r\n        szMD5 = szDataTemp[0:DataLen]\r\n        #Read the encrypted data length\r\n        szDataTemp = szDataTemp[DataLen: ]\r\n        szLenTemp = szDataTemp[0:3]\r\n        DataLen = int(szLenTemp)\r\n        \r\n        #Decrypt the data\r\n        szDataTemp = szDataTemp[3: ]\r\n        try:\r\n            DecryptData = self._cDes.Decrypt(szDataTemp[0:DataLen], szMD5)\r\n            if (None == DecryptData):\r\n                logging.error(\"数据解密失败:导入的MD5值不正确\")\r\n                return 2\r\n        except:\r\n            logging.error(\"数据解密失败:未知原因\")\r\n            return 2\r\n        \r\n        #Split the decrypted data\r\n        Couple = DecryptData.split('-')\r\n        if 6 > len(Couple):\r\n            logging.error(\"数据解密失败:解密后的数据不符合要求\")\r\n            return 2\r\n        self.UserName = Couple[0]\r\n        self.Password = Couple[1]\r\n        if '1' == Couple[2]:\r\n            self.Dinner = True\r\n        elif '0' == Couple[2]:\r\n            self.Dinner = False\r\n        else:\r\n            return 3\r\n        \r\n        if '1' == Couple[3]:\r\n            self.Bus = True\r\n        elif '0' == Couple[3]:\r\n            self.Bus = False\r\n        else:\r\n            return 3\r\n        self.Reason = Couple[4]\r\n        #Verify the username\r\n        Md5Value = self.CalcMD5(self.UserName)\r\n        if (Md5Value != szMD5):\r\n            logging.error(\"数据解密失败:MD5值校验不通过\")\r\n            return 3\r\n        \r\n        #Whether to skip login\r\n        if ('1' == Couple[5]):\r\n            self.bSkip = True\r\n        \r\n        return 0\r\n\r\n    \r\n#File I/O class\r\nclass CFileMng:\r\n    __m_szPath = \"\"\r\n    __m_szVersionPath = \"\"\r\n\r\n    #Function name: CFileMng::__init__\r\n    #Function purpose: constructor\r\n    #Function returns: none\r\n    #Function parameter: szConfigPath: config file path\r\n    #Function parameter: szVersionPath: version file path\r\n    def __init__(self, szConfigPath, szVersionPath):\r\n        self.__m_szPath = szConfigPath\r\n        self.__m_szVersionPath = szVersionPath\r\n\r\n\r\n    #Function name: CFileMng::ReadTextFile\r\n    #Function purpose: read a text file\r\n    #Function returns: the file contents on success, None on failure\r\n    #Function parameter: none\r\n    def ReadTextFile(self):\r\n        #Open the file\r\n        try:\r\n            File_Object = open(self.__m_szPath, 'r')\r\n        except:\r\n            return None\r\n\r\n        #Read the file contents\r\n        try:\r\n            Text = File_Object.read()\r\n        except:\r\n            File_Object.close()\r\n            return None\r\n\r\n        File_Object.close()\r\n        return Text\r\n\r\n\r\n    #Function name: CFileMng::WriteTextFile\r\n    #Function purpose: write a text file\r\n    #Function returns: True on success, False on failure\r\n    #Function parameter: szText : the text to write\r\n    def WriteTextFile(self, szText):\r\n        #Open the file\r\n        try:\r\n            File_Object = open(self.__m_szPath, 'w')\r\n        except:\r\n            return False\r\n\r\n        #Write the file contents\r\n        try:\r\n            File_Object.write(szText)\r\n        except:\r\n            File_Object.close()\r\n            return False\r\n\r\n        File_Object.close()\r\n        return True\r\n\r\n\r\n    #Function name: CFileMng::DelTextFile\r\n    #Function purpose: delete a text file\r\n    #Function returns: none\r\n    #Function parameter: szFilePath: file path\r\n    def DelTextFile(self):\r\n        if False == os.path.exists(self.__m_szPath):\r\n            return\r\n        os.remove(self.__m_szPath)\r\n        return\r\n    \r\n    #Function name: CFileMng::ReadVersionFile\r\n    #Function purpose: read the software version\r\n    #Function returns: the software version\r\n    #Function parameter: none\r\n    def ReadVersionFile(self):\r\n        #Read the file\r\n        try:\r\n            File_Object = open(self.__m_szVersionPath, 'r')\r\n            Text = File_Object.read()\r\n            File_Object.close()\r\n        except:\r\n            return None\r\n        \r\n        #Parse the AddWork version\r\n        Index = Text.find(\"AddWork=\")\r\n        if (-1 == Index):\r\n            return None\r\n        Index += len(\"AddWork=\")\r\n        return Text[Index:]","sub_path":"Codes/ConfigFileIO.py","file_name":"ConfigFileIO.py","file_ext":"py","file_size_in_byte":10754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"248975117","text":"\n# contador = 0\n\n# while contador < 10:\n#     contador += 1\n#     print(contador)\n\n\n# contador_externo = 0\n# contador_interno = 0\n\n# while contador_externo < 5:\n#     while contador_interno < 6:\n#         print(contador_externo, contador_interno)\n#         contador_interno += 1\n#     contador_externo += 1\n#     contador_interno = 0\n\n# Iteration over objects\n\nfrutas = ['Naranja', 'Pera', 'Manzana', 'Frutilla', 'Piña', 'Limon', 'Cereza', 'Melon', 'Chirimolla', 'Durazno']\nfrutasVacias = []\n# for <name> in <object>:\n# /*code*/\n\n# Hypothesis\n# When we put an else on a definite for loop, the else block runs when the loop finishes its full traversal or when there is nothing to traverse ^_^\n\n# Example when the list is empty\nfor fruta in frutasVacias:\n    print(fruta)\nelse:\n    print('Tengo vida cuando termina el recorrido completo o cuando no tiene nada\\n')\n# Example when it finishes traversing all the data it holds\nfor fruta in frutas:\n    print(fruta)\nelse:\n    print('Tengo vida cuando termina el recorrido completo o cuando no tiene nada\\n')\n\n# Iteration when we don't know what kind of traversal we are dealing with\n# we create an iterator for that data type with iter(dato)\n# Each time we want to see the next item we use next(iterador), which gives us the following value\n\n# Form 1\niterador = iter(frutas)\ncantidadDeDatos = len(frutas)\n\n# for i in range(0, cantidadDeDatos):\n#     print(next(iterador))\n\n# Form 2\n# If we swap iter(frutas) for the iterator we created earlier, we will no longer be able to read the data it contains, since counting its elements consumes it 🤯!!\n# because of this we must create the iterator again for whatever we want to count, so that counting consumes that copy and does not affect the one we use to print or consume the data\ncantidadDeDatosDelIterador = sum(1 for _ in iter(frutas))\n\nfor i in range(0, cantidadDeDatosDelIterador):\n    print(next(iterador))\n\n# NB: once we traverse an iterator we cannot traverse it again, so do it only in whichever form you prefer","sub_path":"iteraciones/iteraciones.py","file_name":"iteraciones.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"288860098","text":"def geometric_average(numbers, total):\n\treturn numbers ** ( 1 / total)\n\neven_product = 1\ncounter = 0\nfinish = 
False\n\nwhile not finish:\n\ttry:\n\t\tuser_input = input('Enter number from 1 to 999 (0 to finish): ')\n\t\tuser_input = int(user_input)\n\t\tif user_input == 0:\n\t\t\tfinish = True\n\t\telif 0 < user_input <= 1000:\n\t\t\tif user_input % 2 == 0:\n\t\t\t\teven_product = even_product * int(user_input)\n\t\t\t\tcounter = counter + 1 \n\t\telse:\n\t\t\tprint('Value out of range! Please enter a positive even numbers less than 1000.', end=\" \")\n\texcept ValueError:\n\t\tprint(\"That was not a number.\", end=\" \")\n\nif counter > 0:\n\tnum = geometric_average(even_product, counter)\n\tprint('The geometric mean is: ', \"{0:.3f}\".format(num))\n \nelse:\n\tprint('No positive even number less than 1000 was entered.')","sub_path":"L1/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"412218717","text":"\"\"\"\nGenericCustomAction Module\n\"\"\"\n\nimport logging\nfrom Action import Action\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass GenericCustomAction(Action):\n \"\"\"\n GenericCustomAction Class\n \"\"\"\n\n type = \"generic\"\n\n def initialize_action(self):\n return super().initialize_action()\n\n def execute(self, action_input=None):\n super().execute(action_input)\n\n if action_input.sender == self.name:\n return None\n\n # Action\n return_value = \"\"\n\n # do stuff...\n\n # returns the output\n return (return_value, \"*\")\n","sub_path":"Actions/GenericCustomAction.py","file_name":"GenericCustomAction.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"619839542","text":"import discord\nfrom discord.ext.commands import Bot, Command\nimport os\nfrom pymongo import MongoClient\nfrom cogs.photo import Photo\nfrom cogs.games import Games\nfrom cogs.inventory import Inventory\nfrom cogs.quest_and_maps_generator import QuestsAndMaps\nfrom json import loads\ntry:\n from secrets import *\nexcept ImportError:\n pass\n\n# Main settings\nTOKEN = os.environ.get('TOKEN')\nprefix = '.'\n\n# Cogs setup\ncogs_dir = 'cogs'\ndict_of_cog_names_and_classes = {'photo': Photo, 'games': Games,\n 'inventory': Inventory, 'quest_and_maps_generator': QuestsAndMaps}\nlist_of_full_cog_path = [f\"{cogs_dir}.{cog}\" for cog in dict_of_cog_names_and_classes.keys()]\n\n# DB\nm_client = MongoClient(os.environ.get('DB'))\ndb = m_client['my_db']\n\n# Localization\nlist_of_available_languages = ['en', 'ua']\nserver_languages_collection = db['server_languages']\n\n# Bot setup\nclient = Bot(prefix)\nclient.remove_command('help')\n\n\n@client.event\nasync def on_ready():\n for cog in list_of_full_cog_path:\n client.load_extension(cog)\n print(f'Logged in as: {client.user.name}')\n print(f'With ID: {client.user.id}')\n print(f'Loaded cogs: {list(dict_of_cog_names_and_classes.keys())}')\n\n\n@client.command(aliases=['мова'])\nasync def language(ctx, new_language):\n if new_language in list_of_available_languages:\n server_languages_collection.replace_one({'id': ctx.guild.id}, {'id': ctx.guild.id, 'language': new_language})\n await ctx.send('Done')\n\n\n@client.command(aliases=['поможіть', 'help'])\nasync def _help(ctx, module=None):\n no_module = False\n # Check for module\n try:\n put_class = dict_of_cog_names_and_classes[module]\n except KeyError:\n no_module = True\n # Check for localization\n try:\n localization = server_languages_collection.find_one({'id': ctx.guild.id})['language']\n 
except TypeError:\n server_languages_collection.insert_one({'id': ctx.guild.id, 'language': 'en'})\n localization = 'en'\n\n if no_module:\n dict_of_answers = {\n 'en': f'No such module. Check module list by typing {prefix}modules', \n 'ua': f\"Такого модуля не існує. Перевірте наявні модулі прописавши {prefix}модулі\"\n }\n await ctx.send(dict_of_answers[localization])\n return\n\n help_string = \"\"\n try:\n for name, func in put_class.__dict__.items():\n if type(func) == Command:\n if not name == '__init__':\n f_dict = loads(func.__doc__)[localization]\n help_string += f\"{f_dict['name']}:\\n\t{f_dict['description']}\\n\"\n except KeyError:\n localization = \"en\"\n for _, func in put_class.__dict__.items():\n if type(func) == Command:\n f_dict = loads(func.__doc__)[localization]\n help_string += f\"{f_dict['name']}:\\n\t{f_dict['description']}\\n\"\n await ctx.send(help_string)\n\n\n@client.command(aliases=['модулі'])\nasync def modules(ctx):\n await ctx.send(dict_of_cog_names_and_classes.keys())\nclient.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"73337786","text":"import pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.ticker as mtick\nfrom imgurpython import ImgurClient\n\ndef grafica(general, medallas, precision, nombre, sender, heroe):\n time = float(general[5]/general[4])\n winrate = general[3]\n bronze_rate = float(medallas[1]/medallas[0])\n silver_rate = float(medallas[2]/medallas[0])\n gold_rate = float(medallas[3]/medallas[0])\n precision_mira = precision[0]\n precision_sinmira = precision[1]\n lista = [(time*100),winrate,(bronze_rate*100),(silver_rate*100),\n (gold_rate*100),(precision_mira*100),(precision_sinmira*100)]\n nueva_lista = []\n for a in lista:\n a = round(a,2)\n nueva_lista.append(a)\n crear_archivo(nueva_lista,nombre, heroe, sender)\n\ndef crear_archivo(porcentajes, nombre, heroe, user_id):\n\n mycolors = ['black','green','brown','grey','yellow','purple','red']\n raw_data = {'label': ['Time','Win Rate','Bronze','Silver','Gold','Scoped','Unscoped'],\n heroe: [porcentajes[0], porcentajes[1], porcentajes[2],\n porcentajes[3], porcentajes[4], porcentajes[5],\n porcentajes[6]]}\n\n df = pd.DataFrame(raw_data)\n plt = df.plot(x='label',kind='bar',fontsize=6, color=mycolors)\n for p in plt.patches:\n plt.annotate(str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005))\n grf = plt.get_figure()\n grf.savefig(str(nombre)+'.png')\n fichero = open(str(nombre)+'.png', 'rb')\n to_upload = fichero.read()\n nombre = str(nombre)+'.png'\n subir(grf, nombre, user_id, heroe)\n\ndef subir(grafica, nombre, user_id, heroe):\n client_id = '...'\n client_secret = '...'\n client = ImgurClient(client_id, client_secret)\n id_ = client.upload_from_path(nombre)\n print('Gráfica subida a: ', id_['link'])\n archivo = open('graficas.txt', 'a')\n os.remove(nombre)\n archivo.write(id_['link']+' '+str(user_id)+' '+heroe+'\\n')\n archivo.close()\n","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"611290729","text":"\nn = int(input(\"digite o número de pessoas: \"))\n\nsoma = 0\ni = 1\nwhile i <= n:\n idade = int(input(\"pessoa {}, digite a sua idade: \".format(i)))\n soma += idade\n i += 1\n\nmedia = soma/n\n\nif 0 <= media <= 25:\n print(\"A média {} cai no intervalo [0, 25], uma 
turma jovem\".format(media))\nelif 26 <= media <= 60:\n print(\"A média {} cai no intervalo [26, 60], uma turma adulta\".format(media))\nelif media > 60:\n print(\"A média {} cai no intervalo maior que 60 anos, uma turma idosa\".format(media))\nelse:\n print(\"digite uma idade válida.\")","sub_path":"lista3/18_idade.py","file_name":"18_idade.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"98798187","text":"import asyncio\nimport contextlib\nimport enum\nimport logging\nimport wave\nfrom inspect import cleandoc\nfrom typing import Tuple\n\nimport discord\nfrom d4dj_utils.master.chart_master import ChartDifficulty, ChartMaster\nfrom d4dj_utils.master.common_enums import ChartSectionType\nfrom d4dj_utils.master.music_master import MusicMaster\nfrom discord.ext import commands\n\nfrom miyu_bot.bot.bot import D4DJBot\nfrom miyu_bot.commands.common.argument_parsing import parse_arguments, ArgumentError, list_operator_for\nfrom miyu_bot.commands.common.asset_paths import get_chart_image_path, get_music_jacket_path, get_chart_mix_path\nfrom miyu_bot.commands.common.emoji import difficulty_emoji_ids\nfrom miyu_bot.commands.common.formatting import format_info\nfrom miyu_bot.commands.common.fuzzy_matching import romanize\nfrom miyu_bot.commands.common.reaction_message import run_tabbed_message, run_paged_message, run_deletable_message\n\n\nclass Music(commands.Cog):\n bot: D4DJBot\n\n def __init__(self, bot):\n self.bot = bot\n self.logger = logging.getLogger(__name__)\n\n @property\n def reaction_emojis(self):\n return [self.bot.get_emoji(eid) for eid in difficulty_emoji_ids.values()]\n\n difficulty_names = {\n 'expert': ChartDifficulty.Expert,\n 'hard': ChartDifficulty.Hard,\n 'normal': ChartDifficulty.Normal,\n 'easy': ChartDifficulty.Easy,\n 'exp': ChartDifficulty.Expert,\n 'hrd': ChartDifficulty.Hard,\n 'nrm': ChartDifficulty.Normal,\n 'esy': ChartDifficulty.Easy,\n 'ex': ChartDifficulty.Expert,\n 'hd': ChartDifficulty.Hard,\n 'nm': ChartDifficulty.Normal,\n 'es': ChartDifficulty.Easy,\n }\n\n @commands.command(name='song',\n aliases=['music'],\n description='Finds the song with the given name.',\n help='!song grgr')\n async def song(self, ctx: commands.Context, *, arg: commands.clean_content):\n self.logger.info(f'Searching for song \"{arg}\".')\n\n song = self.bot.asset_filters.music.get(arg, ctx)\n\n if not song:\n msg = f'No results for song \"{arg}\".'\n await ctx.send(msg)\n self.logger.info(msg)\n return\n self.logger.info(f'Found song \"{song}\" ({romanize(song.name)}).')\n\n embed = discord.Embed(title=song.name)\n embed.set_thumbnail(url=self.bot.asset_url + get_music_jacket_path(song))\n\n artist_info = {\n 'Lyricist': song.lyricist,\n 'Composer': song.composer,\n 'Arranger': song.arranger,\n 'Unit': song.unit.name,\n 'Special Unit Name': song.special_unit_name,\n }\n\n music_info = {\n 'Category': song.category.name,\n 'Duration': self.format_duration(self.get_music_duration(song)),\n 'BPM': song.bpm,\n 'Section Trend': song.section_trend.name,\n 'Sort Order': song.default_order,\n 'Levels': ', '.join(c.display_level for c in song.charts.values()),\n 'Release Date': song.start_datetime,\n }\n\n embed.add_field(name='Artist',\n value=format_info(artist_info),\n inline=False)\n embed.add_field(name='Info',\n value=format_info(music_info),\n inline=False)\n\n message = await ctx.send(embed=embed)\n await run_deletable_message(ctx, message)\n\n @commands.command(name='chart',\n 
aliases=[],\n description='Finds the chart with the given name.',\n help='!chart grgr\\n!chart grgr normal')\n async def chart(self, ctx: commands.Context, *, arg: commands.clean_content):\n self.logger.info(f'Searching for chart \"{arg}\".')\n\n name, difficulty = self.parse_chart_args(arg)\n song = self.bot.asset_filters.music.get(name, ctx)\n\n if not song:\n msg = f'Failed to find chart \"{name}\".'\n await ctx.send(msg)\n self.logger.info(msg)\n return\n self.logger.info(f'Found song \"{song}\" ({romanize(song.name)}).')\n\n embeds = self.get_chart_embeds(song)\n\n # Difficulty enum easy-expert are 1-4, one more than the embed index\n asyncio.ensure_future(run_tabbed_message(ctx, self.reaction_emojis, embeds, None, difficulty - 1))\n\n @commands.command(name='sections',\n aliases=['mixes'],\n description='Finds the sections of the chart with the given name.',\n help='!sections grgr')\n async def sections(self, ctx: commands.Context, *, arg: commands.clean_content):\n self.logger.info(f'Searching for chart sections \"{arg}\".')\n\n name, difficulty = self.parse_chart_args(arg)\n song = self.bot.asset_filters.music.get(name, ctx)\n\n if not song:\n msg = f'Failed to find chart \"{name}\".'\n await ctx.send(msg)\n self.logger.info(msg)\n return\n if not song.mix_info:\n msg = f'Song \"{song.name}\" does not have mix enabled.'\n await ctx.send(msg)\n self.logger.info(msg)\n return\n self.logger.info(f'Found song \"{song}\" ({romanize(song.name)}).')\n\n embeds = self.get_mix_embeds(song)\n\n asyncio.ensure_future(run_tabbed_message(ctx, self.reaction_emojis, embeds, None, difficulty - 1))\n\n @commands.command(name='songs',\n aliases=['songsearch', 'song_search'],\n description='Finds songs matching the given name.',\n brief='!songs lhg',\n help=cleandoc('''\n Named arguments:\n sort (<, =) [default|name|id|unit|level|difficulty|duration|date]\n [display|disp] = [default|name|id|unit|level|difficulty|duration|date]\n [difficulty|diff|level] ? ...\n \n Tags:\n unit: [happy_around|peaky_p-key|photon_maiden|merm4id|rondo|lyrical_lily|other]\n \n Extended examples:\n Songs in descending difficulty order\n !songs sort=11+ diff<=13+\n Songs with difficulty exactly 10 or 14, sorted alphabetically, displaying duration\n !songs diff=10,14 sort=name disp=duration\n Songs by happy around\n !songs $happy_around'''))\n async def songs(self, ctx: commands.Context, *, arg: commands.clean_content = ''):\n self.logger.info(f'Searching for songs \"{arg}\".' 
if arg else 'Listing songs.')\n arguments = parse_arguments(arg)\n\n try:\n sort, sort_op = arguments.single('sort', MusicAttribute.DefaultOrder,\n allowed_operators=['<', '>', '='], converter=music_attribute_aliases)\n reverse_sort = sort_op == '<' or arguments.tag('reverse')\n display, _ = arguments.single(['display', 'disp'], sort, allowed_operators=['='],\n converter=music_attribute_aliases)\n units = {self.bot.aliases.units_by_name[unit].id\n for unit in arguments.tags(names=self.bot.aliases.units_by_name.keys(),\n aliases=self.bot.aliases.unit_aliases)}\n\n def difficulty_converter(d):\n return int(d[:-1]) + 0.5 if d[-1] == '+' else int(d)\n\n difficulty = arguments.repeatable(['difficulty', 'diff', 'level'], is_list=True,\n converter=difficulty_converter)\n\n songs = self.bot.asset_filters.music.get_sorted(arguments.text(), ctx)\n\n arguments.require_all_arguments_used()\n except ArgumentError as e:\n await ctx.send(str(e))\n return\n\n for value, op in difficulty:\n operator = list_operator_for(op)\n songs = [song for song in songs if operator(song.charts[4].level, value)]\n\n if units:\n songs = [song for song in songs if song.unit.id in units]\n\n if not (arguments.text_argument and sort == MusicAttribute.DefaultOrder):\n songs = sorted(songs, key=lambda s: sort.get_sort_key_from_music(s))\n if sort == MusicAttribute.DefaultOrder and songs and songs[0].id == 1:\n songs = [*songs[1:], songs[0]]\n if sort in [MusicAttribute.Level, MusicAttribute.Date]:\n songs = songs[::-1]\n if reverse_sort:\n songs = songs[::-1]\n\n listing = []\n for song in songs:\n display_prefix = display.get_formatted_from_music(song)\n if display_prefix:\n listing.append(\n f'{display_prefix} : {song.name}{\" (\" + song.special_unit_name + \")\" if song.special_unit_name else \"\"}')\n else:\n listing.append(f'{song.name}{\" (\" + song.special_unit_name + \")\" if song.special_unit_name else \"\"}')\n\n embed = discord.Embed(title=f'Song Search \"{arg}\"' if arg else 'Songs')\n asyncio.ensure_future(run_paged_message(ctx, embed, listing))\n\n def get_chart_embeds(self, song):\n embeds = []\n\n for difficulty in [ChartDifficulty.Easy, ChartDifficulty.Normal, ChartDifficulty.Hard, ChartDifficulty.Expert]:\n chart = song.charts[difficulty]\n embed = discord.Embed(title=f'{song.name} [{chart.difficulty.name}]')\n embed.set_thumbnail(url=self.bot.asset_url + get_music_jacket_path(song))\n embed.set_image(url=self.bot.asset_url + get_chart_image_path(chart))\n chart_data = chart.load_chart_data()\n note_counts = chart_data.get_note_counts()\n\n embed.add_field(name='Info',\n value=f'Level: {chart.display_level}\\n'\n f'Duration: {self.format_duration(self.get_music_duration(song))}\\n'\n f'Unit: {song.special_unit_name or song.unit.name}\\n'\n f'Category: {song.category.name}\\n'\n f'BPM: {song.bpm}\\n'\n f'Designer: {chart.designer.name}',\n inline=False)\n embed.add_field(name='Combo',\n value=f'Max Combo: {chart.note_counts[ChartSectionType.Full].count}\\n'\n f'Taps: {note_counts[\"tap\"]} (dark: {note_counts[\"tap1\"]}, light: {note_counts[\"tap2\"]})\\n'\n f'Scratches: {note_counts[\"scratch\"]} (left: {note_counts[\"scratch_left\"]}, right: {note_counts[\"scratch_right\"]})\\n'\n f'Stops: {note_counts[\"stop\"]} (head: {note_counts[\"stop_start\"]}, tail: {note_counts[\"stop_end\"]})\\n'\n f'Long: {note_counts[\"long\"]} (head: {note_counts[\"long_start\"]}, tail: {note_counts[\"long_end\"]})\\n'\n f'Slide: {note_counts[\"slide\"]} (tick: {note_counts[\"slide_tick\"]}, flick 
{note_counts[\"slide_flick\"]})',\n inline=True)\n embed.add_field(name='Ratings',\n value=f'NTS: {round(chart.trends[0] * 100, 2)}%\\n'\n f'DNG: {round(chart.trends[1] * 100, 2)}%\\n'\n f'SCR: {round(chart.trends[2] * 100, 2)}%\\n'\n f'EFT: {round(chart.trends[3] * 100, 2)}%\\n'\n f'TEC: {round(chart.trends[4] * 100, 2)}%\\n',\n inline=True)\n embed.set_footer(text='1 column = 10 seconds')\n\n embeds.append(embed)\n\n return embeds\n\n def get_mix_embeds(self, song):\n embeds = []\n\n for difficulty in [ChartDifficulty.Easy, ChartDifficulty.Normal, ChartDifficulty.Hard, ChartDifficulty.Expert]:\n chart: ChartMaster = song.charts[difficulty]\n embed = discord.Embed(title=f'Mix: {song.name} [{chart.difficulty.name}]')\n embed.set_thumbnail(url=self.bot.asset_url + get_music_jacket_path(song))\n embed.set_image(url=self.bot.asset_url + get_chart_mix_path(chart))\n\n note_counts = chart.note_counts\n mix_info = chart.mix_info\n\n info = {\n 'Level': chart.display_level,\n 'Unit': song.unit.name,\n 'BPM': song.bpm,\n 'Section Trend': song.section_trend.name,\n }\n\n begin = {\n 'Time': f'{round(mix_info[ChartSectionType.Begin].duration, 2)}s',\n 'Combo': note_counts[ChartSectionType.Begin].count,\n }\n middle = {\n 'Time': f'{round(mix_info[ChartSectionType.Middle].duration, 2)}s',\n 'Combo': note_counts[ChartSectionType.Middle].count,\n }\n end = {\n 'Time': f'{round(mix_info[ChartSectionType.End].duration, 2)}s',\n 'Combo': note_counts[ChartSectionType.End].count,\n }\n\n embed.add_field(name='Info',\n value=format_info(info),\n inline=False)\n embed.add_field(name='Begin',\n value=format_info(begin),\n inline=True)\n embed.add_field(name='Middle',\n value=format_info(middle),\n inline=True)\n embed.add_field(name='End',\n value=format_info(end),\n inline=True)\n embed.set_footer(text='1 column = 10 seconds')\n\n embeds.append(embed)\n\n return embeds\n\n def parse_chart_args(self, arg: str) -> Tuple[str, ChartDifficulty]:\n split_args = arg.split()\n\n difficulty = ChartDifficulty.Expert\n if len(split_args) >= 2:\n final_word = split_args[-1].lower()\n if final_word in self.difficulty_names:\n difficulty = self.difficulty_names[final_word]\n arg = ''.join(split_args[:-1])\n return arg, difficulty\n\n _music_durations = {}\n\n @staticmethod\n def get_music_duration(music: MusicMaster):\n if music.id in Music._music_durations:\n return Music._music_durations[music.id]\n with contextlib.closing(wave.open(str(music.audio_path.with_name(music.audio_path.name + '.wav')), 'r')) as f:\n frames = f.getnframes()\n rate = f.getframerate()\n duration = frames / float(rate)\n Music._music_durations[music.id] = duration\n return duration\n\n @staticmethod\n def format_duration(seconds):\n minutes = int(seconds // 60)\n seconds = round(seconds % 60, 2)\n return f'{minutes}:{str(int(seconds)).zfill(2)}.{str(int(seconds % 1 * 100)).zfill(2)}'\n\n\nclass MusicAttribute(enum.Enum):\n DefaultOrder = enum.auto()\n Name = enum.auto()\n Id = enum.auto()\n Unit = enum.auto()\n Level = enum.auto()\n Duration = enum.auto()\n Date = enum.auto()\n\n def get_sort_key_from_music(self, music: MusicMaster):\n return {\n self.DefaultOrder: -music.default_order,\n self.Name: music.name,\n self.Id: music.id,\n self.Unit: music.unit.name if not music.special_unit_name else f'{music.unit.name} ({music.special_unit_name})',\n self.Level: music.charts[4].display_level,\n self.Duration: Music.get_music_duration(music),\n self.Date: music.start_datetime\n }[self]\n\n def get_formatted_from_music(self, music: MusicMaster):\n 
return {\n self.DefaultOrder: None,\n self.Name: None,\n self.Id: str(music.id).zfill(7),\n self.Unit: music.unit.name if not music.special_unit_name else f'{music.unit.name} ({music.special_unit_name})',\n self.Level: music.charts[4].display_level.ljust(3),\n self.Duration: Music.format_duration(Music.get_music_duration(music)),\n self.Date: str(music.start_datetime.date()),\n }[self]\n\n\nmusic_attribute_aliases = {\n 'default': MusicAttribute.DefaultOrder,\n 'name': MusicAttribute.Name,\n 'id': MusicAttribute.Id,\n 'relevance': MusicAttribute.Name,\n 'unit': MusicAttribute.Unit,\n 'level': MusicAttribute.Level,\n 'difficulty': MusicAttribute.Level,\n 'diff': MusicAttribute.Level,\n 'duration': MusicAttribute.Duration,\n 'length': MusicAttribute.Duration,\n 'date': MusicAttribute.Date,\n}\n\n\ndef setup(bot):\n bot.add_cog(Music(bot))\n","sub_path":"miyu_bot/commands/cogs/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":16914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"340703685","text":"import logging, pygame, os\nfrom punchykickgravityflipwarz.sprite_sheet import SpriteSheet\nfrom punchykickgravityflipwarz.entity import Entity\n\nlogger = logging.getLogger(__name__)\n\nclass ItemType: # A pickupable item in the world \n def __init__(self, world):\n self.name = \"Unknown\"\n self.item_size = 16\n self.world = world\n\n def action(self, player):\n logger.debug(f\"Player [{player.name}] used a [{self.name}].\")\n\nclass Grenades(ItemType):\n def __init__(self, world):\n super().__init__(world)\n self.name = \"Grenades\"\n self.item_size = 16\n self.grenades_sheet = SpriteSheet(os.path.join('punchykickgravityflipwarz', 'resources', \"grenade.png\"))\n self.explosion_sheet = SpriteSheet(os.path.join('punchykickgravityflipwarz', 'resources', \"explosion.png\"))\n self.max_time_out = 10\n self.time_out = 0\n\n def action(self, player):\n if self.time_out != 0:\n return (False, [])\n\n super().action(player)\n self.time_out = self.max_time_out\n\n grenade = Grenade(self.world, player.rect.x, player.rect.y, self.explosion_sheet)\n grenade.add_sprite(\"default\", self.grenades_sheet, (0, 0, self.item_size, self.item_size))\n grenade.set_sprite(\"default\")\n grenade.update_animation()\n\n if player.direction == 0:\n grenade.vel_x = -5\n else:\n grenade.vel_x = 5\n\n grenade.vel_y = -6\n\n return (False, [grenade]) # returns whether its used up, and any items needed to be created.\n\n def update(self):\n if self.time_out > 0:\n self.time_out -= 1\n\nclass Item(Entity): # got to be an entity to be rendered\n\n def __init__(self, world, x, y, w, h):\n super().__init__(x, y, w, h)\n self.world = world\n self.vel_x = 0\n self.vel_y = 0\n self.direction = 0\n self.timer = 100\n self.fixed = False\n\n def update(self):\n if not self.fixed:\n # left/right\n self.rect.x += self.vel_x\n tile_hit_list = pygame.sprite.spritecollide(self, self.world.tiles, False)\n for tile in tile_hit_list:\n if self.vel_x > 0:\n self.rect.right = tile.rect.left\n elif self.vel_x < 0:\n self.rect.left = tile.rect.right\n self.vel_x = 0\n\n # up/down movement \n if self.vel_y == 0:\n self.vel_y = 1\n else:\n self.vel_y += .35\n\n self.rect.y += self.vel_y\n tile_hit_list = pygame.sprite.spritecollide(self, self.world.tiles, False)\n for tile in tile_hit_list:\n self.vel_x *= 0.95\n if abs(self.vel_x) < 0.01:\n self.vel_x = 0\n\n if self.vel_y > 0:\n self.rect.bottom = tile.rect.top\n self.vel_y = -self.vel_y\n if self.vel_y < 0.01:\n self.vel_y = 
0\n\n elif self.vel_y < 0:\n self.rect.top = tile.rect.bottom\n self.vel_y = 0\n\n super().update_animation()\n\nclass Grenade(Item):\n def __init__(self, world, x, y, explosion_sheet):\n super().__init__(world, x, y, 16, 16)\n self.timer = 100\n self.explosion_sheet = explosion_sheet\n\n def update(self):\n super().update()\n self.timer -= 1\n if self.timer <= 0:\n explosion = Explosion(self.world, self.rect.x, self.rect.y)\n explosion.add_sprites(\"default\", self.explosion_sheet, (0, 0, 96, 96), 6, (0, 96))\n explosion.set_sprite(\"default\")\n explosion.update_animation()\n return (True, [explosion])\n return (False, [])\n\nclass Explosion(Item):\n def __init__(self, world, x, y):\n super().__init__(world, x-44, y-44, 96, 96)\n self.timer = 100\n self.frame = 36\n self.fixed = True\n\n def update(self):\n super().update()\n self.frame -= 1\n if self.frame <= 0: \n tile_hit_list = pygame.sprite.spritecollide(self, self.world.tiles, False)\n for tile in tile_hit_list:\n tile.damage += 100\n\n return (True, [])\n else: return (False, [])","sub_path":"punchykickgravityflipwarz/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"469929479","text":"\"\"\"\nGit methods\n\"\"\"\n\n__date__ = '2013-12-13'\n__license__ = 'GPL v2.0 (or later)'\n\nimport sys\nimport os\nimport subprocess\nfrom datetime import datetime\n\nfrom re import search\nfrom collections import OrderedDict\n\nfrom dulwich import walk\nfrom dulwich import index\nfrom dulwich import porcelain\nfrom dulwich.repo import Repo\nfrom dulwich.diff_tree import tree_changes\nfrom dulwich.porcelain import push, pull, commit, tag\n\nfrom config import log, exit_codes, configure\n\n\nclass GitMethodsError(Exception):\n \"\"\" Basic exception class for UserMetric types \"\"\"\n def __init__(self, message=\"Git deploy error.\", exit_code=1):\n Exception.__init__(self, message)\n self._exit_code = int(exit_code)\n\n @property\n def exit_code(self):\n return self._exit_code\n\n\nclass GitMethods(object):\n\n # Module level attribute for tagging datetime format\n DATE_TIME_TAG_FORMAT = '%Y%m%d-%H%M%S'\n\n # Default tag message\n DEFAULT_TAG_MSG = 'GitDeploy Tag.'\n\n # Default tag message\n DEFAULT_COMMIT_MSG = 'GitDeploy Commit'\n\n # class instance\n __instance = None\n\n def __init__(self, *args, **kwargs):\n \"\"\" Initialize class instance \"\"\"\n self.__class__.__instance = self\n\n def __new__(cls, *args, **kwargs):\n \"\"\" This class is Singleton, return only one instance \"\"\"\n if not cls.__instance:\n cls.__instance = super(GitMethods, cls).__new__(cls, *args,\n **kwargs)\n # Call config\n cls.__instance._configure(**kwargs)\n\n return cls.__instance\n\n def _configure(self, **kwargs):\n self.config = configure(**kwargs)\n\n def _get_latest_deploy_tag(self):\n \"\"\"\n Returns the latest tag containing 'sync'\n Sets self._tag to tag string\n \"\"\"\n return self._get_deploy_tags()[-1]\n\n def _get_deploy_tags(self):\n \"\"\"\n Returns the all deploy tags.\n \"\"\"\n # 1. Pull last 'num_tags' sync tags\n # 2. 
Filter only matched deploy tags\n        tags = GitMethods()._dulwich_get_tags().keys()\n        f = lambda x: search(self.config['repo_name'] + '-sync-', x)\n        return filter(f, tags)\n\n    def _make_tag(self, tag_type):\n        timestamp = datetime.now().strftime(self.DATE_TIME_TAG_FORMAT)\n        return '{0}-{1}-{2}'.format(self.config['repo_name'], tag_type,\n                                    timestamp)\n\n    def _make_author(self):\n        return '{0} <{1}>'.format(self.config['user.name'],\n                                  self.config['user.email'])\n\n    def _git_commit_list(self):\n        \"\"\"\n        Generate an in-order list of commits\n        \"\"\"\n        _repo = Repo(self.config['top_dir'])\n\n        commits = []\n        for entry in _repo.get_walker(order=walk.ORDER_DATE):\n            commits.append(entry.commit.id)\n\n        return commits\n\n    def _git_diff(self, sha_1, sha_2):\n        \"\"\"Produce the diff between sha1 & sha2\n\n        :param sha_1: commit sha of \"before\" state\n        :param sha_2: commit sha of \"after\" state\n        \"\"\"\n        _repo = Repo(self.config['top_dir'])\n\n        c_old = _repo.get_object(sha_1)\n        c_new = _repo.get_object(sha_2)\n\n        # default writes to stdout\n        try:\n            porcelain.diff_tree(_repo, c_old.tree, c_new.tree)\n        except:\n            raise GitMethodsError(message=exit_codes[6], exit_code=6)\n\n    def _git_revert(self, commit_sha):\n        \"\"\"Perform a no-commit revert\n\n        :param commit_sha: commit sha to revert to\n        \"\"\"\n\n        # TODO - replace native git\n        cmd = 'git revert --no-commit {0}'.format(commit_sha)\n        proc = subprocess.Popen(cmd.split(),\n                                stdout=subprocess.PIPE,\n                                stderr=subprocess.PIPE)\n        proc.communicate()\n\n        if proc.returncode != 0:\n            raise GitMethodsError(message=exit_codes[33], exit_code=33)\n\n    def _get_commit_sha_for_tag(self, tag):\n        \"\"\"Obtain the commit sha of an associated tag\n\n        :param tag: git tag to match to commit sha\n        \"\"\"\n        for repo_tag, commit_obj in self._dulwich_get_tags().items():\n            if tag == repo_tag:\n                return commit_obj.id\n\n        raise GitMethodsError(message=exit_codes[8], exit_code=8)\n\n    def _dulwich_tag(self, tag_text, author, message=DEFAULT_TAG_MSG):\n        \"\"\"\n        Creates a tag in git via dulwich calls:\n\n        :param tag_text: tag string\n        :param author: author string\n        :param message: message string\n        \"\"\"\n        tag(self.config['top_dir'], tag_text, author, message)\n\n    def _dulwich_reset_to_tag(self, tag=None):\n        \"\"\"\n        Resets the HEAD to the commit\n        \"\"\"\n        _repo = Repo(self.config['top_dir'])\n\n        if not tag:\n            sha = _repo.head()\n        else:\n            sha = self._get_commit_sha_for_tag(tag)\n\n        try:\n            _repo.refs['HEAD'] = sha\n        except AttributeError:\n            raise GitMethodsError(message=exit_codes[7], exit_code=7)\n\n    def _dulwich_stage_all(self):\n        \"\"\"\n        Stage modified files in the repo\n        \"\"\"\n        _repo = Repo(self.config['top_dir'])\n\n        # Iterate through files, those modified will be staged\n        for elem in os.walk(self.config['top_dir']):\n            relative_path = elem[0].split('./')[-1]\n            if not search(r'\\.git', elem[0]):\n                files = [relative_path + '/' +\n                         filename for filename in elem[2]]\n                log.info(__name__ + ' :: Staging - {0}'.format(files))\n                _repo.stage(files)\n\n    def _dulwich_commit(self, author, message=DEFAULT_COMMIT_MSG):\n        \"\"\"\n        Commit staged files in the repo\n        \"\"\"\n        commit(self.config['top_dir'], message=message, author=author)\n\n    def _dulwich_status(self):\n        \"\"\"\n        Return the git status\n        \"\"\"\n        _repo = Repo(self.config['top_dir'])\n        index = _repo.open_index()\n        return list(tree_changes(_repo, index.commit(_repo.object_store),\n                                 _repo['HEAD'].tree))\n\n    def _dulwich_get_tags(self):\n        \"\"\"\n        Get all tags & corresponding commit shas\n        \"\"\"\n        _repo = Repo(self.config['top_dir'])\n        tags = 
_repo.refs.as_dict(\"refs/tags\")\n ordered_tags = {}\n # Get the commit hashes associated with the tags\n for tag, tag_commit in tags.items():\n if tag not in ordered_tags:\n ordered_tags[tag] = _repo.object_store.peel_sha(tag_commit)\n # Sort by commit_time, then by tag name, as multiple tags can have\n # the same commit_time for their commits\n ordered_tags = OrderedDict(sorted(ordered_tags.items(),\n key=lambda t: (t[1].commit_time, t)))\n return ordered_tags\n\n def _dulwich_push(self, remote_location, refs_path):\n \"\"\"Remote push with dulwich.porcelain\n\n :param repo : Path to repository\n :param remote_location: Location of the remote\n :param refs_path: relative path to the refs to push to remote\n \"\"\"\n push(self.config['top_dir'], remote_location, refs_path)\n\n def _dulwich_pull(self, remote_location, refs_path, errstream=sys.stderr):\n \"\"\" Pull from remote via dulwich.porcelain\n\n :param repo: Path to repository\n :param remote_location: Location of the remote\n :param refs_path: relative path to the fetched refs\n \"\"\"\n pull(self.config['top_dir'], remote_location, refs_path)\n\n def _dulwich_checkout(self, _repo):\n \"\"\" Perform 'git checkout .' - syncs staged changes \"\"\"\n\n indexfile = _repo.index_path()\n tree = _repo[\"HEAD\"].tree\n index.build_index_from_tree(_repo.path, indexfile,\n _repo.object_store, tree)\n","sub_path":"git_deploy/git_methods.py","file_name":"git_methods.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"9552151","text":"import wykop\n\n# https://github.com/p1c2u/wykop-sdk/blob/master/README.rst\n# http://www.wykop.pl/dla-programistow/dokumentacja/\n\nklucz_aplikacji = '4qQQFapyXT'\nsekret_aplikacji = 'Mpcpix6Bpi'\n\napi = wykop.WykopAPI(klucz_aplikacji, sekret_aplikacji, output='clear')\n\n# profile = api.observe_profile(\"m__b\")\n#p1 = api.get_link(4) # znaleziska\n#p2 = api.search_entries('#spacex') # wyszukiwanie\n\n#print(p2[0]['embed']['url'])\n# 'source, url' - zdjęcie źródłowe\n#type(p2)\n\n#x = [print('\\n\\t',i) for i in p2]\n\n#################################\n\np3 = api.get_observatory_entries()\n'''\nprint('a0: ',p3[0])\nprint('b0: ',p3[0]['embed'])\nprint('c0: ',p3[0]['embed']['url'])\n'''\n\ndef showImagesFromMirko():\n for i in range(0,24):\n if (p3[i]['embed'] is not None) and (str(p3[i]['embed']['url']).endswith('.jpg')):\n print(i, ':', p3[i]['embed']['url'])\n\n\ntext = '#spacex'\n\nprint('a0: ', p3[1]['body'])\n","sub_path":"wykopp.py","file_name":"wykopp.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"168958222","text":"class Node:\n \"\"\"\n This is Node class, which is automatically created using class Linked List.\n It contains Data and a \"link\" to next Node in order\n When initaized, it sets variables data and next, which acts like a pointer\n The Aviable Commands:\n None\n \"\"\"\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def __str__(self):\n return \"The data: \" + str(self.data)\n\n def __lt__(self, other):\n return self.data < other.data\n\n def __gt__(self, other):\n return self.data > other.data\n\n\nclass CircularLinkedList:\n def __init__(self):\n self.head = None\n\n\n def prepend(self, data):\n if not self.head:\n new_node = Node(data)\n self.head = new_node\n self.head.next = self.head\n return\n \n last = self.head\n while last.next != self.head:\n last = 
last.next\n \n new_node = Node(data)\n\n last.next = new_node\n new_node.next = self.head\n self.head = new_node\n\n\n def append(self, data):\n if not self.head:\n new_node = Node(data)\n self.head = new_node\n self.head.next = self.head\n return\n \n new_node = Node(data)\n\n curr = self.head\n while curr.next != self.head:\n curr = curr.next\n\n curr.next = new_node\n new_node.next = self.head\n\n\n def print_llist(self):\n curr = self.head\n\n while curr:\n print(curr.data, \"-> \", end=\"\")\n curr = curr.next\n if curr == self.head:\n print(\"(Head)\", end=\"\")\n break\n print()\n\n \n def remove(self, index):\n curr = self.head\n prev = self.head\n\n if index == 0:\n while prev.next != self.head:\n prev = prev.next\n curr = self.head\n \n prev.next = curr.next\n self.head = curr.next\n curr.next = None\n curr.data = None\n return\n\n while curr and index > 0:\n prev = curr\n curr = curr.next\n index -= 1\n if curr == self.head:\n curr = None\n break\n \n if not curr:\n return print(\"There is not that much Nodes in this Circular Linked List. No changes have been made.\")\n\n prev.next = curr.next\n curr.next = None\n curr.data = None\n curr = None\n return\n\n\n def split_list(self, index=None):\n if index == None:\n len_of_self = len(self)\n half = int(len_of_self / 2) - 1\n elif index:\n if len(self) - 1 == index:\n return print(\"No action was made. Try to instead delete last node.\")\n half = index - 1\n\n curr = self.head\n\n while curr.next != self.head and half > 0:\n curr = curr.next\n half -= 1\n\n if curr == None:\n return\n\n new_head = curr.next\n new_last = curr\n\n new_cllist = CircularLinkedList()\n\n new_cllist.head = new_head\n\n prev = curr.next\n curr = curr.next.next\n\n while curr.next != self.head:\n prev.next = curr\n new_cllist.append(curr)\n curr = curr.next\n\n curr.next = new_cllist.head\n new_last.next = self.head\n\n self.print_llist()\n new_cllist.print_llist()\n\n\n def __len__(self):\n curr = self.head\n length = 1\n\n while curr.next != self.head:\n curr = curr.next\n length += 1\n\n return length \n\n\nif __name__ == \"__main__\":\n llist = CircularLinkedList()\n\n llist.append(\"A\")\n llist.append(\"B\")\n llist.append(\"C\")\n llist.append(\"D\")\n llist.append(\"E\")\n llist.append(\"F\")\n\n llist.print_llist()","sub_path":"Data_Structures/Circular_Linked_List.py","file_name":"Circular_Linked_List.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"396964941","text":"import matplotlib.pyplot as plt\n\n\ndef plt_style(c=\"k\"):\n \"\"\"\n Set plotting style for bright (``c = 'w'``) or dark (``c = 'k'``) backgrounds\n :param c: colour, can be set to ``'w'`` or ``'k'`` (which is the default)\n :type c: str\n \"\"\"\n import matplotlib as mpl\n from matplotlib import rc\n\n # Reset previous configuration\n mpl.rcParams.update(mpl.rcParamsDefault)\n # %matplotlib inline # not from script\n get_ipython().run_line_magic(\"matplotlib\", \"inline\")\n\n # configuration for bright background\n if c == \"w\":\n plt.style.use(\"bmh\")\n\n # configurations for dark background\n if c == \"k\":\n # noinspection PyTypeChecker\n plt.style.use([\"dark_background\", \"bmh\"])\n\n # remove background colour, set figure size\n rc(\"figure\", figsize=(16, 8), max_open_warning=False)\n rc(\"axes\", 
facecolor=\"none\")\n","sub_path":"src/notebooks/exploratory_analysis/plt_style.py","file_name":"plt_style.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"565783022","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nfrom numpy import dot, prod, sum as np_sum, stack, exp as np_exp\nimport numpy as np\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom gensim import utils, matutils\nfrom scipy import stats\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-vec\", \"--vector_file\", help=\"binary vector file\")\nparser.add_argument(\"-context_vec\", \"--context_vector_file\", help=\"context binary vector file\")\nparser.add_argument(\"-sim\", \"--sim_file\", help=\"word similarity file\")\nparser.add_argument(\"-nor\", \"--normalized\", help=\"normalize the vector\", action=\"store_true\")\nparser.add_argument(\"-el\", \"--expected_likelihood\", help=\"using expected likelihood to calculate similarity\", action=\"store_true\")\nparser.add_argument(\"-cate_n\", \"--cate_n\", help=\"category variable num\", type=int, default=0)\nparser.add_argument(\"-cate_k\", \"--cate_k\", help=\"category variable size\", type=int, default=0)\nparser.add_argument(\"-top\", \"--top\", help=\"evaluate only top words\", type=int, default=100000000)\nargs = parser.parse_args()\n\ndef softmax_prob(logits, cate_n, cate_k):\n logits = logits.reshape(cate_n, cate_k)\n probs = np_exp(logits)\n return (probs / np_sum(probs, axis=1, keepdims=True)).reshape(-1)\n\ndef KL_distance(prob1, prob2):\n return np.sum(prob1 * np.log2(prob1 / prob2)) / prob1.shape[0]\n\ndef parse_sim_line(line, delimiter='\\t', case_insensitive=True, mode=0):\n if mode == 0:\n a, b, sim = line.split(delimiter)\n elif mode == 1:\n ind, a, ap, b, bp, asen, bsen, sim = line.split(delimiter)[:-10]\n asen = asen.split()\n bsen = bsen.split()\n aind = asen.index('')\n bind = bsen.index('')\n asen = [w.lower() if case_insensitive else w for w in asen if w != '' and w != '']\n bsen = [w.lower() if case_insensitive else w for w in bsen if w != '' and w != '']\n if case_insensitive:\n a = a.lower()\n b = b.lower()\n sim = float(sim)\n if mode == 1:\n return a, b, sim, asen, bsen, aind, bind\n return a, b, sim, None, None, None, None\n\ndef evaluate_word_pairs(self, pairs, delimiter='\\t', restrict_vocab=300000, \n case_insensitive=True, dummy4unknown=False, normalized=True,\n el=False, cate_n=0, cate_k=0, mode=0, window=0, context_emb=None):\n ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]\n ok_vocab = dict((w.lower(), v) for w, v in reversed(ok_vocab)) if case_insensitive else dict(ok_vocab)\n original_vocab = self.vocab\n self.vocab = ok_vocab\n if context_emb != None:\n context_ok_vocab = [(w, context_emb.vocab[w]) for w in context_emb.index2word[:restrict_vocab]]\n context_ok_vocab = dict((w.lower(), v) for w, v in reversed(context_ok_vocab)) if case_insensitive else dict(context_ok_vocab)\n context_original_vocab = context_emb.vocab\n context_emb.vocab = context_ok_vocab\n\n similarity_gold = []\n similarity_model = []\n oov = 0\n\n for line_no, line in enumerate(utils.smart_open(pairs)):\n line = utils.to_unicode(line)\n if line.startswith('#'):\n # May be a comment\n continue\n else:\n try:\n a, b, sim, asen, bsen, aind, bind = parse_sim_line(line, delimiter=delimiter, case_insensitive=case_insensitive, mode=mode)\n except:\n #print('skipping invalid line #%d in %s' % (line_no, 
pairs))\n continue\n if a not in ok_vocab or b not in ok_vocab:\n oov += 1\n #print('unknown', a, b)\n if dummy4unknown:\n similarity_model.append(0.0)\n similarity_gold.append(sim)\n continue\n else:\n #print('skipping line #%d with OOV words: %s' % (line_no, line.strip()))\n continue\n similarity_gold.append(sim) # Similarity from the dataset\n if mode == 1 and window >= 0 and context_emb != None:\n aemb = np_sum(stack([self[a]] + [context_emb[w] for w in (asen[aind-window:aind] + asen[aind+1:aind+window+1]) \n if w in context_emb and self.vocab[a].index <= restrict_vocab]), axis=0)\n aemb2 = np_sum(stack([self[a]] + [context_emb[w] for w in (asen[aind-window:aind] + asen[aind+1:aind+window+1]) \n if w in context_emb and self.vocab[a].index <= restrict_vocab] * 5), axis=0)\n bemb = np_sum(stack([self[b]] + [context_emb[w] for w in (bsen[bind-window:bind] + bsen[bind+1:bind+window+1])\n if w in context_emb and self.vocab[b].index <= restrict_vocab]), axis=0)\n else:\n aemb = self[a]\n bemb = self[b]\n if el:\n similarity_model.append(prod(np_sum((aemb * bemb).reshape(cate_n, cate_k), axis=1))) # Expected likelihood similarity from the model\n else:\n '''\n post = softmax_prob(aemb, cate_n, cate_k).reshape(cate_n, cate_k)\n post2 = softmax_prob(aemb2, cate_n, cate_k).reshape(cate_n, cate_k)\n prior = softmax_prob(self[a], cate_n, cate_k).reshape(cate_n, cate_k)\n print(KL_distance(post, prior), KL_distance(post2, prior))\n print(a, len([context_emb[w] for w in (asen[aind-window:aind] + asen[aind+1:aind+window+1]) if w in context_emb]),\n b, len([context_emb[w] for w in (bsen[bind-window:bind] + bsen[bind+1:bind+window+1]) if w in context_emb]))\n print(asen[aind-window:aind] + asen[aind+1:aind+window+1])\n print(bsen[bind-window:bind] + bsen[bind+1:bind+window+1])\n print(np.stack([prior, post, post2], axis=1)[:10])\n input()\n '''\n similarity_model.append(dot(matutils.unitvec(aemb), matutils.unitvec(bemb)) if normalized else dot(aemb, bemb)) # Similarity from the model\n #similarity_model.append(dot(matutils.unitvec(softmax_prob(aemb, cate_n, cate_k)), matutils.unitvec(softmax_prob(bemb, cate_n, cate_k))) if normalized else dot(aemb, bemb)) # Similarity from the model\n self.vocab = original_vocab\n if context_emb != None:\n context_emb.vocab = context_original_vocab\n spearman = stats.spearmanr(similarity_gold, similarity_model)\n pearson = stats.pearsonr(similarity_gold, similarity_model)\n oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100\n\n #print(\n # 'Pearson correlation coefficient against %s: %f with p-value %f',\n # pairs, pearson[0], pearson[1])\n #print(\n # 'Spearman rank-order correlation coefficient against %s: %f with p-value %f',\n # pairs, spearman[0], spearman[1])\n print('Pairs with unknown words: %d' % oov)\n self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)\n return pearson, spearman, oov_ratio\n\nif __name__ == '__main__':\n if not args.vector_file or not args.sim_file:\n print('-vec and -sim are needed')\n exit()\n mode = 1 if 'SCWS' in args.sim_file else 0\n print('Evaluate %d' % args.top)\n print('Normalize vector' if args.normalized else \"No normalization\")\n print('Context similarity file' if mode else \"No context similarity file\")\n w2v_emb = KeyedVectors.load_word2vec_format(args.vector_file, binary=True)\n if args.context_vector_file:\n context_emb = KeyedVectors.load_word2vec_format(args.context_vector_file, binary=True)\n r = evaluate_word_pairs(w2v_emb, args.sim_file, case_insensitive=True, dummy4unknown=False, 
restrict_vocab=args.top,\n normalized=args.normalized, el=args.expected_likelihood, cate_n=args.cate_n, cate_k=args.cate_k, \n mode=mode, window=5, context_emb=context_emb)\n else:\n r = evaluate_word_pairs(w2v_emb, args.sim_file, case_insensitive=True, dummy4unknown=False, restrict_vocab=args.top,\n normalized=args.normalized, el=args.expected_likelihood, cate_n=args.cate_n, cate_k=args.cate_k,\n mode=mode)\n print(r)","sub_path":"scripts/compute-similarity.py","file_name":"compute-similarity.py","file_ext":"py","file_size_in_byte":7734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"133632193","text":"\"\"\"\nModule for downloading documents from a specified server.\nSpecifically designed for shell use given the output that\nthe \"download\" command produces\n\"\"\"\n\n__all__ = [\"Downloader\"]\n\nimport itertools\n\nfrom src import error\nfrom time import sleep, strftime\nfrom sys import stdout\nfrom httplib import HTTPConnection\nfrom os.path import isfile\n\n\ndef __LoadingWheel():\n \"Display a loading wheel.\"\n frames = [' \\\\', ' |', ' /', '--']\n spinner = itertools.cycle(frames) \n return lambda: spinner.next()\n\n\ndef monitor_download(stream, uri, outfile, bufferSize=1024):\n \"\"\"\n Display download progress when the \n size of the remote file is unknown.\n \"\"\"\n spin = __LoadingWheel()\n buffer = stream.read(bufferSize)\n while(buffer):\n outfile.write(buffer)\n stdout.write(\"\\rFetching: %s\\t%s\" % (uri, spin()))\n stdout.flush()\n buffer = stream.read(bufferSize) \n sleep(0.02)\n stdout.write(\"\\rFetching: %s\\tcomplete!\\n\" % uri)\n\n\n\ndef display_download_progress(stream, uri, size, outfile, bufferSize=1024):\n \"\"\"\n Display download progress when the\n size of the remote file is known.\n \"\"\"\n chunk = 0\n buffer = stream.read(bufferSize)\n while(buffer):\n outfile.write(buffer)\n chunk += (float)(len(buffer))\n percent = 100*(chunk/size)\n stdout.write(\"\\rFetching: %s\\t%2d%%\" % (uri, percent)) \n stdout.flush()\n buffer = stream.read(bufferSize)\n sleep(0.02)\n stdout.write(\"\\rFetching: %s\\t%2d%%\\n\" % (uri, percent))\n\n\nclass Connection:\n \"An HTTP connection\"\n def __init__(self, server):\n self.server = server\n def __enter__(self):\n self.connection = HTTPConnection(self.server, timeout=6)\n return self.connection\n def __exit__(self, type, value, traceback):\n self.connection.close()\n\nclass Downloader:\n \"Download files via http\"\n def __init__(self, server, bufferSize=1, failedResourceFile=None):\n self.server = server\n self.bufferSize = bufferSize*1024\n self.failedResourceFile = failedResourceFile\n\n def __call__(self, url, outfile=strftime(\"%I-%M-%S\"), forceOverwrite=False):\n if isfile(outfile):\n if not forceOverwrite:\n return\n with Connection(self.server) as conn:\n conn.request('GET', url)\n stream = conn.getresponse()\n if stream.status != 404:\n with open(outfile, 'w') as of:\n if stream.length:\n size = int(stream.length)\n display_download_progress(stream, url, size, of, self.bufferSize)\n else:\n monitor_download(stream, url, of, self.bufferSize)\n else:\n if self.failedResourceFile:\n with open(self.failedResourceFile, 'a') as f:\n f.write(url+'\\n')\n error.consume(\"Error: %s (%s) Skipping...\" % (\"No file for\", outfile))\n # Read to make sure the request finishes\n 
stream.read()\n","sub_path":"src/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"328136642","text":"import subprocess\n\nontology='data/in/3d.dlp'\nin_data_path='data/in/'\ndata_ext='.ttl'\ndatasets=['pompe_non_sat_no_lang']\njava_vm_options='-Xmx8g -Xms8g'\nprogramm = 'GraalSaturationTest.jar '\n\nchases={'SCC':20}\n#chases={'GRD':5,'SCC':16}\nstores=['MEMORY','MEMORY_MAPPING'] \n#stores=['MEMORY','MEMORY_MAPPING','POSTGRE','POSTGRE_MAPPING','VIRTUOSO'] \n\nfor dataset in datasets:\n\tfor chase in chases.keys():\n\t\tfor store in stores:\n\t\t\tdataset_path=in_data_path+dataset+data_ext\n\t\t\tparams = dataset_path +' '+ ontology +' '+ store +' '+ chase + ' '+str(chases[chase])\n\t\t\t#print('{' + params +'}')\n\t\t\tsubprocess.run(['java','-Xmx6g','-Xms4g','-jar',programm,dataset_path,ontology,store,chase,str(chases[chase])],stdout=True);\n\t\t\t\n","sub_path":"runTest.py","file_name":"runTest.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"426592989","text":"#!/usr/bin/env python3\nimport argparse\nimport serial\nfrom time import sleep\nimport pynput\nimport pyautogui\nimport win32api\nimport win32con\n\n# for time delaying the input:\nfrom threading import Timer\nimport time\nfrom math import sqrt\n\n\nSTICK_MIN\t\t= 0\nSTICK_MAX\t\t= 255\nSTICK_CENTER\t= 128\n\nDPAD_UP = 0x00\nDPAD_UP_RIGHT = 0x01\nDPAD_RIGHT = 0x02\nDPAD_DOWN_RIGHT = 0x03\nDPAD_DOWN = 0x04\nDPAD_DOWN_LEFT = 0x05\nDPAD_LEFT = 0x06\nDPAD_UP_LEFT = 0x07\nDPAD_CENTER = 0x08\n\nclass SwitchController():\n\n\tdef __init__(self):\n\n\t\tself.dpad \t\t= DPAD_CENTER\n\t\tself.up \t\t= 0\n\t\tself.down \t\t= 0\n\t\tself.left \t\t= 0\n\t\tself.right \t\t= 0\n\n\t\tself.lstick \t= 0\n\t\tself.l \t\t\t= 0\n\t\tself.zl \t\t= 0\n\t\tself.minus\t\t= 0\n\t\tself.capture\t= 0\n\n\t\tself.a \t\t= 0\n\t\tself.b \t\t= 0\n\t\tself.x \t\t= 0\n\t\tself.y \t\t= 0\n\t\tself.rstick = 0\n\t\tself.r \t\t= 0\n\t\tself.zr\t\t= 0\n\t\tself.plus\t= 0\n\t\tself.home \t= 0\n\n\t\tself.LX = STICK_CENTER\n\t\tself.LY = STICK_CENTER\n\t\tself.RX = STICK_CENTER\n\t\tself.RY = STICK_CENTER\n\n\t\tself.output = \"\"\n\n\tdef reset(self):\n\n\t\tself.dpad \t\t= DPAD_CENTER\n\t\tself.up\t\t\t= 0\n\t\tself.down \t\t= 0\n\t\tself.left \t\t= 0\n\t\tself.right \t\t= 0\n\n\t\tself.lstick \t= 0\n\t\tself.l \t\t\t= 0\n\t\tself.zl \t\t= 0\n\t\tself.minus\t\t= 0\n\t\tself.capture\t= 0\n\n\t\tself.a \t\t= 0\n\t\tself.b \t\t= 0\n\t\tself.x \t\t= 0\n\t\tself.y \t\t= 0\n\t\tself.rstick = 0\n\t\tself.r \t\t= 0\n\t\tself.zr\t\t= 0\n\t\tself.plus\t= 0\n\t\tself.home \t= 0\n\n\t\tself.LX = STICK_CENTER\n\t\tself.LY = STICK_CENTER\n\t\tself.RX = STICK_CENTER\n\t\tself.RY = STICK_CENTER\n\n\tdef getOutput(self):\n\n\t\tself.output = \"\"\n\n\t\tself.output += str(self.dpad)\n\t\t# self.output += \"1\" if (self.up) else \"0\"\n\t\t# self.output += \"1\" if (self.down) else \"0\"\n\t\t# self.output += \"1\" if (self.left) else \"0\"\n\t\t# self.output += \"1\" if (self.right) else \"0\"\n\t\tself.output += \"1\" if (self.lstick) else \"0\"\n\t\tself.output += \"1\" if (self.l) else \"0\"\n\t\tself.output += \"1\" if (self.zl) else \"0\"\n\t\tself.output += \"1\" if (self.minus) else \"0\"\n\t\tself.output += \"1\" if (self.capture) else \"0\"\n\n\t\tself.output += \"1\" if (self.a) else \"0\"\n\t\tself.output += \"1\" if (self.b) else 
\"0\"\n\t\tself.output += \"1\" if (self.x) else \"0\"\n\t\tself.output += \"1\" if (self.y) else \"0\"\n\t\tself.output += \"1\" if (self.rstick) else \"0\"\n\t\tself.output += \"1\" if (self.r) else \"0\"\n\t\tself.output += \"1\" if (self.zr) else \"0\"\n\t\tself.output += \"1\" if (self.plus) else \"0\"\n\t\tself.output += \"1\" if (self.home) else \"0\"\n\n\t\tself.output += \" \" + str(self.LX)\n\t\tself.output += \" \" + str(self.LY)\n\t\tself.output += \" \" + str(self.RX)\n\t\tself.output += \" \" + str(self.RY)\n\n\tdef send(self, msg):\n\t\ttry:\n\t\t\tself.ser.write(f'{msg}\\r\\n'.encode(\"utf-8\"));\n\t\texcept:\n\t\t\tprint(\"some write error\")\n\n\n\tdef connect(self, port):\n\t\tself.ser = serial.Serial(port, 38400)\n","sub_path":"controller/switchcontroller/switchcontroller.py","file_name":"switchcontroller.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"326142390","text":"import json\nimport os\n\ntask_suffix = '_tagger_template'\ntask_crf_suffix = '_tagger_crf_template'\ntask_domains = [\n 'upos_uni',\n 'upos_streusle',\n 'xpos_uni',\n 'xpos_streusle',\n 'xpos_conll03',\n 'chunk-iobes_conll02',\n 'chunk-iobes_conll03',\n 'chunk_conll02',\n 'chunk_conll03',\n 'com_broadcast1',\n 'com_broadcast2',\n 'com_broadcast3',\n 'ner-iobes_conll03',\n 'ner_conll03',\n 'supsense_streusle',\n 'mwe_streusle',\n 'smwe_streusle',\n 'sem_semcor',\n 'semtr_semtraits']\n# 'ccg_ccg'\n\nmulti_path = '/data/tagger/multitagger_multi_'\nall_tasks = [\"upos\", \"xpos\", \"chunk\", \"ner\", \"mwe\", \"sem\", \"semtr\", \"supsense\", \"com\"]\n\ncurrent_tsks = [\"upos\", \"upos\",\n \"xpos\", \"xpos\", \"xpos\",\n \"chunk\", \"chunk\",\n \"ner\",\n \"mwe\",\n \"sem\",\n \"semtr\",\n \"supsense\",\n \"com\", \"com\", \"com\"]\ncurrent_tsk_domains = [\"upos_uni\", \"upos_streusle\",\n \"xpos_uni\", \"xpos_streusle\", \"xpos_conll03\",\n \"chunk_conll02\", \"chunk_conll03\",\n \"ner_conll03\",\n \"mwe_streusle\",\n \"sem_semcor\",\n \"semtr_semtraits\",\n \"supsense_streusle\",\n \"com_broadcast1\",\n \"com_broadcast2\",\n \"com_broadcast3\"]\nfor current_tsk, current_tsk_domain in zip(current_tsks, current_tsk_domains):\n # print(current_tsk, current_tsk_domain)\n top_table = '\\\\begin{table*}[t]\\n\\\\centering\\n\\\\footnotesize{\\n\\\\begin{tabular}{c|c|c|c}\\n'\n top_table += 'Trained with & \\\\multicolumn{3}{|c}{\\\\task{' + current_tsk\n top_table += '} on ' + current_tsk_domain.split('_')[-1]\n top_table += '} \\\\\\\\ \\\\cline{2-4}\\n'\n top_table += '& Multiple Decoders & Task Embeddings (All Steps) & Task Embeddings (Prepend) \\\\\\\\ \\\\hline'\n print(top_table)\n current_tsk_domain_single = current_tsk_domain\n if 'ner' in current_tsk_domain:\n current_tsk_domain_single = current_tsk_domain.replace('ner', 'ner-iobes')\n if 'chunk' in current_tsk_domain:\n current_tsk_domain_single = current_tsk_domain.replace('chunk', 'chunk-iobes')\n other_tasks = []\n exts = []\n for tsk in all_tasks:\n if tsk == current_tsk:\n continue\n exts.append(''.join(sorted([tsk, current_tsk])))\n other_tasks.append(tsk)\n exts.append(\"all\")\n other_tasks.append(\"all\")\n\n res_filepath = os.path.join('/data/tagger/' + current_tsk_domain_single + task_crf_suffix, 'metrics.json')\n print(\"Self only & \", end=' ')\n with open(res_filepath, 'r') as fr:\n results = json.load(fr)\n print('\\\\multicolumn{3}{|c}{',end='')\n print(round(100 * results['test_f1-measure-overall'], 2), end='')\n print('}', end=' 
')\n print(' \\\\\\\\ \\\\hline')\n filepaths = []\n filepaths += [multi_path + ext for ext in exts]\n te_filepaths = ['results/test_' + current_tsk_domain + '_task_embedding_tagger_' + ext + '_screenlog' for ext in exts]\n tpe_filepaths = ['results/test_' + current_tsk_domain + '_task_prepend_embedding_tagger_' + ext + '_screenlog'\n for ext in exts]\n for i, filepath in enumerate(filepaths):\n print('+\\\\task{' + other_tasks[i] + '}', end=' ')\n print('&', end=' ')\n res_filepath = os.path.join(filepath, 'metrics.json')\n if os.path.exists(res_filepath):\n with open(res_filepath, 'r') as fr:\n results = json.load(fr)\n print(round(100*results['test_' + current_tsk + '-f1-measure-overall'], 2), end=' ')\n print('&', end=' ')\n res_filepath = te_filepaths[i]\n if os.path.exists(res_filepath):\n with open(res_filepath, 'r') as fr:\n for line in fr:\n pass\n try:\n print(round(100*float(line.split()[-1]), 2), end=' ')\n except:\n pass\n print('&', end=' ')\n res_filepath = tpe_filepaths[i]\n if os.path.exists(res_filepath):\n with open(res_filepath, 'r') as fr:\n for line in fr:\n pass\n try:\n print(round(100*float(line.split()[-1]), 2), end=' ')\n except:\n pass\n print(' \\\\\\\\ ')\n if i >= len(filepaths)-2:\n print(' \\\\hline ')\n bottom_table = '\\\\end{tabular}\\n\\\\caption{\\\\small F1-Score}\\\\label{tMulti'\n bottom_table += ''.join(current_tsk_domain.split('_'))\n bottom_table += '}}\\n\\\\end{table*}'\n print(bottom_table)\n print('')\n\n\n\n\n","sub_path":"utils/collect_task_results_for_table_old.py","file_name":"collect_task_results_for_table_old.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"595630946","text":"import os\nimport requests\nimport urllib3\nimport time\nimport threading\nimport subprocess\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n#This file is a video indexer API\n\nimport logging\nlogging.basicConfig(filename='./logs/example.log',level=logging.DEBUG)\n#Log format\n#logging.debug('This message should go to the log file')\n#logging.info('So should this')\n#logging.warning('And this, too')\n\n\nclass Video_Upload_API():\n\n def __init__(self, account_id, subscription_key, account_type=\"trial\"):\n self.subscription_key = subscription_key\n self.access_token = \"\"\n self.account_type = account_type# also known as location in API\n self.account_id = account_id\n self.subscription_key = subscription_key\n self.video_names = []\n self.API_AUTH_URL = \"https://api.videoindexer.ai/auth/{0}/Accounts/{1}\".format(account_type, account_id)\n self.API_VIDEO_URL = \"https://api.videoindexer.ai/{0}/Accounts/{1}\".format(account_type, account_id)\n self.API_VIDEO_INDEX_URL = \"https://api.videoindexer.ai/{0}/Accounts/\".format(account_id)\n\n def get_access_token(self):\n querystring = {\"allowEdit\": \"true\"}\n headers = {\n 'Ocp-Apim-Subscription-Key': self.subscription_key,\n 'Host': \"api.videoindexer.ai\"\n }\n\n url = '{0}/AccessToken'.format(self.API_AUTH_URL)\n\n logging.info(\"calling: \" + url)\n\n response = requests.get(url, headers=headers, params=querystring, verify=False)\n self.access_token = response.text.replace('\"', '')\n\n if len(self.access_token):\n logging.info(\"Retrieved Access Token\")\n\n return self.access_token\n\n def get_video_names(self):\n url = \"https://api.videoindexer.ai/{0}/Accounts/{1}/Videos?accessToken={2}\".format(self.account_type,\n self.account_id,\n self.access_token)\n json_videos = 
requests.get(url, verify=False)\n\n for i in json_videos.json()[\"results\"]:\n video_name = str(i[\"name\"])\n if video_name not in self.video_names:\n self.video_names.append(video_name)\n\n def upload_video_file(self, video_name, file_path, language=\"auto\", indexing_preset=\"AudioOnly\",\n streaming_preset=\"Default\", replace = False):\n\n if self.access_token == \"\":\n self.get_access_token()\n\n # Upload a video\n upload_video_url = \"{0}/Videos?accessToken={1}&name={2}&language={3}&indexingPreset={4}&streamingPreset={5}\".format(\n self.API_VIDEO_URL, \\\n self.access_token, video_name, language, indexing_preset, streaming_preset)\n\n f = open(file_path, 'rb')\n files = {'file': f}\n headers = {'Host': 'api.videoindexer.ai'}\n logging.info(\"Calling request to upload video ... \" + file_path)\n response = requests.post(upload_video_url, files=files, headers=headers, verify=False)\n logging.info(\"Sent request for ... \" + file_path)\n\n if response.ok:\n logging.info(\"Uploaded video ... determining status\")\n self.check_upload_status(response.json()[\"id\"])\n else:\n logging.info(\"error: \")\n logging.info(response.json())\n #self.check_upload_status(response.json()['id'])\n if \"id\" in response.json().keys():\n return response.json()[\"id\"] #returns video id\n \n return \"None\" \n\n def check_upload_status(self, upload_id):\n result = {}\n\n if upload_id:\n progress_url = \"{0}/Videos/{1}/Index?accessToken={2}\".format(self.API_VIDEO_URL, upload_id,\n self.access_token)\n\n while True:\n logging.info(\"Waiting for \" + str(upload_id) + \" to finish indexing\")\n time.sleep(2)\n response = requests.get(progress_url, verify=False)\n\n if 'state' in response.json().keys():\n print(response.json()['state'])\n\n if response.json()['state'] == 'Failed':\n logging.info(\"Failed to upload video. 
Please try re-uploading\")\n                    break\n\n                if response.json()['state'] == 'Processed':\n                    logging.info(\"*\" * 10)\n                    logging.info(\"The source language is: \")\n                    result['lang'] = response.json()['videos'][0]['sourceLanguage']\n                    logging.info(result['lang'])\n\n                    response = requests.get(progress_url, verify=False)\n\n                    logging.info(response.json()['videos'][0]['insights'].keys())\n                    if 'sourceLanguageConfidence' in response.json()['videos'][0]['insights'].keys():\n                        result['level'] = response.json()['videos'][0]['insights']['sourceLanguageConfidence']\n                        logging.info(\"Source Language Confidence is: \" + str(\n                            response.json()['videos'][0]['insights']['sourceLanguageConfidence']))\n                    else:\n                        logging.info(\"Language confidence could not be determined.\")\n                        result['level'] = \"Unknown\"\n\n                    break\n            else:\n                logging.info(\"State could not be found for \" + upload_id + \" \" + str(response.json().get('Message')))\n\n        return result\n\n    def get_language(self, video_id = None): ## deprecated use new_get_language\n        if video_id == None:\n            logging.info(\"Error\")\n            return 1\n        if not self.access_token:\n            self.get_access_token()\n        location = self.account_type\n        my_url = \"https://api.videoindexer.ai/{0}/Accounts/{1}/Videos/{2}/Index?accessToken={3}&language=English\".format(location, self.account_id, video_id, self.access_token)\n        response = requests.get(my_url, verify=False)\n        if(response.status_code != 200):\n            logging.info(\"Error Number: \" + str(response.status_code))\n            logging.info(response.json())\n\n        x = response.json()\n        language = x[\"videos\"][0][\"insights\"][\"sourceLanguage\"]\n\n        if \"sourceLanguageConfidence\" in x[\"videos\"][0][\"insights\"].keys():\n            confidence = x[\"videos\"][0][\"insights\"][\"sourceLanguageConfidence\"]\n        else:\n            confidence = None\n\n        logging.info(\"language: \" + str(language) + \"\\naccuracy: \" + str(confidence))\n\n        return language,confidence\n\n    #TODO: get the ids of just the files that have been indexed from the Wav-Clips\n    def get_video_ids(self):\n        if self.access_token == \"\":\n            self.get_access_token()\n\n        req = requests.get(\"https://api.videoindexer.ai/{0}/Accounts/{1}/Videos?accessToken={2}\".format(self.account_type,self.account_id,self.access_token), verify = False)\n\n        Dict = {}\n        try:\n            for i in req.json()['results']:\n                Dict[str(i[\"id\"])] = str(i[\"name\"])\n        except:\n            print(req.json())\n            raise Exception\n        return Dict #returns Dictionary with format \"id\":\"name of file\"\n\n    def new_get_video_ids(self, video_id):\n        if self.access_token == \"\":\n            self.get_access_token()\n        type = \"LanguageDetection\"\n        req = requests.get(\n            \"https://api.videoindexer.ai/{0}/Accounts/{1}/Videos/{2}/ArtifactUrl?type={3}&accessToken={4}\".format(self.account_type,self.account_id,video_id,type,self.access_token),\n            verify=False)\n        Dict = {}\n        # print(req.json()['results'])\n        for i in req.json()['results']:\n            Dict[str(i[\"id\"])] = str(i[\"name\"])\n        return Dict  # returns Dictionary with format \"id\":\"name of file\"\n\n    def new_get_language(self, video_id = None):\n        if video_id == None:\n            logging.debug(\"Error\")\n            return 1\n        if not self.access_token:\n            self.get_access_token()\n\n        location = self.account_type\n        accountId = self.account_id\n        videoId = video_id\n        type = \"LanguageDetection\"\n        accessToken = self.access_token\n        response = requests.get(\n            \"https://api.videoindexer.ai/{0}/Accounts/{1}/Videos/{2}/ArtifactUrl?type={3}&accessToken={4}\".format(\n                location,accountId,videoId,type,accessToken), 
verify=False)\n\n if response.status_code != 200:\n print(\"Error retrieving response for video from azure: \")\n print(response.json())\n return 0, 0, {}\n \n verbose_language_data_url = response.json()\n #print(y) #prints the retrieved json\n response = requests.get(str(verbose_language_data_url), verify=False)\n response_json = response.json()\n language = response_json[\"MasterLanguage\"]\n confidence = response_json[\"Confidence\"]\n\n return language, confidence, response_json\n\n def index_files(self,directory):\n Dict = self.get_video_ids()\n D2 = [j for j in Dict.values()]\n\n threads = []\n\n for file in os.listdir(directory):\n if str(file) not in D2:\n video_path = directory + \"/\" + str(file)\n logging.info(\"Uploading \" + str(file))\n threads.append(threading.Thread(target=self.upload_video_file, args=(str(file),video_path)))\n\n for i in threads:\n i.start()\n\n for i in threads:\n i.join()\n\n #wait for all child processes to return\n # continue\n return 0\n\n def clean_index(self, arr): # arr contains index numbers of files to be deleted\n location = self.account_type\n accountId = self.account_id\n accessToken = self.access_token\n for i in arr:\n videoId = i\n logging.info(\"Deleting \" + videoId)\n req = requests.delete(\n \"https://api.videoindexer.ai/{0}/Accounts/{1}/Videos/{2}?accessToken={3}\".format(location, accountId,\n videoId, accessToken))\n\n if str(req.status_code) != str(204):\n logging.warning(\"Failed to Delete \" + videoId)\n \n return 0\n","sub_path":"API/azure_api.py","file_name":"azure_api.py","file_ext":"py","file_size_in_byte":10575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"236507482","text":"import networkx as nx\n#import community\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef connectpoints(x1,y1,x2,y2):\n plt.plot([x1,x2],[y1,y2],'k-')\n\n\n\n#partition = community.best_partition(G) # compute communities\n\n#G = nx.karate_club_graph() # load a default graph\na, b =np.loadtxt(\"BestPath_3.txt\", dtype=int, usecols=(0,1), delimiter='\\t', unpack='true')\nx, y = np.loadtxt(\"Map.txt\", usecols=(1,2), delimiter='\\t', unpack='true') # compute graph layout\n\nplt.figure()\nplt.scatter(x,y)\n\n\nfor i in range(len(a)):\n tmp1=a[i]\n tmp2=b[i]\n connectpoints(x[tmp1],y[tmp1],x[tmp2],y[tmp2])\n\n\n#pos = nx.spring_layout(G)\n\n#print(pos)\n#nx.draw(G, nx.spring_layout(G))\n\n\n#plt.figure(figsize=(8, 8)) # image is 8 x 8 inches\n#plt.axis('off')\n#nx.draw_networkx_nodes(G, pos, node_size=100)#, cmap=plt.cm.RdYlBu, node_color=list(partition.values()))\n#nx.draw_networkx_edges(G, pos, alpha=0.3, edge_color='b')\n\n#plt.show(G)\n\n'''\nstep, ene = np.loadtxt(\"inst_energy_gas.txt\",usecols=(0,1), delimiter='\\t', unpack='true')\npress = np.loadtxt(\"inst_pressure_gas.txt\",usecols=(1), delimiter='\\t', unpack='true')\n\n\nplt.plot(step, ene, label='Instant Energy')\nplt.plot(step, press, label='Instant Pressure')\n#plt.plot(mcstep, inst_m, label='Temperature')\n\n\nplt.legend(loc='upper right')\n'''\n\n#x, gofr = np.loadtxt(\"output.epot.0\",usecols=(0,1), delimiter='\\t', unpack='true')\n#x, y = np.loadtxt(\"Map.txt\",usecols=(1,2), unpack='true')\n\n#plt.scatter(x,y)\n\n#f, axarr = plt.subplots(1,2,sharey=True)\n\n#axarr[0].errorbar(x1,y1,yerr=error1)\n#axarr[0].set_title('1 step')\n#axarr[0].plot([1,50,100], [14.975790778311286, 14.975790778311286, 14.975790778311286])\n#axarr[0].set(xlabel='# 
blocks')\n#axarr[0].set(ylabel=r'$C[S(0),0]$')\n\n#axarr[1].errorbar(x2,y2,yerr=error2)\n#axarr[1].plot([1,50,100], [14.975790778311286, 14.975790778311286, 14.975790778311286])\n#axarr[1].set_title('100 step')\n#axarr[1].set(xlabel='# blocks')\n\n\n\n#plt.xscale('log')\n#plt.yscale('log')\n\nplt.show()\n","sub_path":"lecture_10/Ex_10.2/pl.py","file_name":"pl.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"441797968","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[110]:\n\n\n#Please use ANSIF module for this program to run\nimport numpy as np\nimport pandas as pd\nimport random\n\na=np.random.uniform(low=0,high=14,size=(50,1))\ndf=pd.DataFrame(a,columns=['production'])\ndf['demand']=np.random.uniform(low=16,high=(16+np.random.randint(1,3)),size=(50,1))\n\ndf['power_to_be_generated']=(df['demand']-df['production'])\nprint(df)\n#Here all values represent Power in units of MW\n#Demand is the the power required by the houses\n#Production is the power produced by the fuel cells \n#Power_to_be_generated is the more power that needed to be produced by the fuel cells to meet the demand requirements\n\n\n# In[111]:\n\n\nmf=[[['gaussmf',{\"mean\":np.mean(np.arange(0,8)),\n \"sigma\":np.std(np.arange(0,8))}],\n ['gaussmf',{\"mean\":np.mean(np.arange(7,15)),\n \"sigma\":np.std(np.arange(7,15))}],\n ['gaussmf',{\"mean\":np.mean(np.arange(14,19)),\n \"sigma\":np.std(np.arange(14,19))}]],\n [['gaussmf',{\"mean\":np.mean(np.arange(0,8)),\n \"sigma\":np.std(np.arange(0,8))}],\n ['gaussmf',{\"mean\":np.mean(np.arange(7,15)),\n \"sigma\":np.std(np.arange(7,15))}],\n ['gaussmf',{\"mean\":np.mean(np.arange(14,19)),\n \"sigma\":np.std(np.arange(14,19))}]]]\n\n\n# In[112]:\n\n\nfrom membership import membershipfunction\nmfc=membershipfunction.MemFuncs(mf)\n\n\n# In[113]:\n\n\npower_to_be_generated=df.pop('power_to_be_generated')\n\n\n# In[114]:\n\n\nimport anfis\nanf=anfis.ANFIS(df,power_to_be_generated,mfc)\ntrain=anf.trainHybridJangOffLine(epochs=20)\n\n\n# In[115]:\n\n\nanf.plotErrors()\n\n\n# In[116]:\n\n\nanf.plotResults()\n\n\n# In[104]:\n\n\nprint(train)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"test (1).py","file_name":"test (1).py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"151449671","text":"class Solution(object):\n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n result = []\n self.backTrack(result, [], s, 0)\n return result\n \n def backTrack(self,result,tempList,s,start):\n if( start==len(s) ):\n result.append(list(tempList))\n else:\n for i in range(start,len(s)):\n if(self.isPalindrome(s,start,i)):\n tempList.append(s[start:i+1])\n self.backTrack(result, tempList, s, i+1)\n del tempList[len(tempList)-1]\n \n def isPalindrome(self,s,lo,hi):\n while lo None:\n self._mp = move_provider\n self._items = [] # type: [StoPDB]\n self._current_item = None # type: StoPDB\n self._p_path_exists = False\n self._s_path_exists = False\n self._p_valid = False\n self._s_valid = False\n self._error_message = ''\n self._bag_details = ''\n\n def _check_bag(self, p: str, location: str) -> bool:\n item = None\n if location == \"D\":\n item = self._get_path_as_p()\n if self._current_item.p_validated_on is not None:\n return True\n if location == \"O\":\n item = self._current_item.s_root\n if self._current_item.s_validated_on is not None:\n return True\n\n sb = SANCBagger()\n if 
sb.open_bag(p):\n print(\"Validating: \\t{}\".format(item))\n if sb.quick_validate():\n print(\"Valid.\")\n return True\n print(\"Not Valid.\")\n self._error_message = sb.bagging_error\n self._bag_details = sb.validation_error_details\n return False\n\n def _get_path_as_p(self):\n return os.path.join(\"P:\\\\\", self._current_item.p_root)\n\n def _check_p(self):\n if self._current_item.p_validated_on is not None:\n return True\n if self._check_path(self._get_path_as_p()):\n print(\"Path exists at the destination\")\n # The path exists on P\n self._p_path_exists = True\n if self._check_bag(self._get_path_as_p(), \"D\"):\n # Bag on P is valid. This is a valid and complete move.\n self._current_item.p_validated_on = datetime.now()\n self._current_item.completed_move = datetime.now()\n self._current_item.save()\n self._p_valid = True\n return True\n # Bag on P is not valid. Does S: exist and is it valid?\n return False\n\n def _check_s(self):\n if self._check_path(self._current_item.s_root):\n # The S path exists. Check if there is a bag.\n self._s_path_exists = True\n if self._check_bag(self._current_item.s_root, \"O\"):\n # The S Path bag exists and is valid\n self._current_item.s_validated_on = datetime.now()\n self._s_valid = True\n return True\n return False\n\n def _check_bags(self) -> bool:\n print(\"Validating Bags.\")\n if self._check_p():\n self._current_item.p_validated_on = datetime.now()\n self._current_item.save()\n return True\n\n if self._check_s():\n self._current_item.s_validated_on = datetime.now()\n self._current_item.save()\n return True\n\n return False\n\n def _try_to_bag(self):\n meta = {\n \"Contact-Name\": \"Jamie Patrick-Burns\",\n \"Source-Organization\": \"State Archives of North Carolina\",\n \"Internal-Sender-Identifier\": \"SqlMover 0.0.1\"\n }\n b = os.path.join(self._current_item.s_root, \"data\")\n if not os.path.exists(b):\n sb = SANCBagger()\n if sb.create_bag(self._current_item.s_root, meta):\n self._current_item.is_bagged = True\n self._current_item.save()\n return True\n return False\n\n def _do_copy_to_destination(self):\n print(\"Copying to:\\t{}\".format(self._get_path_as_p()))\n try:\n cp = CopyProgress(\"\\\\\\\\?\\\\\" + self._current_item.s_root, self._get_path_as_p(), CopyProgress.FILES)\n cp.start_copy()\n return True\n except Exception as e:\n print(e)\n self._write_error(str(e))\n return False\n\n def _prep_copy_to_destination(self):\n # Create the path\n if self._do_copy_to_destination():\n # We've moved it. Validate.\n if self._check_bag(self._get_path_as_p(), \"D\"):\n # It's valid. Mark as complete.\n self._current_item.completed_move = True\n self._current_item.p_validated_on = datetime.now()\n self._current_item.save()\n else:\n # Not Valid. Set error.\n print(\"Destination location did not validate after move: {}\".format(self._current_item.s_root))\n self._write_error(\"Destination location did not validate after move. 
\\t{}\".format(self._bag_details))\n\n @staticmethod\n def _check_path(p: str) -> bool:\n if os.path.exists(p):\n return True\n return False\n\n# Took out staticmethod designation, added (self), added active_proj variable and added it to query so that this method\n# only searches for incomplete items for the current project\n # @staticmethod\n def _are_all_items_completed(self):\n active_proj = int(self._mp.active_project._pk)\n query = (StoPDB\n .select()\n .where((StoPDB.completed_move == False) & (StoPDB.pid == active_proj)))\n if query.exists():\n return False\n return True\n\n def move_items(self):\n self._items = self._mp.set_unfinished_items(self._mp.STOP)\n if len(self._items) == 0:\n self._mp.close_stop()\n return\n for i in self._items: # type: StoPDB\n print(\"Working: {}\".format(i.s_root))\n self._p_path_exists = False\n self._p_valid = False\n self._s_path_exists = False\n self._s_valid = False\n self._bag_details = 'No Details'\n\n self._current_item = i\n if not self._check_bags():\n if not self._p_path_exists:\n # Is\n if not self._s_path_exists:\n # Nothing to be done find out why this path doesn't exist\n print(\"Origin location does not exist: \\t{}\".format(self._current_item.s_root))\n self._write_error(\"Origin location does not exist.\")\n continue\n\n if not self._s_valid:\n # Why is this location not valid?\n # Maybe not bagged yet?\n print(\"Attempting to bag. {}\".format(self._current_item.s_root))\n if self._try_to_bag():\n # Okay we've bagged it. Now move it.\n self._current_item.s_validated_on = datetime.now()\n self._current_item.save()\n self._prep_copy_to_destination()\n else:\n print(\"Origin location is not a valid bag: \\t{}\".format(self._current_item.s_root))\n self._write_error(\"Origin location is not a valid bag.\")\n continue\n else:\n self._current_item.s_validated_on = datetime.now()\n self._current_item.save()\n self._prep_copy_to_destination()\n else:\n if not self._p_path_exists:\n # Copy S to P\n self._prep_copy_to_destination()\n # try to validate\n if self._check_bag(self._get_path_as_p(), \"D\"):\n self._current_item.p_validated_on = datetime.now()\n self._current_item.completed_move = True\n self._current_item.save()\n else:\n self._write_error(\"Destination did not validate.\\t{}\".format(self._bag_details))\n\n if self._are_all_items_completed():\n print(\"Completing project. 
There may still be origin paths that need to be cleaned up.\")\n self._mp.close_stop()\n\n def _write_error(self, e_text: str):\n er = Errors(sid=self._current_item,\n error_text=e_text,\n error_reported=datetime.now())\n er.save()\n\n\ndef new_file_chooser():\n mp = MoveProvider()\n op = mp.set_open_projects(mp.STOP)\n c = 1\n for i in op: # type: ProjectID\n print(\"{}) {}\".format(i.get_id(), i.project_file))\n c += 1\n val = input(\"Select a project (q to quit): \")\n if val != \"q\":\n mp.set_active_project(int(val))\n return True, mp\n else:\n return False, mp\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n\n while True:\n stp, mp = new_file_chooser()\n if stp:\n print()\n sqlmvr = SqlMover(mp)\n sqlmvr.move_items()\n print()\n print()\n else:\n break","sub_path":"src/bulk_mover/sql_mover.py","file_name":"sql_mover.py","file_ext":"py","file_size_in_byte":9303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"84684900","text":"\n# method 1\ndef fib(n):\n if n == 1 or n == 2:\n return 1\n else:\n return fib(n-1) + fib(n-2)\n\n# method 2\n\ndef fib2(n):\n if n < 1:\n return 0\n memory = [0] * (n+1)\n # print(memory)\n return helper(memory, n)\n\ndef helper(memo, n):\n if n == 1 or n == 2:\n return 1\n if memo[n] != 0:\n return memo[n]\n else:\n memo[n] = helper(memo, n-1) + helper(memo, n-2)\n\n return memo[n]\n\n# method 3\n\ndef fib3(n):\n cur = 1\n pre = 1\n for i in range(n+1):\n if i >= 3:\n sum = cur + pre\n pre = cur\n cur = sum\n return cur\n\nprint(fib3(1))","sub_path":"LeetCode/fib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"215795389","text":"import re\nimport requests\nimport urllib.request\nfrom bs4 import BeautifulSoup\n\ndef getWork(url,savePath):\n req = requests.get(url)\n res = req.text.encode('utf-8')\n print(type(res))\n with open(savePath,'wb') as f:\n f.write(res)\n print('Write succeeded!')\n # print(res)\n\ndef getWorkByUrllib(url,savePath):\n headers = {\"User-Agent\": 'Mozilla/5.0'}\n url = urllib.request.Request(url,headers=headers)\n req = urllib.request.urlopen(url)\n res = req.read()\n with open(savePath,'wb') as f:\n f.write(res)\n print('Write succeeded!')\n content = res.decode('utf-8')\n print(len(content))\nurl = r'http://202.197.212.73/Web%20Client/ListDir.htm#'\nsavePath = r'C:\\Users\\Asus\\PycharmProjects\\Project1\\test\\spider\\file\\work.html'\n# getWork(url,savePath)\ngetWorkByUrllib(url,savePath)","sub_path":"test/spider/homeworkCrewler.py","file_name":"homeworkCrewler.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"350225887","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass BackupEntity(Model):\n \"\"\"Describes the Service Fabric entity that is configured for backup.\n\n You probably want to use the sub-classes and not this class directly. 
Known\n sub-classes are: ApplicationBackupEntity, ServiceBackupEntity,\n PartitionBackupEntity\n\n All required parameters must be populated in order to send to Azure.\n\n :param entity_kind: Required. Constant filled by server.\n :type entity_kind: str\n \"\"\"\n\n _validation = {\n 'entity_kind': {'required': True},\n }\n\n _attribute_map = {\n 'entity_kind': {'key': 'EntityKind', 'type': 'str'},\n }\n\n _subtype_map = {\n 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Service': 'ServiceBackupEntity', 'Partition': 'PartitionBackupEntity'}\n }\n\n def __init__(self, **kwargs):\n super(BackupEntity, self).__init__(**kwargs)\n self.entity_kind = None\n","sub_path":"azure-servicefabric/azure/servicefabric/models/backup_entity.py","file_name":"backup_entity.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"220456919","text":"from .updates import UpdateMethods\nfrom ..tl import types, custom\nfrom .. import utils, events\n\n\nclass ButtonMethods(UpdateMethods):\n def _build_reply_markup(self, buttons, inline_only=False):\n if buttons is None:\n return None\n\n try:\n if buttons.SUBCLASS_OF_ID == 0xe2e10ef2:\n return buttons # crc32(b'ReplyMarkup'):\n except AttributeError:\n pass\n\n if not utils.is_list_like(buttons):\n buttons = [[buttons]]\n elif not utils.is_list_like(buttons[0]):\n buttons = [buttons]\n\n is_inline = False\n is_normal = False\n\n rows = []\n for row in buttons:\n current = []\n for button in row:\n inline = custom.Button._is_inline(button)\n is_inline |= inline\n is_normal |= not inline\n if isinstance(button, custom.Button):\n if button.callback:\n self.add_event_handler(\n button.callback,\n events.CallbackQuery(data=button.data)\n )\n\n button = button.button\n\n if button.SUBCLASS_OF_ID == 0xbad74a3:\n # 0xbad74a3 == crc32(b'KeyboardButton')\n current.append(button)\n\n if current:\n rows.append(types.KeyboardButtonRow(current))\n\n if inline_only and is_normal:\n raise ValueError('You cannot use non-inline buttons here')\n elif is_inline == is_normal and is_normal:\n raise ValueError('You cannot mix inline with normal buttons')\n elif is_inline:\n return types.ReplyInlineMarkup(rows)\n elif is_normal:\n return types.ReplyKeyboardMarkup(rows)\n","sub_path":"telethon/client/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"479171333","text":"\nimport tile\n\nID = 4\n\n \nclass Tile(tile.Tile):\n\n \n def __init__(self, xtile, ytile):\n super(Tile, self).__init__(xtile, ytile)\n \n self.is_light_blocking = True\n self.is_under_sky = False\n \n self._set_texture(media.get_texture(\"wood\"))\n \n \nimport media\n","sub_path":"plugins/tiles/wood_floor.py","file_name":"wood_floor.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"59776745","text":"directions_X = {'L': -1, 'R': 1, 'U': 0, 'D': 0}\ndirections_Y = {'L': 0, 'R': 0, 'U': 1, 'D': -1}\n\n\ndef get_points(A):\n x = 0\n y = 0\n total_length = 0\n path_points = {}\n for cmd in A:\n direction = cmd[0]\n length = int(cmd[1:])\n for _ in range(length):\n x += directions_X[direction]\n y += directions_Y[direction]\n total_length += 1\n if (x, y) not in path_points:\n path_points[(x, y)] = total_length\n return path_points\n\n\nwith open('./inputs/day03') as input:\n line_a = 
input.readline().strip().split(',')\n line_b = input.readline().strip().split(',')\n\n points_a = get_points(line_a)\n points_b = get_points(line_b)\n intersection_points = set(points_a.keys()) & set(points_b.keys())\n part1 = min([abs(x)+abs(y) for (x, y) in intersection_points])\n part2 = min([points_a[p]+points_b[p] for p in intersection_points])\n print(part1, part2)\n","sub_path":"day03-pretty.py","file_name":"day03-pretty.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"585193649","text":"try:\n from urllib import urlencode\nexcept ImportError:\n from urllib.parse import urlencode\n\ntry:\n from urlparse import urlparse, parse_qs, urlunparse\nexcept ImportError:\n from urllib.parse import urlparse, parse_qs, urlunparse\n\nfrom django.forms.utils import flatatt\nfrom django.utils.encoding import force_str, force_text\nfrom django.utils.safestring import mark_safe\nfrom django.utils.html import format_html\n\nfrom .text import text_value\n\n\ndef render_link_tag(url, rel='stylesheet', media=None):\n \"\"\"\n Build a link tag\n \"\"\"\n attrs = {\n 'href': url,\n 'rel': rel,\n }\n if media:\n attrs['media'] = media\n return render_tag('link', attrs=attrs, close=False)\n\n\ndef render_tag(tag, attrs=None, content=None, close=True):\n \"\"\"\n Render an HTML tag\n \"\"\"\n builder = '<{tag}{attrs}>{content}'\n if content or close:\n builder += '</{tag}>'\n return format_html(\n builder,\n tag=tag,\n attrs=mark_safe(flatatt(attrs)) if attrs else '',\n content=text_value(content),\n )\n\n\ndef url_replace_param(url, name, value):\n \"\"\"\n Replace a GET parameter in a URL\n \"\"\"\n url_components = urlparse(force_str(url))\n query_params = parse_qs(url_components.query)\n query_params[name] = value\n query = urlencode(query_params, doseq=True)\n return force_text(urlunparse([\n url_components.scheme,\n url_components.netloc,\n url_components.path,\n url_components.params,\n query,\n url_components.fragment,\n ]))\n","sub_path":"bulma/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"407228066","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 12 09:06:01 2018\n\n@author: Vedran Furtula\n\"\"\"\n\nimport os, re, serial, time, configparser\n\nfrom PyQt5.QtWidgets import (QDialog, QMessageBox, QGridLayout, QLabel, QLineEdit, QComboBox, QVBoxLayout, QPushButton)\n\n\n\nclass Load_config_dialog(QDialog):\n\n\tdef __init__(self, parent, config, load_, initUI_):\n\t\tsuper().__init__(parent)\n\t\t\n\t\t# constants\n\t\tself.config = config\n\t\tself.load_ = load_\n\t\tself.initUI_ = initUI_\n\t\tself.last_used_scan = self.config.get('LastScan','last_used_scan')\n\n\t\tself.setupUi()\n\n\tdef get_scan_sections(self):\n\t\t\n\t\tmylist=[]\n\t\tfor i in self.config.sections():\n\t\t\tif i not in [\"LastScan\",\"Instruments\"]:\n\t\t\t\tmylist.extend([i])\n\t\t\n\t\treturn mylist\n\n\tdef setupUi(self):\n\t\t\n\t\t#self.lb0 = QLabel(\"Receiver(s) comma(,) separated:\",self)\n\t\t#self.le1 = QLineEdit()\n\t\t#self.le1.setText(', '.join([i for i in self.emailrec_str]))\n\t\t#self.le1.textChanged.connect(self.on_text_changed)\n\t\t\n\t\tself.lbl1 = QLabel(\"Pick a setting from the config file:\", self)\n\t\tself.combo1 = 
QComboBox(self)\n\t\tmylist1=self.get_scan_sections()\n\t\tself.combo1.addItems(mylist1)\n\t\tself.combo1.setCurrentIndex(mylist1.index(self.last_used_scan))\n\t\tself.combo1.setFixedWidth(300)\n\t\tself.combo1.activated[str].connect(self.onActivated1)\n\t\tself.current_selected_setting=self.last_used_scan\n\t\t\n\t\tself.btnLoadSection = QPushButton(\"Setting loaded\",self)\n\t\tself.btnLoadSection.clicked.connect(self.btn_load_section)\n\t\tself.btnLoadSection.setEnabled(False)\n\t\t#self.btnLoadSection.setFixedWidth(90)\n\t\t\n\t\tself.btnDeleteSection = QPushButton(\"Delete setting\",self)\n\t\tself.btnDeleteSection.clicked.connect(self.btn_delete_section)\n\t\tself.btnDeleteSection.setText(\"Can not delete\")\n\t\tself.btnDeleteSection.setEnabled(False)\n\t\t#self.btnDeleteSection.setFixedWidth(90)\n\t\t\n\t\tself.lbl2 = QLabel(\"or create a new config setting:\", self)\n\t\tself.sectionEdit = QLineEdit(self.last_used_scan,self)\n\t\t#self.self.sectionEdit.setFixedWidth(90)\n\t\tself.sectionEdit.textChanged.connect(self.text_stch)\n\t\t\n\t\tself.btnAcceptText= QPushButton(\"Accept new setting\",self)\n\t\tself.btnAcceptText.clicked.connect(self.btn_accept_text)\n\t\tself.btnAcceptText.setEnabled(False)\n\t\t#self.btnAcceptText.setFixedWidth(90)\n\t\t\n\t\tself.lbl3 = QLabel(\"Currently loaded setting:\", self)\n\t\tself.lbl4 = QLabel(\"\", self)\n\t\tself.lbl4.setStyleSheet(\"color: red; font: 16pt\")\n\t\tself.lbl4.setText(self.last_used_scan)\n\t\t\n\t\t# set layout\n\t\tgrid_0 = QGridLayout()\n\t\tgrid_0.addWidget(self.lbl1,0,0)\n\t\tgrid_0.addWidget(self.combo1,1,0)\n\t\t\n\t\tgrid_1 = QGridLayout()\n\t\tgrid_1.addWidget(self.btnLoadSection,0,0)\n\t\tgrid_1.addWidget(self.btnDeleteSection,0,1)\n\t\t\n\t\tgrid_2 = QGridLayout()\n\t\tgrid_2.addWidget(self.lbl2,0,0)\n\t\tgrid_2.addWidget(self.sectionEdit,1,0)\n\t\tgrid_2.addWidget(self.btnAcceptText,2,0)\n\t\tgrid_2.addWidget(self.lbl3,3,0)\n\t\tgrid_2.addWidget(self.lbl4,4,0)\n\t\t\n\t\tv1 = QVBoxLayout()\n\t\tv1.addLayout(grid_0)\n\t\tv1.addLayout(grid_1)\n\t\tv1.addLayout(grid_2)\n\t\t\n\t\tself.setLayout(v1)\n\t\tself.setWindowTitle(\"Configure settings in the config.ini file\")\n\t\t\n\t\t\n\tdef onActivated1(self, text):\n\t\t\n\t\tself.current_selected_setting=str(text)\n\t\tif str(text)!=self.last_used_scan:\n\t\t\tself.btnLoadSection.setText(\"*Load setting*\")\n\t\t\tself.btnLoadSection.setEnabled(True)\n\t\t\tmylist1=self.get_scan_sections()\n\t\t\tif len(mylist1)>1:\n\t\t\t\tself.btnDeleteSection.setText(\"Delete setting\")\n\t\t\t\tself.btnDeleteSection.setEnabled(True)\n\t\telse:\n\t\t\tself.btnLoadSection.setText(\"Setting loaded\")\n\t\t\tself.btnLoadSection.setEnabled(False)\n\t\t\tself.btnDeleteSection.setText(\"Can not delete\")\n\t\t\tself.btnDeleteSection.setEnabled(False)\n\t\t\n\t\t\n\tdef text_stch(self):\n\t\t\n\t\tmylist1=self.get_scan_sections()\n\t\t\n\t\tif not str(self.sectionEdit.text()):\n\t\t\tself.btnAcceptText.setText(\"Empty string not accepted\")\n\t\t\tself.btnAcceptText.setEnabled(False)\n\t\telif str(self.sectionEdit.text()) not in mylist1:\n\t\t\tself.btnAcceptText.setText(\"*Accept new setting*\")\n\t\t\tself.btnAcceptText.setEnabled(True)\n\t\telse:\n\t\t\tself.btnAcceptText.setText(\"Setting accepted\")\n\t\t\tself.btnAcceptText.setEnabled(False)\n\t\t\t\n\t\t\t\n\tdef btn_load_section(self):\n\t\t\n\t\tself.config.read('config.ini')\n\t\tself.config.set(\"LastScan\",\"last_used_scan\", self.current_selected_setting)\n\t\t\n\t\twith open('config.ini', 'w') as 
configfile:\n\t\t\tself.config.write(configfile)\n\t\t\n\t\tself.btnLoadSection.setText(\"Setting loaded\")\n\t\tself.btnLoadSection.setEnabled(False)\n\t\tself.btnDeleteSection.setText(\"Can not delete\")\n\t\tself.btnDeleteSection.setEnabled(False)\n\t\t\n\t\tself.load_()\n\t\tself.initUI_()\n\t\tself.last_used_scan=self.current_selected_setting\n\t\tself.lbl4.setText(self.last_used_scan)\n\t\t\n\t\t#self.close()\n\t\t\n\t\t\n\tdef btn_delete_section(self):\n\t\t\n\t\ttry:\n\t\t\tself.config.read('config.ini')\n\t\t\tself.config.remove_section(self.current_selected_setting)\n\t\t\twith open('config.ini', 'w') as configfile:\n\t\t\t\tself.config.write(configfile)\n\t\texcept Exception as e:\n\t\t\tQMessageBox.critical(self, 'Message', str(e))\n\t\t\treturn\n\t\t\n\t\tmylist1=self.get_scan_sections()\n\t\tself.combo1.clear()\n\t\tself.combo1.addItems(mylist1)\n\t\tself.combo1.setCurrentIndex(mylist1.index(self.last_used_scan))\n\t\t\n\t\tself.btnLoadSection.setText(\"Setting loaded\")\n\t\tself.btnLoadSection.setEnabled(False)\n\t\tself.btnDeleteSection.setText(\"Can not delete\")\n\t\tself.btnDeleteSection.setEnabled(False)\n\t\t\n\t\tif str(self.sectionEdit.text()) not in mylist1:\n\t\t\tself.btnAcceptText.setText(\"*Accept new setting*\")\n\t\t\tself.btnAcceptText.setEnabled(True)\n\t\telse:\n\t\t\tself.btnAcceptText.setText(\"Setting accepted\")\n\t\t\tself.btnAcceptText.setEnabled(False)\n\t\t\n\t\t\n\tdef btn_accept_text(self):\n\t\t\n\t\tread_text=str(self.sectionEdit.text())\n\t\tself.config.read('config.ini')\n\t\t\n\t\ttry:\n\t\t\tself.config.add_section(read_text)\n\t\texcept configparser.DuplicateSectionError as e:\n\t\t\tQMessageBox.critical(self, 'Message', str(e))\n\t\t\treturn\n\t\t\n\t\tself.i = list(dict(self.config.items(self.last_used_scan)).keys())\n\t\tself.j = list(dict(self.config.items(self.last_used_scan)).values())\n\t\t\n\t\tself.config.set(\"LastScan\",\"last_used_scan\", read_text)\n\t\t\n\t\tfor i_,j_ in zip(self.i,self.j):\n\t\t\tself.config.set(read_text,i_, j_)\n\t\t\t\n\t\twith open('config.ini', 'w') as configfile:\n\t\t\tself.config.write(configfile)\n\t\t\n\t\tself.btnLoadSection.setText(\"Setting loaded\")\n\t\tself.btnLoadSection.setEnabled(False)\n\t\tself.btnDeleteSection.setText(\"Can not delete\")\n\t\tself.btnDeleteSection.setEnabled(False)\n\t\t\n\t\tmylist1=self.get_scan_sections()\n\t\tself.combo1.clear()\n\t\tself.combo1.addItems(mylist1)\n\t\tself.combo1.setCurrentIndex(mylist1.index(read_text))\n\t\t\n\t\tself.last_used_scan=read_text\n\t\t\n\t\tself.btnAcceptText.setText(\"Setting accepted\")\n\t\tself.btnAcceptText.setEnabled(False)\n\t\tself.lbl4.setText(self.last_used_scan)\n\t\t\n\t\tself.load_()\n\t\tself.initUI_()\n\t\t\n\t\t#self.close()\n\t\t\t\n\t\t\t\n\tdef closeEvent(self,event):\n\t\n\t\tevent.accept()\n\t\t\n","sub_path":"Spectrometer_v200420/Load_config_dialog.py","file_name":"Load_config_dialog.py","file_ext":"py","file_size_in_byte":6739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"493588625","text":"import win32com.client\n\n\ndef WMIDateStringToDate(dtmDate):\n strDateTime = \"\"\n if (dtmDate[4] == '0'):\n strDateTime = dtmDate[5] + '/'\n else:\n strDateTime = dtmDate[4] + dtmDate[5] + '/'\n if (dtmDate[6] == '0'):\n strDateTime = strDateTime + dtmDate[7] + '/'\n else:\n strDateTime = strDateTime + dtmDate[6] + dtmDate[7] + '/'\n strDateTime = strDateTime + dtmDate[0] + dtmDate[1] + dtmDate[2] + dtmDate[3] + \" \" + dtmDate[8] + dtmDate[9] + \":\" + dtmDate[10] + 
dtmDate[11] +':' + dtmDate[12] + dtmDate[13]\n return strDateTime\n\nstrComputer = \".\"\nobjWMIService = win32com.client.Dispatch(\"WbemScripting.SWbemLocator\")\nobjSWbemServices = objWMIService.ConnectServer(strComputer,\"root\\cimv2\")\ncolItems = objSWbemServices.ExecQuery(\"SELECT * FROM Win32_Battery\")\nfor objItem in colItems:\n if objItem.Availability != None:\n print(\"Availability: {}\".format(objItem.Availability))\n if objItem.BatteryRechargeTime != None:\n print(\"BatteryRechargeTime: {}\".format(objItem.BatteryRechargeTime))\n if objItem.BatteryStatus != None:\n print(\"BatteryStatus: {}\".format(objItem.BatteryStatus))\n if objItem.Caption != None:\n print(\"Caption: {}\".format(objItem.Caption))\n if objItem.Chemistry != None:\n print(\"Chemistry: {}\".format(objItem.Chemistry))\n if objItem.ConfigManagerErrorCode != None:\n print(\"ConfigManagerErrorCode: {}\".format(objItem.ConfigManagerErrorCode))\n if objItem.ConfigManagerUserConfig != None:\n print(\"ConfigManagerUserConfig: {}\".format(objItem.ConfigManagerUserConfig))\n # if objItem.CreationClassName != None:\n # print(\"CreationClassName:\" + ` objItem.CreationClassName`)\n # if objItem.Description != None:\n # print(\"Description:\" + ` objItem.Description`)\n # if objItem.DesignCapacity != None:\n # print(\"DesignCapacity:\" + ` objItem.DesignCapacity`)\n # if objItem.DesignVoltage != None:\n # print(\"DesignVoltage:\" + ` objItem.DesignVoltage`)\n # if objItem.DeviceID != None:\n # print(\"DeviceID:\" + ` objItem.DeviceID`)\n # if objItem.ErrorCleared != None:\n # print(\"ErrorCleared:\" + ` objItem.ErrorCleared`)\n # if objItem.ErrorDescription != None:\n # print(\"ErrorDescription:\" + ` objItem.ErrorDescription`)\n # if objItem.EstimatedChargeRemaining != None:\n # print(\"EstimatedChargeRemaining:\" + ` objItem.EstimatedChargeRemaining`)\n # if objItem.EstimatedRunTime != None:\n # print(\"EstimatedRunTime:\" + ` objItem.EstimatedRunTime`)\n # if objItem.ExpectedBatteryLife != None:\n # print(\"ExpectedBatteryLife:\" + ` objItem.ExpectedBatteryLife`)\n # if objItem.ExpectedLife != None:\n # print(\"ExpectedLife:\" + ` objItem.ExpectedLife`)\n # if objItem.FullChargeCapacity != None:\n # print(\"FullChargeCapacity:\" + ` objItem.FullChargeCapacity`)\n # if objItem.InstallDate != None:\n # print(\"InstallDate:\" + WMIDateStringToDate(objItem.InstallDate))\n # if objItem.LastErrorCode != None:\n # print(\"LastErrorCode:\" + ` objItem.LastErrorCode`)\n # if objItem.MaxRechargeTime != None:\n # print(\"MaxRechargeTime:\" + ` objItem.MaxRechargeTime`)\n # if objItem.Name != None:\n # print(\"Name:\" + ` objItem.Name`)\n # if objItem.PNPDeviceID != None:\n # print(\"PNPDeviceID:\" + ` objItem.PNPDeviceID`)\n # print(\"PowerManagementCapabilities:\")\n # strList = \" \"\n # try :\n # for objElem in objItem.PowerManagementCapabilities :\n # strList = strList + objElem + \",\"\n # except:\n # strList = strList + 'null'\n # print(strList)\n # if objItem.PowerManagementSupported != None:\n # print(\"PowerManagementSupported:\" + objItem.PowerManagementSupported)\n # if objItem.SmartBatteryVersion != None:\n # print(\"SmartBatteryVersion:\" + objItem.SmartBatteryVersion)\n # if objItem.Status != None:\n # print(\"Status:\" + objItem.Status)\n # if objItem.StatusInfo != None:\n # print(\"StatusInfo:\" + ` objItem.StatusInfo`)\n # if objItem.SystemCreationClassName != None:\n # print(\"SystemCreationClassName:\" + ` objItem.SystemCreationClassName`)\n # if objItem.SystemName != None:\n # print(\"SystemName:\" + ` 
objItem.SystemName`)\n # if objItem.TimeOnBattery != None:\n # print(\"TimeOnBattery:\" + ` objItem.TimeOnBattery`)\n # if objItem.TimeToFullCharge != None:\n # print(\"TimeToFullCharge:\" + ` objItem.TimeToFullCharge`)\n\n","sub_path":"wmi_battery.py","file_name":"wmi_battery.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"455257226","text":"from pathlib import Path\nimport openpyxl\n\nclass EventDecision:\n def __init__(self, row):\n self.time = row[0].value\n self.decision = row[1].value\n self.user = row[2].value\n self.confidence = row[3].value\n self.event_id = row[4].value\n self.decision_id = row[5].value\n\n def __repr__(self):\n return f'({self.user},{self.event_id},{self.decision},{self.confidence})'\n \n def __str__(self):\n return self.__repr__()\n\n def __eq__(self, other):\n return self.user == other.user and self.event_id == other.event_id and self.decision == other.decision and self.confidence == other.confidence\n \n def __hash__(self):\n return hash((self.user, self.event_id, self.decision, self.confidence))\n\n def __lt__(self, value):\n return self.time < value.time\n\n\nfile = Path('backups') / 'cry-wolf_20200125_14-35-09_patched.xlsx'\nwb = openpyxl.load_workbook(file)\nevent_sheet = wb['EventDecision']\n\nusers = {}\nresubmit_count = 0\nrow_count = 0\nfor row in event_sheet.iter_rows(min_row=2):\n row_count += 1\n ed = EventDecision(row)\n if ed.user not in users:\n users[ed.user] = { ed.event_id : {ed} }\n else:\n if ed.event_id in users[ed.user]:\n users[ed.user][ed.event_id].add(ed)\n resubmit_count += 1\n else:\n users[ed.user][ed.event_id] = {ed}\n\nprint(f\"Number of unique event decisions: {row_count}\")\nprint(f\"Number of resubmitted event decisions: {resubmit_count}\")\nprint(f\"Number of unique users: {len(users.keys())}\")\nprint(\"Users+event ids with changes on resubmit:\") \n\ncount_changed = 0\ncount_changed_events = 0\n\nchange_counts_by_user = {}\n\nfor user, v in users.items():\n for event_id, decisions in v.items():\n if len(decisions) > 1:\n if user in change_counts_by_user:\n change_counts_by_user[user] += 1\n else:\n change_counts_by_user[user] = 1\n count_changed += len(decisions)-1\n count_changed_events += 1\n print(sorted(decisions))\n\nprint(f\"Number of changes on resubmit: {count_changed}\")\nprint(f\"Number of unique events that were changed: {count_changed_events}\")\nprint(f\"Users who changed their answers: {change_counts_by_user}\")\n\n\n","sub_path":"duplicate_decision.py","file_name":"duplicate_decision.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"323376030","text":"try:\n import autograd.numpy as np\n from autograd import grad, elementwise_grad, jacobian\n _HAS_GRADIENTS = True\nexcept(ImportError):\n import numpy as np\n _HAS_GRADIENTS = False\n\n\nln2pi = np.log(2 * np.pi)\n\n\nclass PixelResponse(object):\n\n def lnlike(self, params, source):\n image = self.counts(params, source)\n imgrad = self.counts_gradient(params, source)\n delta = self.data - image\n chi = delta / self.unc\n lnlike = -0.5 * np.sum(chi**2)\n if imgrad is None:\n lnlike_grad = None\n else:\n lnlike_grad = np.sum(chi / self.unc * imgrad, axis=0)\n\n return lnlike, lnlike_grad\n\n def counts_gradient(self, params, source):\n if self.hasgrad:\n return self._counts_gradient(params, source)\n else:\n return None\n\n @property\n def _counts_gradient(self):\n 
return jacobian(self.counts, argnum=0)\n\n\nclass GaussianMixtureResponse(PixelResponse):\n\n \"\"\"An object which approximates the PSF by a mixture of gaussians. This\n allows for analytic convolution with GaussianMixtureSources, under the\n assumption that the PSF does not change across the source.\n \"\"\"\n\n hasgrad = _HAS_GRADIENTS\n\n def __init__(self, amplitudes=[], radii=[], mu=0., points=None):\n self.ncomp = len(amplitudes)\n self.means = np.array([[mu, mu] for i in range(self.ncomp)])\n self.covar = np.array([np.diag([r, r]) for r in radii])\n self.amplitudes = np.array(amplitudes)\n self.points = points\n \n def convolve(self, params, source):\n \"\"\"Convolve via sums of mean vectors and covariance matrices and products of amplitudes.\n \"\"\"\n ns = source.ncomp\n nr = self.ncomp\n\n source_mu, source_sigma, source_amp = source.gaussians(params)\n mu = source_mu[None, :, :] + self.means[:, None, :]\n sigma = source_sigma[None, :, :, :] + self.covar[:, None, :, :]\n amplitude = source_amp[None, :] * self.amplitudes[:, None]\n return mu.reshape(nr*ns, 2), sigma.reshape(nr*ns, 2, 2), amplitude.reshape(nr*ns)\n\n def counts(self, params, source):\n x = self.points\n gaussians = self.convolve(params, source)\n mu, sigma, amplitude = gaussians\n #c = 1.0 * np.zeros(len(x))\n #for (m, s, a) in zip(*gaussians):\n # c = c + a * normal(x - m, s)\n d = x[None, :, :] - mu[:, None, :]\n c = amplitude[:, None] * normal(d, sigma)\n return c.sum(axis=0)\n\n\n#d = x[None, :, :] - mu[:, None, :]\n#r = np.matmul(np.linalg.inv(sigma[:, None, :, :]), d[:, :, :, None])\n#k = np.matmul(d[:, :, None, :], r)\n\ndef normal(x, sigma):\n \"\"\"Calculate the normal density at x, assuming mean of zero.\n\n :param x:\n ndarray of shape (ngauss, npix, 2)\n\n :param sigma:\n ndarray of shape (ngauss, 2, 2)\n\n returns density:\n ndarray of shape (ngauss, npix)\n \"\"\"\n ln_density = -0.5 * np.matmul(x[:, :, None, :],\n np.matmul(np.linalg.inv(sigma[:, None, :, :]),\n x[:, :, :, None]))\n # sign, logdet = np.linalg.slogdet(sigma)\n # ln_density -= 0.5 * (logdet + ln2pi)\n # density = sign * np.exp(ln_density)\n density = np.exp(ln_density)[:, :, 0, 0]\n return density / np.sqrt(2 * np.pi * np.linalg.det(sigma)[:, None])\n\n\nclass PhonionPixelResponse(PixelResponse):\n\n \"\"\"An object which applies the pixel response function to a set of point\n sources to compute the pixel counts (and gradients thereof with respect to\n the source properties). This is incredibly general, since in principle the PRF can be\n different for every pixel. It's also slow because to make an image one has\n to make a Python loop over PixelResponse objects. 
\n \"\"\"\n\n hasgrad = _HAS_GRADIENTS\n \n def __init__(self, mu, Sigma=[1., 1.]):\n \"\"\"Initialize object with parameters of the pixel response function.\n Note that each mu and sigma corresponds to a single pixel.\n\n \"\"\"\n self.mu = np.atleast_2d(mu)\n assert self.mu.shape[1] == 2\n\n s = np.atleast_1d(Sigma)\n assert s.shape[0] == 2\n if s.ndim == 1:\n self.Sigma = np.diag(s)\n elif ndim == 2:\n self.Sigma = s\n else:\n raise(ValueError, \"Sigma must be one- or two-dimensional\")\n\n #assert np.all((self.Sigma.shape) == 2)\n\n\n def counts(self, params, source):\n \"\"\"Return the pixel response to the source with given params.\n\n Should allow here for vectorization (and use jacobian for the gradients)\n \"\"\"\n rp = source.coordinates(params) # (nphony, 2)\n weights = source.weights(params) # (nphony)\n delta = rp[None, :, :] - self.mu[:, None, :] # (npix, nphony, 2)\n # this is returns (npix, nphony, 1, 1)\n ln_density = -0.5 * np.matmul(delta[:, :, None, :],\n np.matmul(np.linalg.inv(self.Sigma[None, None, :, :]),\n delta[:, :, :, None]))\n # and this returns (npix, nphoony)\n density = (weights[None, :] * np.exp(ln_density[:, :, 0, 0]) /\n np.sqrt(2 * np.pi * np.linalg.det(self.Sigma)))\n return density.sum(axis=-1)\n","sub_path":"forcepho/boneyard/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"622812918","text":"\"\"\"\nCollection of functions used in pvlib_python\n\"\"\"\n\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\nimport pytz\n\n\ndef cosd(angle):\n \"\"\"\n Cosine with angle input in degrees\n\n Parameters\n ----------\n angle : float or array-like\n Angle in degrees\n\n Returns\n -------\n result : float or array-like\n Cosine of the angle\n \"\"\"\n\n res = np.cos(np.radians(angle))\n return res\n\n\ndef sind(angle):\n \"\"\"\n Sine with angle input in degrees\n\n Parameters\n ----------\n angle : float\n Angle in degrees\n\n Returns\n -------\n result : float\n Sin of the angle\n \"\"\"\n\n res = np.sin(np.radians(angle))\n return res\n\n\ndef tand(angle):\n \"\"\"\n Tan with angle input in degrees\n\n Parameters\n ----------\n angle : float\n Angle in degrees\n\n Returns\n -------\n result : float\n Tan of the angle\n \"\"\"\n\n res = np.tan(np.radians(angle))\n return res\n\n\ndef asind(number):\n \"\"\"\n Inverse Sine returning an angle in degrees\n\n Parameters\n ----------\n number : float\n Input number\n\n Returns\n -------\n result : float\n arcsin result\n \"\"\"\n\n res = np.degrees(np.arcsin(number))\n return res\n\n\ndef localize_to_utc(time, location):\n \"\"\"\n Converts or localizes a time series to UTC.\n\n Parameters\n ----------\n time : datetime.datetime, pandas.DatetimeIndex,\n or pandas.Series/DataFrame with a DatetimeIndex.\n location : pvlib.Location object\n\n Returns\n -------\n pandas object localized to UTC.\n \"\"\"\n if isinstance(time, dt.datetime):\n if time.tzinfo is None:\n time = pytz.timezone(location.tz).localize(time)\n time_utc = time.astimezone(pytz.utc)\n else:\n try:\n time_utc = time.tz_convert('UTC')\n except TypeError:\n time_utc = time.tz_localize(location.tz).tz_convert('UTC')\n\n return time_utc\n\n\ndef datetime_to_djd(time):\n \"\"\"\n Converts a datetime to the Dublin Julian Day\n\n Parameters\n ----------\n time : datetime.datetime\n time to convert\n\n Returns\n -------\n float\n fractional days since 12/31/1899+0000\n \"\"\"\n\n if time.tzinfo is None:\n time_utc = 
pytz.utc.localize(time)\n else:\n time_utc = time.astimezone(pytz.utc)\n\n djd_start = pytz.utc.localize(dt.datetime(1899, 12, 31, 12))\n djd = (time_utc - djd_start).total_seconds() * 1.0/(60 * 60 * 24)\n\n return djd\n\n\ndef djd_to_datetime(djd, tz='UTC'):\n \"\"\"\n Converts a Dublin Julian Day float to a datetime.datetime object\n\n Parameters\n ----------\n djd : float\n fractional days since 12/31/1899+0000\n tz : str, default 'UTC'\n timezone to localize the result to\n\n Returns\n -------\n datetime.datetime\n The resultant datetime localized to tz\n \"\"\"\n\n djd_start = pytz.utc.localize(dt.datetime(1899, 12, 31, 12))\n\n utc_time = djd_start + dt.timedelta(days=djd)\n return utc_time.astimezone(pytz.timezone(tz))\n\n\ndef _pandas_to_doy(pd_object):\n \"\"\"\n Finds the day of year for a pandas datetime-like object.\n\n Useful for delayed evaluation of the dayofyear attribute.\n\n Parameters\n ----------\n pd_object : DatetimeIndex or Timestamp\n\n Returns\n -------\n dayofyear\n \"\"\"\n return pd_object.dayofyear\n\n\ndef _doy_to_datetimeindex(doy, epoch_year=2014):\n \"\"\"\n Convert a day of year scalar or array to a pd.DatetimeIndex.\n\n Parameters\n ----------\n doy : numeric\n Contains days of the year\n\n Returns\n -------\n pd.DatetimeIndex\n \"\"\"\n doy = np.atleast_1d(doy).astype('float')\n epoch = pd.Timestamp('{}-12-31'.format(epoch_year - 1))\n timestamps = [epoch + dt.timedelta(days=adoy) for adoy in doy]\n return pd.DatetimeIndex(timestamps)\n\n\ndef _datetimelike_scalar_to_doy(time):\n return pd.DatetimeIndex([pd.Timestamp(time)]).dayofyear\n\n\ndef _datetimelike_scalar_to_datetimeindex(time):\n return pd.DatetimeIndex([pd.Timestamp(time)])\n\n\ndef _scalar_out(arg):\n if np.isscalar(arg):\n output = arg\n else: #\n # works if it's a 1 length array and\n # will throw a ValueError otherwise\n output = np.asarray(arg).item()\n\n return output\n\n\ndef _array_out(arg):\n if isinstance(arg, pd.Series):\n output = arg.values\n else:\n output = arg\n\n return output\n\n\ndef _build_kwargs(keys, input_dict):\n \"\"\"\n Parameters\n ----------\n keys : iterable\n Typically a list of strings.\n input_dict : dict-like\n A dictionary from which to attempt to pull each key.\n\n Returns\n -------\n kwargs : dict\n A dictionary with only the keys that were in input_dict\n \"\"\"\n\n kwargs = {}\n for key in keys:\n try:\n kwargs[key] = input_dict[key]\n except KeyError:\n pass\n\n return kwargs\n\n\ndef _build_args(keys, input_dict, dict_name):\n \"\"\"\n Parameters\n ----------\n keys : iterable\n Typically a list of strings.\n input_dict : dict-like\n A dictionary from which to pull each key.\n dict_name : str\n A variable name to include in an error message for missing keys\n\n Returns\n -------\n kwargs : list\n A list with values corresponding to keys\n \"\"\"\n try:\n args = [input_dict[key] for key in keys]\n except KeyError as e:\n missing_key = e.args[0]\n msg = (f\"Missing required parameter '{missing_key}'. 
Found \"\n f\"{input_dict} in {dict_name}.\")\n raise KeyError(msg)\n return args\n\n\n# Created April,2014\n# Author: Rob Andrews, Calama Consulting\n\ndef _golden_sect_DataFrame(params, VL, VH, func):\n \"\"\"\n Vectorized golden section search for finding MPP from a dataframe\n timeseries.\n\n Parameters\n ----------\n params : dict\n Dictionary containing scalars or arrays\n of inputs to the function to be optimized.\n Each row should represent an independent optimization.\n\n VL: float\n Lower bound of the optimization\n\n VH: float\n Upper bound of the optimization\n\n func: function\n Function to be optimized must be in the form f(array-like, x)\n\n Returns\n -------\n func(df,'V1') : DataFrame\n function evaluated at the optimal point\n\n df['V1']: Dataframe\n Dataframe of optimal points\n\n Notes\n -----\n This function will find the MAXIMUM of a function\n \"\"\"\n\n df = params\n df['VH'] = VH\n df['VL'] = VL\n\n errflag = True\n iterations = 0\n\n while errflag:\n\n phi = (np.sqrt(5)-1)/2*(df['VH']-df['VL'])\n df['V1'] = df['VL'] + phi\n df['V2'] = df['VH'] - phi\n\n df['f1'] = func(df, 'V1')\n df['f2'] = func(df, 'V2')\n df['SW_Flag'] = df['f1'] > df['f2']\n\n df['VL'] = df['V2']*df['SW_Flag'] + df['VL']*(~df['SW_Flag'])\n df['VH'] = df['V1']*~df['SW_Flag'] + df['VH']*(df['SW_Flag'])\n\n err = df['V1'] - df['V2']\n try:\n errflag = (abs(err) > .01).any()\n except ValueError:\n errflag = (abs(err) > .01)\n\n iterations += 1\n\n if iterations > 50:\n raise Exception(\"EXCEPTION:iterations exceeded maximum (50)\")\n\n return func(df, 'V1'), df['V1']\n","sub_path":"pvlib/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"597270423","text":"\ndef inorder(root):\n # 判断是否为基准条件\n # 否:递归的中序遍历左子树,访问该节点,递归的遍历右子树\n # 是:根据需要返回\n if root:\n inorder(root.left)\n print(root.val)\n inorder(root.right)\n\n'''\n1.沿最左侧通路从根节点自上而下入栈直到最左下叶节点\n2.从最左下叶节点向上回溯,出栈并访问该节点,转向该节点右子树根节点,依1,2步继续处理,直到栈为空\n'''\ndef inOrderTraversal(root):\n if not root:\n return\n s = root\n stack = []\n # 给定节点,先将该节点入栈,遍历其左子树,该节点出栈并访问,遍历其右子树\n while True:\n while s:\n # 给定节点,沿做孩子路径依次入栈相应节点,直至不存在左孩子的节点\n stack.append(s)\n s = s.left\n # 栈不空,向上回溯。出栈节点对应的左子树已被访问\n if stack:\n s = stack.pop()\n print(s.val)\n # 如果出栈节点存在右子树,访问其右子树\n s = s.right\n else:\n return -1","sub_path":"Tree/inorderTraversal.py","file_name":"inorderTraversal.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"339141049","text":"import torch\nfrom tqdm import tqdm\n\nfrom ...utils.learning import adjust_learning_rate\nfrom ...utils.log import logger\nfrom ...base.module import Module\n\nfrom .config import DEVICE, DEFAULT_CONFIG\nfrom .model import Config, BiLstmCrf\nfrom .tool import ner_tool\nfrom .utils.convert import iob_ranges\n\nseed = 2019\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\n\n\nclass NER(Module):\n \"\"\"\n \"\"\"\n def __init__(self):\n self._model = None\n self._word_vocab = None\n self._tag_vocab = None\n \n def train(self, train_path, save_path=DEFAULT_CONFIG['save_path'], dev_path=None, vectors_path=None, **kwargs):\n train_dataset = ner_tool.get_dataset(train_path)\n if dev_path:\n dev_dataset = ner_tool.get_dataset(dev_path)\n word_vocab, tag_vocab = ner_tool.get_vocab(train_dataset, dev_dataset)\n else:\n word_vocab, tag_vocab = ner_tool.get_vocab(train_dataset)\n self._word_vocab = word_vocab\n 
self._tag_vocab = tag_vocab\n train_iter = ner_tool.get_iterator(train_dataset, batch_size=DEFAULT_CONFIG['batch_size'])\n config = Config(word_vocab, tag_vocab, save_path=save_path, vector_path=vectors_path, **kwargs)\n bilstmcrf = BiLstmCrf(config)\n self._model = bilstmcrf\n optim = torch.optim.Adam(bilstmcrf.parameters(), lr=config.lr)\n for epoch in range(config.epoch):\n bilstmcrf.train()\n acc_loss = 0\n for item in tqdm(train_iter):\n bilstmcrf.zero_grad()\n item_text_sentences = item.text[0]\n item_text_lengths = item.text[1]\n item_loss = (-bilstmcrf.loss(item_text_sentences, item_text_lengths, item.tag)) / item.tag.size(1)\n acc_loss += item_loss.view(-1).cpu().data.tolist()[0]\n item_loss.backward()\n optim.step()\n logger.info('epoch: {}, acc_loss: {}'.format(epoch, acc_loss))\n if dev_path:\n dev_score = self._validate(dev_dataset)\n logger.info('dev score:{}'.format(dev_score))\n\n adjust_learning_rate(optim, config.lr / (1 + (epoch + 1) * config.lr_decay))\n config.save()\n bilstmcrf.save()\n\n def predict(self, text):\n self._model.eval()\n vec_text = torch.tensor([self._word_vocab.stoi[x] for x in text])\n len_text = torch.tensor([len(vec_text)]).to(DEVICE)\n vec_predict = self._model(vec_text.view(-1, 1).to(DEVICE), len_text)[0]\n tag_predict = [self._tag_vocab.itos[i] for i in vec_predict]\n return iob_ranges([x for x in text], tag_predict)\n\n def load(self, save_path=DEFAULT_CONFIG['save_path']):\n config = Config.load(save_path)\n bilstmcrf = BiLstmCrf(config)\n bilstmcrf.load()\n self._model = bilstmcrf\n self._word_vocab = config.word_vocab\n self._tag_vocab = config.tag_vocab\n \n def test(self, test_path):\n test_dataset = ner_tool.get_dataset(test_path)\n test_score = self._validate(test_dataset)\n logger.info('test score:{}'.format(test_score))\n \n def _validate(self, dev_dataset):\n self._model.eval()\n dev_score_list = []\n for dev_item in tqdm(dev_dataset):\n item_score = ner_tool.get_score(self._model, dev_item.text, dev_item.tag, self._word_vocab, self._tag_vocab)\n dev_score_list.append(item_score)\n return sum(dev_score_list) / len(dev_score_list)\n","sub_path":"lightkg/erl/ner/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"621635587","text":"import numpy\n\nfrom chainer.backends import cuda\nfrom chainer import configuration\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _as4darray(arr):\n if arr.ndim == 0:\n return arr.reshape(1, 1, 1, 1)\n elif arr.ndim == 4:\n return arr\n else:\n return arr.reshape(arr.shape[0], -1, 1, 1)\n\n\ndef _xhat(x, mean, std, expander):\n x_mu = x - mean[expander]\n x_mu /= std[expander]\n return x_mu\n\n\nclass BatchRenormalizationFunction(function.Function):\n\n def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9,\n rmax=1, dmax=0, freeze_running_statistics=False):\n self.running_mean = mean\n self.running_var = var\n self.rmax = rmax\n self.dmax = dmax\n self.r = None\n self.d = None\n self.freeze_running_statistics = freeze_running_statistics\n\n self.eps = eps\n self.mean_cache = None\n self.decay = decay\n\n def check_type_forward(self, in_types):\n n_in = type_check.eval(in_types.size())\n if n_in != 3 and n_in != 5:\n raise type_check.InvalidType(\n '%s or %s' % (in_types.size() == 3, in_types.size() == 5),\n '%s == %s' % (in_types.size(), n_in))\n x_type, gamma_type, beta_type = in_types[:3]\n M = type_check.eval(gamma_type.ndim)\n 
type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim >= gamma_type.ndim + 1,\n x_type.shape[1:1 + M] == gamma_type.shape,\n # TODO(tkerola): Check shape\n gamma_type.dtype == x_type.dtype,\n beta_type.dtype == x_type.dtype,\n gamma_type.shape == beta_type.shape,\n )\n if len(in_types) == 5:\n mean_type, var_type = in_types[3:]\n type_check.expect(\n mean_type.dtype == x_type.dtype,\n mean_type.shape == gamma_type.shape,\n var_type.dtype == x_type.dtype,\n var_type.shape == gamma_type.shape,\n )\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n x, gamma, beta = inputs[:3]\n\n # Note: If length of inputs is not 5, we must be in train mode.\n if len(inputs) != 5:\n assert configuration.config.train\n\n if configuration.config.train:\n if self.running_mean is None:\n self.running_mean = xp.zeros_like(gamma)\n self.running_var = xp.zeros_like(gamma)\n else:\n self.running_mean = xp.array(self.running_mean)\n self.running_var = xp.array(self.running_var)\n elif len(inputs) == 5:\n fixed_mean = inputs[3]\n fixed_var = inputs[4]\n\n head_ndim = gamma.ndim + 1\n expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)\n\n # NOTE(tommi): cuDNN is not used since it does not support\n # batch renormalization\n if configuration.config.train:\n axis = (0,) + tuple(range(head_ndim, x.ndim))\n mean = x.mean(axis=axis)\n var = x.var(axis=axis) + self.eps\n else:\n mean = fixed_mean\n var = fixed_var + self.eps\n self.std = xp.sqrt(var, dtype=var.dtype)\n\n if not self.freeze_running_statistics or self.r is None:\n if configuration.config.train:\n running_sigma = xp.sqrt(self.running_var + self.eps,\n dtype=self.running_mean.dtype)\n self.r = xp.clip(self.std / running_sigma,\n 1.0 / self.rmax, self.rmax)\n self.d = xp.clip((mean - self.running_mean) / running_sigma,\n -self.dmax, self.dmax)\n\n # Update running statistics:\n m = x.size // gamma[expander].size\n self.running_mean *= self.decay\n adjust = m / max(m - 1., 1.) 
# unbiased estimation\n temp_ar = xp.array(mean)\n temp_ar *= (1 - self.decay)\n self.running_mean += temp_ar\n del temp_ar\n self.running_var *= self.decay\n temp_ar = xp.array(var)\n temp_ar *= (1 - self.decay) * adjust\n self.running_var += temp_ar\n del temp_ar\n else:\n self.r = xp.ones_like(gamma)\n self.d = xp.zeros_like(gamma)\n\n if self.freeze_running_statistics:\n # Need to explicitly cast during gradient check, as r and d are\n # not updated during finite differences\n self.r = self.r.astype(gamma.dtype)\n self.d = self.d.astype(gamma.dtype)\n\n gamma = gamma[expander]\n beta = beta[expander]\n\n if xp is numpy:\n self.x_hat = _xhat(x, mean, self.std, expander)\n self.x_hat_renorm = self.x_hat * self.r[expander] + \\\n self.d[expander]\n y = gamma * self.x_hat_renorm\n y += beta\n else:\n self.x_hat, self.x_hat_renorm, y = cuda.elementwise(\n 'T x, T mean, T std, T gamma, T beta, T r, T d',\n 'T x_hat, T x_hat_renorm, T y',\n '''\n x_hat = (x - mean) / std;\n x_hat_renorm = x_hat * r + d;\n y = gamma * x_hat_renorm + beta;\n ''',\n 'bn_fwd')(x, mean[expander], self.std[expander], gamma,\n beta, self.r[expander], self.d[expander])\n\n return y,\n\n def backward(self, inputs, grad_outputs):\n x, gamma = inputs[:2]\n gy = grad_outputs[0]\n head_ndim = gamma.ndim + 1\n expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)\n m = gamma.dtype.type(x.size // gamma.size)\n axis = (0,) + tuple(range(head_ndim, x.ndim))\n xp = cuda.get_array_module(x)\n if len(inputs) == 5:\n # This case is unlikely to be used in practice and so does not\n # need to be optimized for performance.\n mean = inputs[3]\n var = inputs[4] + self.eps\n std = xp.sqrt(var, dtype=var.dtype)\n gs = gamma / std\n gbeta = gy.sum(axis=axis)\n x_hat = _xhat(x, mean, std, expander)\n ggamma = (gy * x_hat).sum(axis=axis)\n gmean = -gs * gbeta\n gvar = -0.5 * gamma / var * ggamma\n gx = gs[expander] * gy\n return gx, ggamma, gbeta, gmean, gvar\n\n # Note: If length of inputs is not 5, we must be in train mode.\n assert configuration.config.train\n # NOTE(tommi): cuDNN is not used since it does not support\n # batch renormalization\n gbeta = gy.sum(axis=axis)\n ggamma = (gy * self.x_hat_renorm).sum(axis=axis)\n gsigma_batch = (gy * self.x_hat).sum(axis=axis)\n if xp is numpy:\n scale = (self.r * gamma / self.std)[expander]\n gx = scale * (gy - (self.x_hat * gsigma_batch[expander] +\n gbeta[expander]) / m)\n else:\n inv_m = numpy.float32(1) / m\n gx = cuda.elementwise(\n 'T gy, T x_hat, T gamma, T std, T gsigma_batch, T gbeta, \\\n T inv_m, T r',\n 'T gx',\n 'gx = (r * gamma / std) * (gy - (x_hat * gsigma_batch + gbeta) * \\\n inv_m)',\n 'bn_bwd')(gy, self.x_hat, gamma[expander],\n self.std[expander], gsigma_batch[expander],\n gbeta[expander], inv_m, self.r[expander])\n return gx, ggamma, gbeta\n\n\ndef batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,\n running_mean=None, running_var=None, decay=0.9):\n \"\"\"Batch renormalization function.\n\n This is an extension of batch normalization, which ensures that the\n training and inference models generate the same outputs that depend on\n individual examples rather than the entire minibatch.\n\n See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \\\n Batch-Normalized Models `_\n\n .. seealso:: :class:`links.BatchRenormalization`\n .. 
seealso:: :func:`functions.BatchNormalization`\n\n \"\"\"\n return BatchRenormalizationFunction(eps, running_mean, running_var,\n decay, rmax, dmax)(x, gamma, beta)\n\n\ndef fixed_batch_renormalization(x, gamma, beta, mean, var, eps=2e-5):\n with configuration.using_config('train', False):\n return BatchRenormalizationFunction(eps, None, None, 0.0)(\n x, gamma, beta, mean, var)\n","sub_path":"chainer/functions/normalization/batch_renormalization.py","file_name":"batch_renormalization.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"297109201","text":"# 1. convertFahrenheit - It will take celsius and will print it into Fahrenheit.\r\n# 2. convertCelsius - It will take Fahrenheit and will convert it into Celsius.\r\ndef main():\r\n class Temperature():\r\n def convertFahrenheit(self,celsius):\r\n funk=(celsius*(9/5))+32\r\n return f\"your temperature in Fahrenheit is {funk}\"\r\n def convertCelsius(self,fahrenheit):\r\n funk2=(fahrenheit-32)*(5/9)\r\n return f\"your temperature in Celsius is {funk2}\"\r\n temp=Temperature()\r\n var=float(input(\"please enter\\n 1-for Fahrenheit to Celsius conversion.. \\n 2-for Celsius to Fahrenheit conversion..=> \"))\r\n if var==1:\r\n var2=float(input(\"please enter value for Fahrenheit temperature => \"))\r\n print(f\"your temperature in Fahrenheit is = {var2}\")\r\n print(temp.convertCelsius(var2))\r\n elif var==2:\r\n var2=float(input(\"please enter value for Celsius temperature => \"))\r\n print(f\"your temperature in Celsius is = {var2}\")\r\n print(temp.convertFahrenheit(var2))\r\n else:\r\n print(\"wrong input\")\r\n main()\r\nmain() \r\n\r\n \r\n\r\n","sub_path":"object_oriented_porgrams/temprature_converter.py","file_name":"temprature_converter.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"309887215","text":"\"\"\"\n\tTakes a CNV VCF that is formatted with an SVTYPE=CNV and turns it into a vcf that Moon can read.\n\"\"\"\n\n\nimport sys\n\n\ndef main(vcf_file, new_vcf_file):\n\t#open the vcf file\n\tvcf = open(vcf_file, 'r')\n\t#open the new file to write to\n\tnew_vcf = open(new_vcf_file, 'w')\n\t#loop over each of the records in the vcf\n\tfor line in vcf:\n\t\t#if it's a header line just write it back to the new vcf\n\t\tif line.startswith('#'):\n\t\t\tnew_vcf.write(line)\n\t\telse:\n\t\t\t#Split the line into workable entries\n\t\t\tline_array = line.split('\\t')\n\t\t\t#Get the sample entry\n\t\t\tsample_entry = line_array[9]\n\t\t\t#Get the type of the entry from the first character of the sample entry\n\t\t\tif sample_entry.startswith('1'):\n\t\t\t\tCNV_type = 'DEL'\n\t\t\telif sample_entry.startswith('2'):\n\t\t\t\tCNV_type = 'DUP'\n\t\t\telse:\n\t\t\t\tCNV_type = 'NONE'\n\t\t\t#Change the ALT\n\t\t\tline_array[4] = '<' + CNV_type + '>'\n\t\t\t\n\t\t\t#get the info entry\n\t\t\tinfo = line_array[7]\n\t\t\t#get the svtype entry \n\t\t\tinfo_array = info.split(';')\n\t\t\t#change the svtype entry\n\t\t\tinfo_array[6] = 'SVTYPE=' + CNV_type\n\t\t\t#change the info entry\n\t\t\tcorrected_info = ''\n\t\t\tfirst = True\n\t\t\tfor entry in info_array:\n\t\t\t\tif first:\n\t\t\t\t\tcorrected_info = entry\n\t\t\t\t\tfirst = False\n\t\t\t\telse:\n\t\t\t\t\tcorrected_info += ';' + entry\n\t\t\t#add the corrected info back into the line_array\n\t\t\tline_array[7] = corrected_info\n\t\t\t\n\t\t\t#write the fixed line array to the new 
vcf\n\t\t\tfirst = True\n\t\t\tfor entry in line_array:\n\t\t\t\tif first:\n\t\t\t\t\tnew_vcf.write(entry)\n\t\t\t\t\tfirst = False\n\t\t\t\telse:\n\t\t\t\t\tnew_vcf.write('\\t' + entry)\n\tvcf.close()\n\tnew_vcf.close()\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n main(args[1], args[2])","sub_path":"moon_tools/moon_cnv_formater.py","file_name":"moon_cnv_formater.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"287584629","text":"from colors import Color\r\nfrom commands.server import ServerCommand\r\nfrom core import SOURCE_ENGINE_BRANCH\r\nfrom cvars import ConVar\r\nfrom effects.base import TempEntity\r\nfrom engines.precache import Model\r\nfrom engines.server import queue_command_string, execute_server_command\r\nfrom engines.trace import ContentMasks\r\nfrom engines.trace import engine_trace\r\nfrom engines.trace import GameTrace\r\nfrom engines.trace import Ray\r\nfrom engines.trace import TraceFilterSimple\r\nfrom entities import BaseEntityGenerator\r\nfrom entities import TakeDamageInfo\r\nfrom entities.constants import DamageTypes, MoveType\r\nfrom entities.entity import Entity\r\nfrom entities.hooks import EntityCondition\r\nfrom entities.hooks import EntityPreHook\r\nfrom events import Event\r\nfrom events.hooks import PreEvent\r\nfrom filters.players import PlayerIter\r\nfrom filters.recipients import RecipientFilter\r\nfrom listeners.tick import Delay, Repeat\r\nimport math\r\nfrom mathlib import Vector,QAngle\r\nfrom memory import make_object\r\nfrom messages import Fade, FadeFlags, HudMsg, SayText2, TextMsg\r\nfrom messages.base import Shake\r\nfrom players.helpers import index_from_userid, playerinfo_from_userid, index_from_playerinfo, userid_from_index, edict_from_userid,inthandle_from_userid,playerinfo_from_index\r\nfrom players.entity import Player\r\nfrom random import choice,randint\r\nimport string\r\nimport time\r\nimport wcs\r\nfrom wcs import changerace\r\nfrom wcs import wcsgroup\r\nfrom weapons.entity import Weapon\r\n\r\n\r\n\r\n\r\nentity_health = {}\r\n\r\nweapon_list = [\"weapon_ak47\",\"weapon_aug\",\"weapon_awp\",\"weapon_bizon\",\"weapon_c4\",\"weapon_cz75a\",\"weapon_deagle\",\"weapon_decoy\",\"weapon_elite\",\"weapon_famas\",\"weapon_fiveseven\",\"weapon_flashbang\",\"weapon_g3sg1\",\"weapon_galil\",\"weapon_galilar\",\"weapon_glock\",\"weapon_hegrenade\",\"weapon_incgrenade\",\"weapon_hkp2000\",\"weapon_knife\",\"weapon_m249\",\"weapon_m3\",\"weapon_m4a1\",\"weapon_m4a1_silencer\",\"weapon_mac10\",\"weapon_mag7\",\"weapon_molotov\",\"weapon_mp5navy\",\"weapon_mp7\",\"weapon_mp9\",\"weapon_negev\",\"weapon_nova\",\"weapon_p228\",\"weapon_p250\",\"weapon_p90\",\"weapon_sawedoff\",\"weapon_scar17\",\"weapon_scar20\",\"weapon_scout\",\"weapon_sg550\",\"weapon_sg552\",\"weapon_sg556\",\"weapon_ssg08\",\"weapon_smokegrenade\",\"weapon_taser\",\"weapon_tec9\",\"weapon_tmp\",\"weapon_ump45\",\"weapon_usp\",\"weapon_usp_silencer\",\"weapon_xm1014\",\"weapon_revolver\"]\r\n\r\nanti_falldamage = {}\r\nrepeat_dict = {}\r\nfor player in PlayerIter('all'):\r\n\trepeat_dict[player.userid] = 0\r\n\t\r\n\t\r\n# =============================================================================\r\n# >> SERVER COMMANDS\r\n# =============================================================================\t\r\n\t\r\n@ServerCommand('wcs_absorb')\r\ndef absorb(command):\r\n\tuserid = int(command[1])\r\n\tamount = float(command[2])\r\n\tif 
exists(userid):\r\n\t\twcsgroup.setUser(userid,'absorb',amount)\r\n\r\n\t\r\n@ServerCommand('wcs_ambush')\r\ndef test(command):\r\n player = Player.from_userid(int(command[1]))\r\n target = Player.from_userid(int(command[2]))\r\n if target:\r\n angles = target.get_view_angle()\r\n angle = math.radians(target.get_view_angle()[1]) # (0, angle)\r\n x = -40 * math.cos(angle)\r\n y = -40 * math.sin(angle)\r\n \r\n new_position = Vector(\r\n target.origin[0] + x,\r\n target.origin[1] + y,\r\n target.origin[2])\r\n \r\n # Check if there's enough space behind the target.\r\n trace = check_space(new_position, player.mins, player.maxs)\r\n \r\n # Did the trace hit something solid?\r\n if trace.did_hit():\r\n # Increase the height(z) of the new position,\r\n # in case the target was on sloped terrain.\r\n new_position[2] += 20\r\n \r\n # Is there enough space now?\r\n trace2 = check_space(new_position, player.mins, player.maxs)\r\n if trace2.did_hit():\r\n # There's still something solid behind the target.\r\n # Could be a wall or some other object.\r\n return\r\n \r\n # teleport(position, rotation, velocity)\r\n player.teleport(new_position, None, None)\r\n player.set_view_angle(angles)\r\n\t\t\r\n\t\t\t\r\n@ServerCommand('wcs_aoe')\r\ndef wcs_aoe(command):\r\n\tuserid = int(command[1])\r\n\tattacker_userid = int(command[2])\r\n\tradius = float(command[3])\r\n\tdamage = int(command[4])\r\n\tif exists(userid):\r\n\t\tvictim = Player.from_userid(userid)\r\n\telse:\r\n\t\treturn\r\n\tif exists(attacker_userid):\r\n\t\tattacker = Player.from_userid(attacker_userid)\r\n\telse:\r\n\t\treturn\r\n\tfor player in PlayerIter('alive'):\r\n\t\tif player.team == victim.team:\r\n\t\t\tif player.origin.get_distance(victim.origin) <= radius:\r\n\t\t\t\tqueue_command_string(\"es wcs_dealdamage %s %s %s\" % (player.userid,attacker_userid,damage))\r\n\t\t\t\t\r\n\t\t\t\t\r\n@ServerCommand('wcs_centermsg')\r\ndef _centermessage_command(command):\r\n\tmessage = command.arg_string\r\n\tfor player in PlayerIter():\r\n\t\tif SOURCE_ENGINE_BRANCH == \"css\":\r\n\t\t\tqueue_command_string(\"es_centertell %s %s\" %(player.userid,message))\r\n\t\telse:\r\n\t\t\tHudMsg(message, -1, 0.35,hold_time=5.0).send(player.index)\t\t\t\t\t\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_centertell')\r\ndef _centertell(command):\r\n\tuserid = command[1]\r\n\tif userid != '':\r\n\t\tuserid = int(userid)\r\n\telse:\r\n\t\treturn\r\n\tif exists(userid):\r\n\t\tcommand_string = command.arg_string\r\n\t\tcommand_string = command_string.replace(str(userid)+\" \", '')\r\n\t\tindex = index_from_userid(userid)\r\n\t\tif SOURCE_ENGINE_BRANCH == \"css\":\r\n\t\t\tqueue_command_string(\"es_centertell %s %s\" %(userid,command_string))\r\n\t\telse:\r\n\t\t\tHudMsg(command_string, -1, 0.35,hold_time=5.0).send(index)\r\n\t\t\t\t\t\r\n\r\n@ServerCommand('wcs_changeteam')\r\ndef _changeteam_command(command):\r\n\tuserid = int(command[1])\r\n\tif exists(userid):\r\n\t\tplayer = Player.from_userid(userid)\r\n\t\tteam = int(command[2])\r\n\t\tplayer.team = team\r\n\t\t\t\t\t\r\n\t\t\t\t\r\n@ServerCommand('wcs_color')\r\ndef _color(command):\r\n\tif len(command) >= 5:\r\n\t\tuserid = int(command[1])\r\n\t\tr = int(command[2])\r\n\t\tg = int(command[3])\r\n\t\tb = int(command[4])\r\n\t\ta = 255\r\n\t\tif len(command) >= 6:\r\n\t\t\ta = int(command[5])\r\n\t\tif exists(userid):\r\n\t\t\tplayer = Player.from_userid(userid)\r\n\t\t\tplayer.color = Color(r,g,b,a)\r\n\t\t\tif len(command) == 7:\r\n\t\t\t\twpn = int(command[6])\r\n\t\t\t\tif wpn == 1:\r\n\t\t\t\t\tfor weapon 
in player.weapons():\r\n\t\t\t\t\t\tweapon.color = Color(r,g,b,a)\r\n\t\t\t\t\t\t\r\n\r\n@ServerCommand('wcs_consolemsg')\r\ndef _wcs_console_message(command):\r\n\tuserid = int(command[1])\r\n\tmessage = ' '.join(tuple(command)[2:])\r\n\tPlayer.from_userid(userid).client_command(\"echo \"+message)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n@ServerCommand('wcs_create_prop')\r\ndef create_prop(command):\r\n\tuserid = int(command[1])\r\n\tpropname = str(command[2])\r\n\tprophealth = int(command[3])\r\n\tif \"models/\" not in propname:\r\n\t\tpropname = \"models/\"+propname\r\n\tmodel = Model(propname)\r\n\tplayer = Player.from_userid(userid)\r\n\tentity = Entity.create('prop_physics_multiplayer')\r\n\tentity.origin = player.view_coordinates\r\n\tentity.set_model(model)\r\n\tentity_health[entity.index] = prophealth\r\n\tentity.set_property_uchar(\"m_takedamage\",4)\r\n\tentity.spawn()\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n@ServerCommand('wcs_dealdamage')\r\ndef _deal_damage(command):\r\n\tvictim = int(command[1])\r\n\tattacker = int(command[2])\r\n\tdamage\t= int(command[3])\r\n\tif len(command) > 4:\r\n\t\tweapon = str(command[4])\r\n\telse:\r\n\t\tweapon = None\r\n\tif exists(victim) and exists(attacker):\r\n\t\tvictim_player = Player.from_userid(victim)\r\n\t\tattacker_player = Player.from_userid(attacker)\r\n\t\tvictim_player.target_name = \"wcs_hurtme\"\r\n\t\tentity = Entity.create('point_hurt')\r\n\t\tentity.set_key_value_string(\"DamageTarget\",\"wcs_hurtme\")\r\n\t\tentity.damage = damage\r\n\t\tentity.damage_type = 0\r\n\t\tif weapon != None:\r\n\t\t\tentity.set_key_value_string(\"classname\",weapon)\r\n\t\tentity.spawn()\r\n\t\tentity.call_input(\"Hurt\",activator=attacker_player)\r\n\t\tvictim_player.target_name = \"wcs_donthurtme\"\r\n\t\tentity.remove()\r\n\t\t\r\n\t\t\t\t\t\t\r\n@ServerCommand('wcs_decimal')\r\ndef _decimal(command):\r\n\tvar = str(command[1])\r\n\tamount = int(round(float(command[2])))\r\n\tConVar(var).set_string(str(amount))\r\n\t\r\n\t\r\n@ServerCommand('wcs_delayed')\r\ndef _delayed_command(command):\r\n\tdelay = float(command[1])\r\n\tcommand_string = command.arg_string\r\n\tcommand_parts = command_string.split(' ')\r\n\tcommand_parts.remove(command_parts[0])\r\n\tcommand_parts_text = ''\r\n\tfor arg in command_parts:\r\n\t\tcommand_parts_text = ''+command_parts_text+' '+arg\r\n\tcommand_parts_text = command_parts_text.replace(' ', '', 1)\r\n\tDelay(delay, queue_command_string, (('%s' % command_parts_text),))\r\n\t\r\n\t\r\n@ServerCommand('wcs_doteleport')\r\ndef _doteleport_command(command):\r\n\tuserid = int(command[1])\r\n\tif exists(userid):\r\n\t\tplayer = Player.from_userid(userid)\r\n\t\tview_vector = player.view_coordinates\r\n\t\tqueue_command_string('wcs_teleport %s %s %s %s' % (userid, view_vector[0], view_vector[1], view_vector[2]))\r\n\r\n\r\n@ServerCommand('wcs_drop')\r\ndef drop(command):\r\n\tif len(command) == 3:\r\n\t\tweapon = str(command[2])\r\n\t\tif exists(int(command[1])):\r\n\t\t\tplayer = Player.from_userid(int(command[1]))\r\n\t\t\tif str(weapon) == \"1\":\r\n\t\t\t\twpn = player.get_weapon(is_filters='primary')\r\n\t\t\t\tif wpn:\r\n\t\t\t\t\tplayer.drop_weapon(wpn)\r\n\t\t\telif str(weapon) == \"2\":\r\n\t\t\t\twpn = player.get_weapon(is_filters='secondary')\r\n\t\t\t\tif wpn:\r\n\t\t\t\t\tplayer.drop_weapon(wpn)\r\n\t\t\telse:\r\n\t\t\t\tif player.get_weapon(is_filters='secondary'):\r\n\t\t\t\t\tif player.get_weapon(is_filters='secondary').classname == 
weapon:\r\n\t\t\t\t\t\tplayer.drop_weapon(player.get_weapon(is_filters='secondary'))\r\n\t\t\t\tif player.get_weapon(is_filters='primary'):\r\n\t\t\t\t\tif player.get_weapon(is_filters='primary').classname == weapon:\r\n\t\t\t\t\t\tplayer.drop_weapon(player.get_weapon(is_filters='primary'))\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n@ServerCommand('wcs_drug')\r\ndef _drug(command):\r\n\tuserid = int(command[1])\r\n\tdelay = float(command[2])\r\n\tif exists(userid):\r\n\t\tPlayer.from_userid(userid).client_command('r_screenoverlay effects/tp_eyefx/tp_eyefx')\r\n\t\tDelay(delay, remove_drug, (userid,))\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_evasion')\r\ndef wcs_evasion(command):\r\n\tuserid = int(command[1])\r\n\tonoff = int(command[2])\r\n\tchance = int(command[3])\r\n\twcsgroup.setUser(userid,'evasion',onoff)\r\n\twcsgroup.setUser(userid,'evasion_chance',chance)\r\n\t\r\n\t\r\n@ServerCommand('wcs_explode')\r\ndef _wcs_explode_command(command):\r\n\tuserid = int(command[1])\r\n\trange = float(command[2])\r\n\tdamage = int(command[3])\r\n\tif exists(userid):\r\n\t\tplayer_ent = Player.from_userid(userid)\r\n\t\tfor player in PlayerIter():\r\n\t\t\tif player.team != player_ent.team:\r\n\t\t\t\tdistance = player_ent.origin.get_distance(player.origin)\r\n\t\t\t\tif distance <= range:\r\n\t\t\t\t\tif not player.isdead:\r\n\t\t\t\t\t\tqueue_command_string('wcs_dealdamage %s %s %s' % (player.userid, player_ent.userid, damage))\r\n\t\t\t\t\t\twcs.wcs.tell(player.userid,\"\\x04[WCS] \\x05You were hit by \\x04%s's Suicide Explosion!\" % player_ent.name)\r\n\t\t\t\t\t\twcs.wcs.tell(player_ent.userid,'\\x04[WCS] \\x05You hit \\x04%s \\x05with your \\x04Suicide Explosion' % player.name)\r\n\t\t\t\t\t\t\r\n\t\t\r\n@ServerCommand('wcs_explosion')\r\ndef wcs_explosion(command):\r\n\tuserid = int(command[1])\r\n\tmagnitude = int(command[2])\r\n\tradius = int(command[3])\r\n\tif len(command) > 4:\r\n\t\tdo_damage = int(command[4])\r\n\telse:\r\n\t\tdo_damage = 1\r\n\tif exists(userid):\r\n\t\tplayer = Player.from_userid(userid)\r\n\t\tent = Entity.create('env_explosion' )\r\n\t\tent.set_property_int('m_iMagnitude', magnitude)\r\n\t\tent.set_property_int('m_iRadiusOverride', radius)\r\n\t\tif do_damage == 1:\r\n\t\t\tent.spawn_flags = 8\r\n\t\telse:\r\n\t\t\tent.spawn_flags = 1\r\n\t\tent.owner_handle = inthandle_from_userid(userid)\r\n\t\tent.spawn()\r\n\t\tent.origin = player.origin\r\n\t\tent.call_input('Explode')\r\n\r\n\t\t\r\n@ServerCommand('wcs_explosion_point')\r\ndef wcs_explosion_point(command):\r\n\tuserid = int(command[1])\r\n\tx = float(command[2])\r\n\ty = float(command[3])\r\n\tz = float(command[4])\r\n\tmagnitude = int(command[5])\r\n\tradius = int(command[6])\r\n\tif exists(userid):\r\n\t\tent = Entity.create('env_explosion')\r\n\t\tent.set_property_int('m_iMagnitude', magnitude)\r\n\t\tent.set_property_int('m_iRadiusOverride', radius)\t\r\n\t\tent.owner_handle = inthandle_from_userid(userid)\r\n\t\tent.spawn()\r\n\t\tent.origin = Vector(x,y,z)\r\n\t\tent.call_input('Explode')\t\t\r\n\r\n\r\n@ServerCommand('wcs_extinguish')\r\ndef wcs_extinguish(command):\r\n\tuserid = int(command[1])\r\n\tPlayer.from_userid(userid).ignite_lifetime(0)\t\t\r\n\r\n\t\r\n@ServerCommand('wcs_fade')\r\ndef fade(command):\r\n\tuserid = int(command[1])\r\n\tr = int(command[2])\r\n\tg = int(command[3])\r\n\tb = int(command[4])\r\n\ta = int(command[5])\r\n\ttime = float(command[6])\r\n\tcolor = Color(r,g,b,a)\r\n\tif exists(userid):\r\n\t\tFade(int(time), 
int(time),color,FadeFlags.PURGE).send(Player.from_userid(userid).index)\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_fire')\r\ndef wcs_fire(command):\r\n\tuserid = int(command[1])\r\n\tduration = float(command[2])\r\n\tPlayer.from_userid(userid).ignite_lifetime(duration)\r\n\t\r\n\t\r\n@ServerCommand(['wcs_getactiveweapon','wcs_getgun'])\r\ndef active_weapon(command):\r\n\tuserid = int(command[1])\r\n\tvar = str(command[2])\r\n\tif exists(userid):\r\n\t\tplayer = Player.from_userid(userid)\r\n\t\tConVar(var).set_string(player.active_weapon.classname)\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_getcolors')\r\ndef get_colors(command):\r\n\tuserid = int(command[1])\r\n\tr = command[2]\r\n\tg = command[3]\r\n\tb = command[4]\r\n\ta = command[5]\r\n\tif exists(userid):\r\n\t\tConVar(r).set_int(Player.from_userid(userid).color[0])\r\n\t\tConVar(g).set_int(Player.from_userid(userid).color[1])\r\n\t\tConVar(b).set_int(Player.from_userid(userid).color[2])\r\n\t\tConVar(a).set_int(Player.from_userid(userid).color[3])\r\n\t\t\r\n\t\r\n@ServerCommand(['wcs_getcooldown','wcs_get_cooldown'])\r\ndef get_cooldown(command):\r\n\tuserid = int(command[1])\r\n\tvar = str(command[2])\r\n\tif exists(userid):\r\n\t\ttimed = int(float(time.time()))\r\n\t\tcooldown = wcsgroup.getUser(userid,'player_ultimate_cooldown')\r\n\t\tdowntime = wcs.wcs.get_cooldown(userid)\r\n\t\tif cooldown == None:\r\n\t\t\tConVar(var).set_int(downtime)\r\n\t\t\treturn\r\n\t\tConVar(var).set_int(downtime-(timed-cooldown))\r\n\r\n\t\t\r\n@ServerCommand('wcs_getdeaths')\r\ndef _get_deaths(command):\r\n\tuserid = int(command[1])\r\n\tvar = str(command[2])\r\n\tif exists(userid):\r\n\t\tConVar(var).set_int(Player.from_userid(userid).deaths)\t\r\n\r\n\t\t\r\n@ServerCommand('wcs_getdistance')\r\ndef _getdistance(command):\r\n\tvar = str(command[1])\r\n\tx1 = float(command[2])\r\n\ty1 = float(command[3])\r\n\tz1 = float(command[4])\r\n\tx2 = float(command[5])\r\n\ty2 = float(command[6])\r\n\tz2 = float(command[7])\r\n\tvec1 = Vector(x1,y1,z1)\r\n\tvec2 = Vector(x2,y2,z2)\r\n\tdistance = vec1.get_distance(vec2)\r\n\tConVar(var).set_float(distance)\t\t\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_getgravity')\r\ndef _get_gravity(command):\r\n\tuserid = int(command[1])\r\n\tvar = str(command[2])\r\n\tif exists(userid):\r\n\t\tConVar(var).set_float(Player.from_userid(userid).gravity)\t\t\r\n\t\r\n\t\r\n@ServerCommand('wcs_getplayerindex')\r\ndef _get_index(command):\r\n\tuserid = int(command[1])\r\n\tvar = str(command[2])\r\n\tif exists(userid):\r\n\t\tConVar(var).set_int(index_from_userid(userid))\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_getrandomrace')\r\ndef random_race(command):\r\n\tuserid = int(command[1])\r\n\tvar = str(command[2])\r\n\tif exists(userid):\r\n\t\trace_list = []\r\n\t\traces = wcs.wcs.racedb.getAll()\r\n\t\tallraces = races.keys()\r\n\t\tfor number, race in enumerate(allraces):\r\n\t\t\tv = changerace.canUse(userid,race)\r\n\t\t\tif not v:\r\n\t\t\t\trace_list.append(race)\r\n\t\tif len(race_list):\r\n\t\t\tchosen = str(choice(race_list))\r\n\t\t\tConVar(var).set_string(chosen)\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_getscore')\r\ndef _get_score(command):\r\n\tuserid = int(command[1])\r\n\tvar = str(command[2])\r\n\tif exists(userid):\r\n\t\tConVar(var).set_int(Player.from_userid(userid).kills)\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_getviewcoords')\r\ndef viewcoord(command):\r\n\tuserid = int(command[1])\r\n\txvar = str(command[2])\r\n\tyvar = str(command[3])\r\n\tzvar = str(command[4])
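\r\n\t# Store the x/y/z of the point the player is aiming at in the three\r\n\t# given console variables so other scripts can read them back out.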
\r\n\tif exists(userid):\r\n\t\tplayer = Player(index_from_userid(userid))\r\n\t\tview_vec = player.get_view_coordinates()\r\n\t\tConVar(xvar).set_float(view_vec[0])\r\n\t\tConVar(yvar).set_float(view_vec[1])\r\n\t\tConVar(zvar).set_float(view_vec[2])\r\n\r\n\r\n@ServerCommand('wcs_getviewentity')\r\ndef getViewEntity(command):\r\n\tif len(command) == 3:\r\n\t\tif exists(int(command[1])):\r\n\t\t\tConVar(str(command[2])).set_string(str(Player.from_userid(int(command[1])).get_view_entity().index))\r\n\r\n\t\t\t\r\n@ServerCommand('wcs_getviewplayer')\r\ndef getViewPlayer(command):\r\n\tif len(command) == 3:\r\n\t\tif exists(int(command[1])):\r\n\t\t\tConVar(str(command[2])).set_string(str(Player.from_userid(int(command[1])).get_view_player().userid))\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_getwallbetween')\r\ndef get_wall_between(command):\r\n\tvar = str(command[1])\r\n\tuser_one = int(command[2])\r\n\tuser_two = int(command[3])\r\n\torigin_vector = Player.from_userid(user_one).origin\r\n\tdirection_vector = Player.from_userid(user_two).origin\r\n\ttrace = GameTrace()\r\n\tengine_trace.trace_ray(Ray(origin_vector, direction_vector), ContentMasks.ALL, None, trace)\r\n\tConVar(var).set_int(trace.did_hit_world())\r\n\r\n\t\r\n@ServerCommand('wcs_getweapon')\r\ndef getweapon(command):\r\n\tuserid = int(command[1])\r\n\tvar = str(command[2])\r\n\tslot = str(command[3])\r\n\tif slot == \"1\":\r\n\t\tslot = \"primary\"\r\n\tif slot == \"2\":\r\n\t\tslot = \"secondary\"\r\n\tif exists(userid):\r\n\t\tplayer = Player.from_userid(userid)\r\n\t\tweapon = player.get_weapon(is_filters=slot)\r\n\t\tif weapon != None:\r\n\t\t\tConVar(var).set_string(weapon.classname)\r\n\t\telse:\r\n\t\t\tConVar(var).set_int(-1)\r\n\t\r\n\t\t\t\r\n@ServerCommand('wcs_give')\r\ndef _give(command):\r\n\tuserid = int(command[1])\r\n\tweapon = command[2]\r\n\tif \"weapon_\" not in weapon:\r\n\t\tweapon = \"weapon_\"+command[2]\r\n\tif exists(userid):\r\n\t\tPlayer.from_userid(userid).give_named_item(weapon)\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_leech')\r\ndef wcs_leech(command):\r\n\tuserid = int(command[1])\r\n\tvictim_uid = int(command[2])\r\n\tamount = int(command[3])\r\n\tplayer = Player.from_userid(userid)\r\n\tvictim = Player.from_userid(victim_uid)\r\n\tif victim.health - amount < 0:\r\n\t\tplayer.health += victim.health\r\n\t\tqueue_command_string(\"es wcs_dealdamage %s %s %s\" % (victim_uid,userid,amount))\r\n\telse:\r\n\t\tplayer.health += amount\r\n\t\tqueue_command_string(\"es wcs_dealdamage %s %s %s\" % (victim_uid,userid,amount))\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_noflash')\r\ndef noflash(command):\r\n\tuserid = int(command[1])\r\n\ton_off = int(command[2])\r\n\tif exists(userid):\r\n\t\twcsgroup.setUser(userid,'noflash',on_off)\r\n\t\t\r\n\t\t\r\n@ServerCommand('wcs_overlay')\r\ndef wcs_overlay(command):\r\n\tuserid = int(command[1])\r\n\toverlay = str(command[2])\r\n\tduration = float(command[3])\r\n\tif exists(userid):\r\n\t\tcreate_overlay(userid,overlay,duration)\r\n\t\t\r\n\t\r\n@ServerCommand('wcs_pushed')\r\ndef pushed(command):\r\n\tuserid = int(command[1])\r\n\tx_force = float(command[2])\r\n\ty_force = float(command[3])\r\n\tz_force = float(command[4])\r\n\tif exists(userid):\r\n\t\tvec = Vector(x_force,y_force,z_force)\r\n\t\tplayer = Player(index_from_userid(userid))\r\n\t\tplayer.set_property_vector(\"m_vecBaseVelocity\", vec)\t\r\n\r\n\t\t\t\r\n@ServerCommand('wcs_pushto')\r\ndef push_forward(command):\r\n\tuserid = int(command[1])\r\n\tif exists(userid):
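\r\n\t\t# Compute the vector from the player's origin to the target point,\r\n\t\t# scale it by the force argument, and write it to m_vecBaseVelocity,\r\n\t\t# which the engine adds to the player's velocity (a push).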
\r\n\t\tplayer = Player.from_userid(userid)\r\n\t\tif len(command) >= 6:\r\n\t\t\tx1 = float(command[2])\r\n\t\t\ty1 = float(command[3])\r\n\t\t\tz1 = float(command[4])\r\n\t\t\tforce = float(command[5])\r\n\t\t\tcoord = Vector(x1,y1,z1)\r\n\t\telse:\r\n\t\t\tcoords = command[2].split(',')\r\n\t\t\tforce = float(command[3])\r\n\t\t\tcoord = Vector(float(coords[0]),float(coords[1]),float(coords[2]))\r\n\t\tloca = player.origin\r\n\t\tcoord -= loca\r\n\t\tcoord = coord * float(force)\r\n\t\tplayer.set_property_vector('m_vecBaseVelocity',coord)\r\n\t\t\t\r\n\t\t\r\n@ServerCommand('wcs_randplayer')\r\ndef randplayer(command):\r\n\tvar = str(command[1])\r\n\tident = str(command[2])\r\n\tif \"#\" in ident:\r\n\t\tident = ident.replace(\"#\",\"\")\r\n\tif \";\" in ident:\r\n\t\tident = ident.split(\";\")\r\n\telif \",\" in ident:\r\n\t\tident = ident.split(\",\")\r\n\tplaylist = []\r\n\tfor play in PlayerIter(ident):\r\n\t\tplaylist.append(play)\r\n\tConVar(var).set_string(str(choice(playlist).userid))\r\n\t\r\n\t\r\n@ServerCommand('wcs_regeneration')\r\ndef _regeneration(command):\r\n#
, <tr>) are only handled at the closing point.\n \"\"\"\n if tag == 'td':\n self._in_td = True\n if tag == 'th':\n self._in_th = True\n\n def handle_data(self, data):\n \"\"\" This is where we save content to a cell \"\"\"\n if self._in_td or self._in_th:\n self._current_cell.append(data.strip())\n\n def handle_charref(self, name):\n \"\"\" Handle HTML encoded characters \"\"\"\n\n if self._parse_html_entities:\n self.handle_data(self.unescape('&#{};'.format(name)))\n\n def handle_endtag(self, tag):\n \"\"\" Here we exit the tags. If the closing tag is </tr>, we know that we\n can save our currently parsed cells to the current table as a row and\n prepare for a new row. If the closing tag is </table>, we save the\n current table and prepare for a new one.\n \"\"\"\n
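 # Hypothetical flow: feeding '<table><tr><td>a</td><td>b</td></tr></table>'\n # through feed() leaves self.tables == [[['a', 'b']]].\n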
 if tag == 'td':\n self._in_td = False\n elif tag == 'th':\n self._in_th = False\n\n if tag in ['td', 'th']:\n final_cell = self._data_separator.join(self._current_cell).strip()\n self._current_row.append(final_cell)\n self._current_cell = []\n elif tag == 'tr':\n self._current_table.append(self._current_row)\n self._current_row = []\n elif tag == 'table':\n self.tables.append(self._current_table)\n self._current_table = []\n\ntypesAndUrlValue = {\n \"Accident & Health Insurance (Financial)\": 431,\n \"Chemicals - Major Diversified (Basic Materials)\": 110,\n \"Diagnostic Substances (Healthcare)\": 516,\n \"Drug Delivery (Healthcare)\": 513,\n \"Drug Manufacturers - Major (Healthcare)\": 510,\n \"Drug Manufacturers - Other (Healthcare)\": 511,\n \"Drug Related Products (Healthcare)\": 514,\n \"Drug Stores (Services)\": 733,\n \"Drugs - Generic (Healthcare)\": 512,\n \"Drugs Wholesale (Services)\": 756,\n \"Health Care Plans (Healthcare)\": 522,\n \"Hospitals (Healthcare)\": 524,\n \"Long-Term Care Facilities (Healthcare)\": 523,\n \"Medical Appliances & Equipment (Healthcare)\": 521,\n \"Medical Equipment Wholesale (Services)\": 754,\n \"Medical Instruments & Supplies (Healthcare)\": 520,\n \"Medical Laboratories & Research (Healthcare)\": 525,\n \"Medical Practitioners (Healthcare)\": 527,\n \"Specialized Health Services (Healthcare)\": 528,\n \"Specialty Chemicals (Basic Materials)\": 113,\n \"Synthetics (Basic Materials)\": 111}\n\n\nallTypesData = {key: [] for key in typesAndUrlValue.keys()} # dict.fromkeys(typesAndUrlValue.keys())\n\n\ndef get_html_table(target):\n req = urllib.request.Request(url=target)\n f = urllib.request.urlopen(req)\n xhtml = f.read().decode('utf-8')\n p = HTMLTableParser()\n p.tables = []\n p.feed(xhtml)\n return p\n\n\nfor field in typesAndUrlValue.keys():\n value = typesAndUrlValue[field]\n target = 'https://screener.finance.yahoo.com/b?sc=' + str(value) + \\\n '&im=&prmin=&prmax=&mcmin=&mcmax=&dvymin=&dvymax=&betamin=&betamax=&remin=&remax=&pmmin=&pmmax=&pemin=&pemax=&pbmin=&pbmax=&psmin=&psmax=&pegmin=&pegmax=&gr=&grfy=&ar=&vw=1&db=stocks'\n p = get_html_table(target)\n numberOfStocks = p.tables[0][0][0].split(' ')[7].split(')')[0]\n count = 0\n b = 1\n while b <= int(numberOfStocks):\n newTarget = target+\"&b=\"+str(b)\n p = get_html_table(newTarget)\n tmp = [x for i,x in enumerate(p.tables[1]) if i!=0]\n allTypesData[field] += tmp\n count += len(tmp)\n b += 20\n\n print(str(count)+\" items were added to field= \"+ field+\" expect= \"+str(numberOfStocks)+\" currently have= \"+str(len(allTypesData[field])))\n with open('{}.csv'.format(field), 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n for stock in allTypesData[field]:\n writer.writerow(stock)\n\n# for field in typesAndUrlValue.keys():\n# print(allTypesData[field])\n# print()\n\n\n# target = 'https://screener.finance.yahoo.com/b?sc=528&im=&prmin=&prmax=&mcmin=&mcmax=&dvymin=&dvymax=&betamin=&betamax=&remin=&remax=&pmmin=&pmmax=&pemin=&pemax=&pbmin=&pbmax=&psmin=&psmax=&pegmin=&pegmax=&gr=&grfy=&ar=&vw=1&db=stocks'\n#\n# # get website content\n# req = urllib.request.Request(url=target)\n# f = urllib.request.urlopen(req)\n# xhtml = f.read().decode('utf-8')\n#\n# # instantiate the parser and feed it\n# p = HTMLTableParser()\n# p.feed(xhtml)\n# print(p.tables[1][1:-1])\n","sub_path":"YahStocks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"187116179","text":"import json\n\ndef clamp(n, minn, maxn):\n return max(min(maxn, n), minn)\n\ndef get_sensor_dims(camera_model):\n sensor_width_mm = camera_model[\"horizontal_pixels\"] * camera_model[\"pixel_size_um\"] / 1000\n sensor_height_mm = camera_model[\"vertical_pixels\"] * camera_model[\"pixel_size_um\"] / 1000\n return sensor_width_mm, sensor_height_mm\n\ndef get_fov_m(camera_model, flying_height_m):\n sensor_width_mm, sensor_height_mm = get_sensor_dims(camera_model)\n fov_width_m = flying_height_m * sensor_width_mm / camera_model[\"focal_length_mm\"]\n fov_height_m = flying_height_m * sensor_height_mm / camera_model[\"focal_length_mm\"]\n return fov_width_m, fov_height_m\n\ndef get_gsd_m(camera_model, flying_height_m):\n fov_width_m, fov_height_m = get_fov_m(camera_model, flying_height_m)\n gsd_across_m = fov_width_m / camera_model[\"horizontal_pixels\"]\n gsd_along_m = fov_height_m / camera_model[\"vertical_pixels\"]\n return gsd_across_m, gsd_along_m\n\ndef get_max_exp_time_ms(camera_model, flying_height_m, flying_speed_mps, max_blur = 0.1):\n gsd_along_m = get_gsd_m(camera_model, flying_height_m)[-1]\n max_exp_time_ms = 1000 * (gsd_along_m * max_blur) / flying_speed_mps\n return clamp(max_exp_time_ms, camera_model[\"min_exposure_ms\"], camera_model[\"max_exposure_ms\"])\n\ndef get_min_frame_rate_Hz(camera_model, flying_height_m, flying_speed_mps, front_overlap = 0.9):\n fov_height_m = get_fov_m(camera_model, flying_height_m)[-1]\n min_frame_rate_Hz = flying_speed_mps / (fov_height_m * (1 -front_overlap))\n return clamp(min_frame_rate_Hz, camera_model[\"min_framerate_fps\"], camera_model[\"max_framerate_fps\"])\n\ndef get_gigs_per_minute(camera_model, flying_height_m, flying_speed_mps, front_overlap = 0.9):\n frame_rate_Hz = get_min_frame_rate_Hz(camera_model, flying_height_m, flying_speed_mps, front_overlap)\n bytes_per_frame = camera_model[\"horizontal_pixels\"] * camera_model[\"vertical_pixels\"] * max(list(camera_model[\"Bayer_mode_bpp\"].values())) / 8.0\n bytes_per_second = bytes_per_frame * frame_rate_Hz\n bytes_per_minute = bytes_per_second * 60\n gigs_per_minute = bytes_per_minute / (1024 ** 3)\n return gigs_per_minute\n\ndef get_max_number_of_frames(camera_model, flying_height_m, flying_speed_mps, front_overlap = 0.9, max_flight_time_m = 30):\n frame_rate_Hz = get_min_frame_rate_Hz(camera_model, flying_height_m, flying_speed_mps, front_overlap)\n max_number_of_frames = frame_rate_Hz * 60 * max_flight_time_m\n return max_number_of_frames\n\ndef get_line_spacing_from_side_lap(camera_model, flying_height_m, side_overlap = 0.7):\n fov_width_m, fov_height_m = get_fov_m(camera_model, flying_height_m)\n line_spacing = fov_width_m * (1 - side_overlap)\n return line_spacing\n\nwith open(\"cams_and_lenses.json\", \"r\") as fh:\n cams_and_lenses = json.loads(fh.read())\n\ncamera_id = 1\nlens_id = 1\n\ncamera_model = cams_and_lenses[\"cameras\"][camera_id] | cams_and_lenses[\"lenses\"][lens_id]\n\nmax_gigs_planned = 50\n\nprint(camera_model)\n\nspeed = 6.5\nheight = 25\nblur = 0.5\noverlap = 0.8\nside_lap = 0.7\nminutes_per_flight = 30\nmax_flight_time_m = 30\n\ntarget_gigs_total = max_flight_time_m * get_gigs_per_minute(camera_model=camera_model, flying_height_m=height, flying_speed_mps=speed, front_overlap=overlap)\n\nif target_gigs_total > max_gigs_planned:\n available_bpp = sorted(list(camera_model[\"Bayer_mode_bpp\"].values()), reverse=True)\n for bpp in available_bpp[1:]:\n if (bpp / max(available_bpp) * target_gigs_total <= max_gigs_planned) or (bpp == 
min(list(camera_model[\"Bayer_mode_bpp\"].values()))):\n target_bpp = bpp\n break\nelse:\n #target_bayer_format = 12\n target_bpp = max(list(camera_model[\"Bayer_mode_bpp\"].values()))\ninv_bayer = {v: k[2:] for k, v in camera_model[\"Bayer_mode_bpp\"].items()}\ntarget_bayer_format = inv_bayer[target_bpp]\n\n\nprint (f\"speed: {speed} m/s\\nheight: {height} m\\nblur: {blur*100}%\\nfront overlap: {overlap*100}%\\nside overlap: {side_lap*100}%\")\nprint (f\"total area (m*m): {get_fov_m(camera_model=camera_model, flying_height_m=height)}\")\nprint (f\"pixel size: {get_gsd_m(camera_model=camera_model, flying_height_m=height)[0]*1000:.1f} mm\")\nexposure_max = get_max_exp_time_ms(camera_model=camera_model, flying_height_m=height, flying_speed_mps = speed, max_blur = blur)\nprint (f\"max exposure: {exposure_max:.2f}ms (1/{1000/exposure_max:.0f}s)\")\nprint (f\"min framerate, Hz: {get_min_frame_rate_Hz(camera_model=camera_model, flying_height_m=height, flying_speed_mps = speed, front_overlap = overlap):.2f}\")\nprint (f\"gigs per {minutes_per_flight} minutes: {minutes_per_flight * get_gigs_per_minute(camera_model=camera_model, flying_height_m=height, flying_speed_mps = speed, front_overlap = overlap):.2f}\")\nprint (f\"max number of frames for {max_flight_time_m} minutes: {get_max_number_of_frames(camera_model=camera_model, flying_height_m=height, flying_speed_mps = speed, front_overlap = overlap, max_flight_time_m=max_flight_time_m):.0f}\")\nprint (f\"Maximum line spacing for {side_lap} side overlap: {get_line_spacing_from_side_lap(camera_model=camera_model, flying_height_m=height, side_overlap=side_lap):.1f} m\")\n\nprint(camera_model[\"Bayer_mode_bpp\"])\n\nprint(target_bpp, target_bayer_format)\n\n","sub_path":"08_data_logging/calc_camera_settings.py","file_name":"calc_camera_settings.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"277149968","text":"from __future__ import print_function\n\nimport sys\n\ntry:\n import zdeskcfg\nexcept ImportError:\n print('Could not import zdeskcfg, which is used to set up examples.')\n print('Please `pip install zdeskcfg`, or see the PyPI page:')\n print('https://pypi.python.org/pypi/zdeskcfg')\n sys.exit()\n\nfrom zdesk import Zendesk\n\n################################################################\n## NEW CONNECTION CLIENT\n################################################################\n# Create an object using the [zdesk] section of\n# ~/.zdeskcfg and the zdeskcfg module\n#zendesk = Zendesk(**zdeskcfg.get_ini_config())\n\n# Create an object using the [zdesk] and [sandbox] sections of\n# ~/.zdeskcfg and the zdeskcfg module\nzendesk = Zendesk(**zdeskcfg.get_ini_config(section='sandbox'))\n\n# Manually creating a new connection object\n#zendesk = Zendesk('https://yourcompany.zendesk.com', 'you@yourcompany.com', 'passwd')\n\n# Are you getting an error such as...\n# \"SSL routines:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed\"?\n#zendesk = Zendesk('https://yourcompany.zendesk.com', 'you@yourcompany.com', 'passwd',\n# client_args={\n# \"disable_ssl_certificate_validation\": True\n# }\n#)\n\n\n################################################################\n## TICKETS\n################################################################\n\n# List\nzendesk.tickets_list()\n\n# Create\nnew_ticket = {\n 'ticket': {\n 'requester_name': 'Howard Schultz',\n 'requester_email': 'howard@starbucks.com',\n 'subject':'My Starbucks coffee is cold!',\n 'description': 
'please reheat my coffee',\n 'set_tags': 'coffee drinks',\n 'ticket_field_entries': [\n {\n 'ticket_field_id': 1,\n 'value': 'venti'\n },\n {\n 'ticket_field_id': 2,\n 'value': '$10'\n }\n ]\n }\n}\n\n# If a response results in returning a [location] header, then that\n# will be what is returned.\n# Create a ticket and get its URL.\nresult = zendesk.ticket_create(data=new_ticket)\n\n# Alternatively, you can get the complete response and get the location\n# yourself. This can be useful for getting other response items that are\n# not normally returned, such as result['content']['upload']['token']\n# when using zendesk.upload_create()\n#\n#result = zendesk.ticket_create(data=new_ticket, complete_response=True)\n#ticket_url = result['response']['location']\n#ticket_id = get_id_from_url(ticket_url)\n\n# Need ticket ID?\nfrom zdesk import get_id_from_url\nticket_id = get_id_from_url(result)\n\n# Show\nzendesk.ticket_show(id=ticket_id)\n\n# Delete\nzendesk.ticket_delete(id=ticket_id)\n\n\n################################################################\n## ORGANIZATIONS\n################################################################\n\n# List\nzendesk.organizations_list()\n\n# Create\nnew_org = {\n 'organization': {\n 'name': 'Starbucks Corp'\n }\n}\nresult = zendesk.organization_create(data=new_org)\norg_id = get_id_from_url(result)\n\n# Show\nzendesk.organization_show(id=org_id)\n\n# Delete\nzendesk.organization_delete(id=org_id)\n\n\n################################################################\n## USERS (AGENTS)\n################################################################\n\n# List\nzendesk.users_list()\n\n# Create\nnew_user = {\n 'user': {\n 'name': 'Howard Schultz',\n 'email': 'howard@starbucks.com',\n 'roles': 4,\n }\n}\nresult = zendesk.user_create(data=new_user)\nuser_id = get_id_from_url(result)\n\n# Show\nzendesk.user_show(id=user_id)\n\n# Delete\nzendesk.user_delete(id=user_id)\n\n\n################################################################\n## GROUPS\n################################################################\n\n# List\nzendesk.groups_list()\n\n# Create\nnew_group = {\n 'group': {\n 'name': 'Starbucks Group',\n 'agents': [\n {\n 'agent': 123\n },\n ]\n }\n}\nresult = zendesk.group_create(data=new_group)\ngroup_id = get_id_from_url(result)\n\n# Show\nzendesk.group_show(id=group_id)\n\n# Delete\nzendesk.group_delete(id=group_id)\n\n\n################################################################\n## TAGS\n################################################################\n\n# List\nzendesk.tags_list()\n\n\n################################################################\n## TICKET TYPES\n################################################################\nzendesk.ticket_fields_list()\n\n\n################################################################\n## SEARCH\n################################################################\nresults = zendesk.search(query='type:ticket sort:desc', page=1)\n\n","sub_path":"examples/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"409023776","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\nfrom .forms import contratatatForm\n\ndef contratar(request):
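\n\t# Render the hiring form; when the submitted data is valid, email the\n\t# details to the address configured in settings.EMAIL_HOST_USER.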
\n\ttitle = 'Contratar Usuario'\n\tform = contratatatForm(request.POST or None)\n\tcontext = {'title': title, 'form': form}\n\n\tif form.is_valid():\n\t\tname = form.cleaned_data['nombre']\n\t\tname1 = form.cleaned_data['telefono']\n\t\tname2 = form.cleaned_data['direccion']\n\t\tname3 = form.cleaned_data['fecha']\n\t\tcomment = form.cleaned_data['ciudad']\n\n\t\tsubject = 'Contratacion de Usuario'\n\t\tmessage = '%s %s %s %s %s' %(name,name1,name2,name3,comment)\n\t\temailFrom = form.cleaned_data['nombre']\n\t\temailTo = [settings.EMAIL_HOST_USER]\n\t\tsend_mail(subject, message, emailFrom, emailTo, fail_silently=True)\t\n\t\t\n\n\ttemplate = 'contratar/contratar.html'\n\treturn render(request,template,context)\n","sub_path":"apps/contratar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"326555344","text":"# -*- coding: utf-8 -*-\nfrom douban_spider import Douban_Spider\nfrom bili_spider import Bili_Spider\nfrom zhihu_spider import Zhihu_Spider\nimport json\nfrom datetime import datetime, timedelta\n\npath = 'data\\\\douban.json'\nbili_id = '20775815'\ndouban_id = '160308718'\n\ndef to_json(data, path):\n with open(path, 'w', encoding='utf-8', errors='ignore') as f:\n json.dump(data, f, ensure_ascii=False)\n print('Output to {0} has completed'.format(path))\n\n\ndef print_format(favorites_container):\n for favorites in favorites_container:\n print('+-------{0}-{1}--------'.format(favorites['title'], favorites['count']))\n for index, item in enumerate(favorites['items']):\n print('|---{0}-{1}'.format(index, item['title']))\n\n\n\nif __name__ == \"__main__\":\n spider = Douban_Spider(douban_id)\n start = datetime.now()\n data = spider.start()\n end = datetime.now()\n print_format(data)\n print('Total crawl time: {0}'.format(end-start))","sub_path":"spriders/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"246793564","text":"#!/usr/bin/env python\n\nclass Interface():\n\n f = 0\n k = 0\n o = 0\n\n notes = []\n messages = {\n 'log':'--',\n 'progress':{\n 'name':'--',\n 'ratio':'--'\n },\n 'note':'-'\n }\n\n def install(this,f,k,o):\n this.log('Installed Interface')\n this.f = f\n this.k = k\n this.o = o\n\n def start(this):\n this.log('Ready.');\n\n def log(this,log, clear=False):\n print(log)\n\n def progress(this,name,ratio, clear=False):\n this.messages['progress']['name'] = name\n if ratio == 1:\n this.messages['progress']['ratio'] = \"Complete.\"\n else:\n this.messages['progress']['ratio'] = str(int(ratio * 100))\n\n def note(this,name):\n this.notes.append(name)\n this.messages['note'] = name\n this.log(\"Playing: %s\" % this.messages['note'])\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"308516858","text":"__author__ = 'apple'\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.utils import simplejson\nfrom models import *\nimport ast\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\n@ensure_csrf_cookie\ndef home(request):\n return render_to_response('home.html')\n\n@ensure_csrf_cookie\ndef authors(request, authorId=None):\n if request.method == 'POST':\n if request.META.has_key('HTTP_X_HTTP_METHOD_OVERRIDE'):\n author = Author.objects.get(id=authorId)\n id = author.id\n
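 # Backbone's emulateHTTP option tunnels PUT/DELETE through POST using\n # the X-HTTP-Method-Override header; branch on that header here.\n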
 if request.META['HTTP_X_HTTP_METHOD_OVERRIDE'] == 'DELETE':\n author.delete()\n elif request.META['HTTP_X_HTTP_METHOD_OVERRIDE'] == 'PUT':\n postData = request.body\n postData = ast.literal_eval(postData)\n author.__dict__.update(postData)\n author.save()\n return HttpResponse(simplejson.dumps({'id': id}), mimetype='application/json')\n\n else:\n postData = request.body\n postData = ast.literal_eval(postData)\n author = Author()\n author.__dict__.update(postData)\n author.save()\n return HttpResponse(simplejson.dumps({'id': author.id}), mimetype='application/json')\n else:\n authors = Author.objects.values()\n return HttpResponse(simplejson.dumps(list(authors)), mimetype='application/json')\n\n\n","sub_path":"testBackbone/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"522678103","text":"\"\"\"\nProvides an \"EnsemblePoints\" class for storing a set of dots that must be\njoined in order, as in children's connect-the-dots drawing games.\n\"\"\"\nfrom iterateurs import cases, couples, triplets\n\nclass EnsemblePoints:\n \"\"\"\n set of dots to join\n \"\"\"\n def __init__(self, points):\n self.points = points\n\n def eliminer_doublons(self):\n \"\"\"\n removes consecutive duplicate points.\n example: [(0.0, 0.0), (1.0, 0.0), (1.0, 0.0), (2.0, 2.0), (0.0, 0.0)]\n becomes [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)].\n \"\"\"\n nv_points = [p1 for p1, p2 in couples(self.points) if p1 != p2]\n self.points = nv_points\n\n def coordonnees_max(self):\n \"\"\"\n returns the (x, y) pair of maximum coordinates.\n for example, for [(0.0, 0.0), (1.0, 3.0), (2.0, 2.0)]\n returns (2.0, 3.0).\n \"\"\"\n return max(cases(self.points, 0)), max(cases(self.points, 1))\n\n def aire_triangle(self, triangle):\n \"\"\"\n computes the area of a triangle (shoelace formula)\n \"\"\"\n (x1, y1), (x2, y2), (x3, y3) = triangle\n return abs(x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2\n\n def simplification(self, nombre_points):\n \"\"\"\n removes points until only the requested number remains.\n each point is considered together with the triplet it forms with\n its predecessor and successor; every triplet spans a triangle of\n some area, and the points spanning the smallest areas are removed\n first. the remaining points keep their original order.\n \"\"\"\n while len(self.points) > nombre_points:\n aires = [(self.aire_triangle(e), i + 1)\n for i, e in enumerate(triplets(self.points))]\n _, indice = min(aires)\n del self.points[indice]\n\n def svg_vide(self, nom_fichier):\n \"\"\"\n draws the numbered dots as svg into the given file.\n \"\"\"\n self.svg_traits(nom_fichier, 0)\n\n def svg_traits(self, nom_fichier, nombre):\n \"\"\"\n draws the numbered dots as svg into the given file.\n also draws \"nombre\" segments, starting from point 0.\n \"\"\"\n entete_svg = '<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"{}\" height=\"{}\">'.\\\n format(self.coordonnees_max()[0] + 10, self.coordonnees_max()[1] + 10)+\"\\n\"\n arriere_plan_svg = '<rect width=\"{}\" height=\"{}\" fill=\"white\"/>'.\\\n format(self.coordonnees_max()[0] + 10, self.coordonnees_max()[1] + 10) + \"\\n\"\n pied_svg = '</svg>'\n with open(nom_fichier, \"w\") as fichier_svg:\n fichier_svg.write(entete_svg)\n fichier_svg.write(arriere_plan_svg)\n for i, point in enumerate(self.points):\n fichier_svg.write('<circle cx=\"{}\" cy=\"{}\" r=\"3\"/>\\n'.\\\n format(point[0], point[1]))\n 
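# label each dot with its index so the joining order is visible\n 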
fichier_svg.write('<text x=\"{}\" y=\"{}\">{}</text>\\n'.format(point[0], point[1], i))\n for nb_traits, couple in enumerate(couples(self.points)):\n if nb_traits < nombre:\n fichier_svg.write('<line x1=\"{}\" y1=\"{}\" x2=\"{}\" y2=\"{}\" stroke=\"black\"/>\\n'.\\\n format(couple[0][0], couple[0][1], couple[1][0], couple[1][1]))\n else:\n break\n fichier_svg.write(pied_svg)\n\n\ndef lecture_ensemble(nom_fichier):\n \"\"\"\n reads the set of points from a file (two coordinates per line)\n \"\"\"\n with open(nom_fichier) as fichier:\n return EnsemblePoints([\n tuple([float(x) for x in ligne.split()])\n for ligne in fichier\n ])\n\n\ndef ensemble_test():\n \"\"\"\n returns a set containing the three points of a small test triangle.\n \"\"\"\n return EnsemblePoints([(10.0, 10.0), (300.0, 200.0), (150.0, 400.0)])\n","sub_path":"semestre5/python/test/exam_mi_semestre2016/fichiers_fournis/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"37512711","text":"\"\"\"\nprototype Tile UI for Manufacturing to display pending corrections\n\"\"\"\n\nfrom datetime import datetime\nimport time\nfrom math import ceil\nfrom pathlib import Path\nfrom PIL.ImageTk import PhotoImage\nfrom tkinter import *\nimport pandas as pd\nimport numpy as np\nimport subprocess\nimport sys\nimport traceback\nimport os\nimport _thread as thread\nfrom file_read_backwards import FileReadBackwards\n\nfrom prototype_GDP_Tracker import Main\nfrom prototype_sql import SQL\n\n\nuser = os.getlogin()\npid = os.getpid()\n\n\nclass Tile(SQL):\n def __init__(self, root):\n self.win = root\n self.win.config(bg='dark gray')\n self.win.title('GDP TILE')\n \n self.counter = 1\n self.id = str(np.random.random())[:5] + ', '\n\n # Inherit SQL and read data from database\n SQL.__init__(self)\n\n # Default department view\n self.dept_list = [dept[0] for dept in self.sql_department_list()]\n self.dept_selection=StringVar()\n self.dept_selection.set(self.dept_list[1]) # Drug Product\n\n # Tile Properties\n self.compression_status = 'expanded'\n self.t_height = 200\n self.t_width = 200\n self.t_spacing = 10\n self.font = ('Arial', 12, 'bold')\n\n # Repeating data update\n self.df = self.sql_read() # Data pull at program initiation\n self.current_hash = self.sql_request_hash()[0] # Initialization hash\n\n self.status = False\n\n self.window()\n\n self.dataupdate()\n \n def compression_toggle(self):\n if self.compression_status == 'expanded':\n self.compression_status = 'collapsed'\n self.t_height = 100\n self.t_width = 100\n self.t_spacing = 10\n self.font = ('Arial', 8, 'bold')\n self.tiles()\n else:\n self.compression_status = 'expanded'\n self.t_height = 200\n self.t_width = 200\n self.t_spacing = 10\n self.font = ('Arial', 12, 'bold')\n self.tiles()\n\n def name_truncate(self, name):\n if self.compression_status == 'collapsed':\n split = name.split(', ')\n if len(split)==1:\n return name\n else:\n return split[1] + ' ' + split[0][0] # 'Watanabe, Takahiro' to 'Takahiro W'\n else:\n return name\n \n def trace_log(self, update=None):\n #f = open(Path('.')/'database'/'log.txt', 'a')\n f = open(Path(r'R:\Common\MFG\Tools\GDP Tracker\database\log.txt'), 'a')\n timestamp = str(datetime.now())\n status = 'alive'\n\n if not update:\n f.write(\"\\n%s, %s, %s, %s\" % (user, pid, timestamp, status))\n else:\n f.write(\"\\n%s, %s, %s, %s, %s\" % (user, pid, timestamp, status, update))\n f.close()\n\n def dataupdate(self):
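\n # Poll the database every 5 seconds and redraw the tiles only when\n # the stored content hash has changed.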
\n try:\n database_hash = self.sql_request_hash()[0]\n if self.current_hash != database_hash:\n self.current_hash = database_hash\n self.df = self.sql_read() # Pull new data\n self.tiles() # Plot the tiles / image persistence\n self.trace_log(update='Update')\n self.trace_log()\n except:\n error_log = []\n while True:\n try:\n \"\"\"Network resumed, log the error queue\"\"\"\n #f = open(Path('.')/'database'/'log.txt', 'a')\n f = open(Path(r'R:\Common\MFG\Tools\GDP Tracker\database\log.txt'), 'a')\n for error in error_log:\n f.write(error)\n f.close()\n break\n except:\n \"\"\"Log network disruption every minute\"\"\"\n timestamp = str(datetime.now())\n error = \"\\n%s, %s, %s, %s\" % (user, pid, timestamp, 'disconnected')\n error_log.append(error)\n time.sleep(5)\n\n self.win.after(5000, self.dataupdate)\n \n def update(self, *args):\n self.searchterm = self.searchbox.get()\n self.tiles()\n\n def window(self):\n # Searchbox StringVar()\n self.searchbox = StringVar() # Persistent searchbox value\n self.searchterm = self.searchbox.get()\n self.searchbox.trace('w', self.update)\n\n # Compression & Filter Toolbar\n self.tool = Frame(\n self.win, \n bg='dark gray', \n height=20, \n highlightcolor='dark gray', \n highlightthickness=1, \n highlightbackground='dark gray', \n relief=GROOVE\n )\n self.tool.pack(\n side=BOTTOM, expand=NO, fill=X, padx=(8,25), pady=2\n )\n self.icon = PhotoImage(\n #file=str((Path('.')/'database'/'reqs'/'compression2.png').absolute())\n file=str(\n Path(r'R:\Common\MFG\Tools\GDP Tracker\database\reqs\compression2.png').absolute()\n )\n )\n self.image = Button(\n self.tool, \n width=20, \n height=20, \n image=self.icon,\n command=self.compression_toggle\n )\n self.image.pack(side=LEFT, padx=2, pady=2)\n Entry(\n master=self.tool,\n textvariable=self.searchbox, \n bg='dark gray'\n ).pack(\n side=LEFT, expand=YES, fill=BOTH, padx=2\n )\n Button(\n master=self.tool,\n textvariable=self.dept_selection,\n command=self.toggle_dept,\n font=('Arial', 8, 'bold') \n ).pack(side=LEFT, expand=NO, fill=BOTH, padx=(2,0))\n\n # Scrollbar\n self.scrollbar = Scrollbar(self.win)\n self.scrollbar.pack(side=RIGHT, fill=Y)\n \n # Canvas\n self.canvas = Canvas(\n self.win, width=500, height=400, \n bg='dark grey', highlightthickness=0\n )\n self.canvas.pack(side=LEFT, expand=YES, fill=BOTH)\n self.canvas.bind(\"<Configure>\", self.on_resize)\n self.height = self.canvas.winfo_reqheight()\n self.width = self.canvas.winfo_reqwidth()\n\n # Tie in scrollbar with canvas\n self.scrollbar.config(command=self.canvas.yview)\n self.canvas.config(yscrollcommand=self.scrollbar.set)\n\n # Canvas Scroll (mac only)\n handler = lambda event: self.canvas.yview_scroll(int(-0.01 * event.delta), 'units')\n self.canvas.bind_all('<MouseWheel>', handler) \n\n def toggle_dept(self):\n dept_selection_length = len(self.dept_list)\n current_index = self.dept_list.index(self.dept_selection.get())\n if (current_index + 1) >= dept_selection_length:\n self.dept_selection.set(self.dept_list[0])\n else:\n self.dept_selection.set(self.dept_list[current_index + 1])\n self.tiles()\n\n def reset(self):\n self.canvas.delete('all')\n self.canvas.destroy()\n self.scrollbar.destroy()\n self.tool.destroy()\n self.window()\n\n def on_resize(self, event):\n self.width = event.width\n self.height = event.height\n self.canvas.config(width=self.width, height=self.height)\n self.canvas.delete('all')\n col_count = self.tiles()\n canvSize = (0, 0, self.width, self.t_spacing + col_count * (self.t_height + self.t_spacing))\n self.canvas.config(scrollregion=canvSize)\n\n def tiles(self):\n # Start fresh\n self.canvas.delete('all')\n\n df 
= self.df.copy()\n if len(df)==0:\n return None\n\n # Find the canvas width\n width = self.width\n height = self.height\n\n #Filter by department\n df = df[\n df['DEPARTMENTS'] == self.dept_selection.get()\n ]\n\n # Count the number of requests\n df.sort_values(by=['DUE_DATE'], inplace=True)\n req = [req for req in df['REQUEST_ID'].drop_duplicates()]\n num_req = len(req)\n \n # Calculate how many to plot per row\n num_per_row = int(width/(self.t_width+self.t_spacing))\n num_per_col = int(ceil(num_req/num_per_row)) # Skip if divide by 0\n \n\n # Filter and create the queue to be plotted\n textlist = []\n for request_id in req:\n df_req = df[\n df['REQUEST_ID']==request_id]\n batchid = df_req['BATCH_ID'].drop_duplicates().values[0]\n requestor = self.name_truncate(df_req['REQUESTOR'].drop_duplicates().values[0])\n date = df_req['DUE_DATE'].drop_duplicates().values[0]\n req_list = [self.name_truncate(name) for name in df_req['REQUESTED'].drop_duplicates()]\n requested = '\\n'.join(req_list)\n\n color = self.colorcode(date) # color code the due date\n text = (requested+'\\n\\n'+batchid+'\\n'+'DUE: '+date+'\\n'+requestor, request_id, color) # create final display text\n if self.searchterm.lower() in text[0].lower():\n textlist.append(text)\n\n row = 10\n col = 10\n \n while textlist:\n reqrow, textlist = textlist[:num_per_row], textlist[num_per_row:]\n for text, request_id, color in reqrow:\n link = Button(self.canvas,\n text=text,\n font=self.font,\n bg=color,\n width=self.t_width,\n height=self.t_height,\n relief=GROOVE,\n command=(\n lambda req=request_id: self.opentile(req)\n )\n )\n link.pack(side=LEFT, expand=YES)\n self.canvas.create_window(\n col, \n row, \n anchor=NW,\n window=link, \n width=self.t_width, \n height=self.t_height\n )\n col += (self.t_width + self.t_spacing)\n row += (self.t_height + self.t_spacing)\n col = 10\n \n if self.canvas.find_all()[0] > 1000: #Reset the canvas to prevent leak\n self.reset()\n \n return num_per_col\n\n def opentile(self, req):\n x = self.win.winfo_x()\n y = self.win.winfo_y()\n win = Toplevel()\n win.geometry('850x600+%s+%s'%(x, y))\n Main(win, req)\n win.wait_window()\n\n # Re-define the scroll handler\n handler = lambda event: self.canvas.yview_scroll(\n int(-0.01 * event.delta), 'units')\n self.canvas.bind_all('<MouseWheel>', handler) \n\n def colorcode(self, date):\n today = datetime.now()\n due = datetime.strptime(date, '%Y-%m-%d')\n difference = (due - today).days\n\n palettes = {\n 'Traditional': {'Green': '#60A917',\n 'Amber': '#F0A30A',\n 'Red' : '#d50032'},\n 'Trad Modif': {'Green': '#27AE60',\n 'Amber': '#F39C12',\n 'Red' : '#C0392B'},\n 'Flat': {'Green': '#27AE60',\n 'Amber': '#D35400',\n 'Red' : '#C0392B'},\n 'Flat Yellow': {'Green': '#27AE60',\n 'Amber': '#F1C40F',\n 'Red' : '#C0392B'},\n 'Flat Bright': {'Green': '#0BE881',\n 'Amber': '#FFA801',\n 'Red' : '#FF3F34'} \n }\n sel = 'Trad Modif'\n\n color = palettes[sel]['Green'] # Green\n if difference <= 14:\n color = palettes[sel]['Amber'] # Amber\n if difference <= 7: \n color = palettes[sel]['Red'] # Red\n\n return color\n\ndef heartbeat(self):\n \"\"\"\n Retrieve the latest log, matching the user & PID. 
Once the difference between the current\n timestamp and the last log timestamp exceeds 60 seconds,\n spawn a new process, and kill the current process\n \"\"\"\n path = (Path('.')/'database'/'log.txt').absolute()\n path = Path(r'R:\Common\MFG\Tools\GDP Tracker\database\log.txt').absolute()\n\n def kill_spawn():\n \"\"\"Spawn a new process and destroy the current one\"\"\"\n base = os.path.basename(__file__)\n filename = os.path.splitext(base)[0] + '.exe'\n subprocess.Popen(filename, shell=True)\n root.destroy()\n\n while True:\n time.sleep(5)\n try:\n \"\"\"Normal Network Activity\"\"\"\n with FileReadBackwards(path, encoding='utf-8') as frb:\n line = frb.readline()\n if line:\n log_user, log_pid, log_timestamp, log_status, *misc = line.split(',')\n if log_user.strip()==user and log_pid.strip()==str(pid):\n delta = datetime.now() - datetime.strptime(\n log_timestamp.strip(), '%Y-%m-%d %H:%M:%S.%f')\n if delta.seconds > 60:\n kill_spawn()\n else:\n pass\n else:\n pass\n except:\n pass\n\n\nif __name__ == '__main__':\n root = Tk()\n root.geometry('656x675+0+0')\n main = Tile(root)\n\n #watch = thread.start_new_thread(heartbeat, (1,)) #parallel monitor thread\n root.mainloop()","sub_path":"prototype_Tile.py","file_name":"prototype_Tile.py","file_ext":"py","file_size_in_byte":13449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"120191077","text":"import http.client\nimport time\nfrom datetime import datetime\n\nfrom mongoengine import connect, fields, Document\n\n# Creating connection\nmongo_database = connect(authentication_source='admin',\n host='mongodb://3.120.160.125/test',\n port=27017,\n alias='main',\n serverSelectionTimeoutMS=5000\n )\nheaders = {\"accept\": \"application/json\", \"x-cr-api-token\": \"e7f6e462a0e84cbab0dcae505cf54239\",\n \"Authorization\": \"Basic SEFYXENyZWF0aXZlTWVkdXNhOmVYeVQ5RiUzK0lvWXkmNw==\"}\nconnection = http.client.HTTPSConnection('things.eu-1.bosch-iot-suite.com')\n\n\n# The data model\n\nclass SmartThing(Document):\n thingId = fields.StringField(required=True)\n attributes = fields.DictField(required=True)\n energyConsumption = fields.FloatField(required=True)\n powerConsumption = fields.FloatField(required=True)\n lastSeen = fields.DateTimeField(required=True)\n switchState = fields.StringField(required=True)\n time = fields.DateTimeField(required=True)\n meta = {'db_alias': 'main'}\n\n\nclass CarbonThing(Document):\n thingId = fields.StringField(required=True)\n attributes = fields.DictField(required=True)\n purity = fields.FloatField(required=True)\n temperature = fields.FloatField(required=True)\n humidity = fields.FloatField(required=True)\n lastSeen = fields.DateTimeField(required=True)\n time = fields.DateTimeField(required=True)\n meta = {'db_alias': 'main'}\n\n\nSMARTmacadresses = [\"3AHomeMaticIP%3A3014F711A0000495385ABCCE\",\n \"3AHomeMaticIP%3A3014F711A0000495385A11FB\",\n \"3AHomeMaticIP%3A3014F711A0000495385A110B\"]\n\nCO2macadresses = [\"3AZigBee%3A000d6f000cb919c4\"]\n\n\ndef getData(devicemac, carbon):\n import json\n connection.request(\"GET\", \"/api/2/things/com.bosch.bcx2019%3AHOME-hdm%\" + devicemac,\n None,\n headers)\n response = connection.getresponse()\n json = json.loads(response.read())\n\n if carbon:
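\n # Map the device's airQuality feature into a CarbonThing document.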
{"seq_id":"120191077","text":"import http.client\nimport time\nfrom datetime import datetime\n\nfrom mongoengine import connect, fields, Document\n\n# Creating connection\nmongo_database = connect(authentication_source='admin',\n                         host='mongodb://3.120.160.125/test',\n                         port=27017,\n                         alias='main',\n                         serverSelectionTimeoutMS=5000\n                         )\nheaders = {\"accept\": \"application/json\", \"x-cr-api-token\": \"e7f6e462a0e84cbab0dcae505cf54239\",\n           \"Authorization\": \"Basic SEFYXENyZWF0aXZlTWVkdXNhOmVYeVQ5RiUzK0lvWXkmNw==\"}\nconnection = http.client.HTTPSConnection('things.eu-1.bosch-iot-suite.com')\n\n\n# The data model\n\nclass SmartThing(Document):\n    thingId = fields.StringField(required=True)\n    attributes = fields.DictField(required=True)\n    energyConsumption = fields.FloatField(required=True)\n    powerConsumption = fields.FloatField(required=True)\n    lastSeen = fields.DateTimeField(required=True)\n    switchState = fields.StringField(required=True)\n    time = fields.DateTimeField(required=True)\n    meta = {'db_alias': 'main'}\n\n\nclass CarbonThing(Document):\n    thingId = fields.StringField(required=True)\n    attributes = fields.DictField(required=True)\n    purity = fields.FloatField(required=True)\n    temperature = fields.FloatField(required=True)\n    humidity = fields.FloatField(required=True)\n    lastSeen = fields.DateTimeField(required=True)\n    time = fields.DateTimeField(required=True)\n    meta = {'db_alias': 'main'}\n\n\nSMARTmacadresses = [\"3AHomeMaticIP%3A3014F711A0000495385ABCCE\",\n                    \"3AHomeMaticIP%3A3014F711A0000495385A11FB\",\n                    \"3AHomeMaticIP%3A3014F711A0000495385A110B\"]\n\nCO2macadresses = [\"3AZigBee%3A000d6f000cb919c4\"]\n\n\ndef getData(devicemac, carbon):\n    import json\n    connection.request(\"GET\", \"/api/2/things/com.bosch.bcx2019%3AHOME-hdm%\" + devicemac,\n                       None,\n                       headers)\n    response = connection.getresponse()\n    # keep the parsed body in its own name instead of shadowing the json module\n    payload = json.loads(response.read())\n\n    if carbon:\n        mongo_object = {\n            \"thingId\": payload['thingId'],\n            \"attributes\": payload[\"attributes\"],\n            \"purity\": payload['features']['airQuality']['properties']['status']['value']['purity'],\n            \"temperature\": payload['features']['airQuality']['properties']['status']['value']['temperature'],\n            \"humidity\": payload['features']['airQuality']['properties']['status']['value']['humidity'],\n            'lastSeen': datetime.fromtimestamp(\n                payload['features']['connectivity']['properties']['status']['value']['lastSeen'] / 1e3),\n            'time': datetime.now()  # call now(); storing the function object would break the DateTimeField\n        }\n        r = CarbonThing(**mongo_object).save()\n        # print the saved document id\n        print(r.id)\n    else:\n        mongo_object = {\n            \"thingId\": payload['thingId'],\n            \"attributes\": payload[\"attributes\"],\n            \"energyConsumption\": payload['features']['powerMeter']['properties']['status']['value']['energyConsumption'],\n            \"powerConsumption\": payload['features']['powerMeter']['properties']['status']['value']['powerConsumption'],\n            'switchState': payload['features']['powerSwitch']['properties']['status']['value']['switchState'],\n            'lastSeen': datetime.fromtimestamp(\n                payload['features']['connectivity']['properties']['status']['value']['lastSeen'] / 1e3),\n            'time': datetime.now()\n        }\n        r = SmartThing(**mongo_object).save()\n        # print the saved document id\n        print(r.id)\n\n\nwhile True:\n    for address in SMARTmacadresses:\n        getData(address, False)\n    for address in CO2macadresses:\n        getData(address, True)\n    time.sleep(5)\n","sub_path":"analysis/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"410092720","text":"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Train and Eval DDPG.\n\nTo run:\n\n```bash\ntensorboard --logdir $HOME/tmp/ddpg/gym/HalfCheetah-v2/ --port 2223 &\n\npython tf_agents/agents/ddpg/examples/v2/train_eval.py \\\n  --root_dir=$HOME/tmp/ddpg/gym/HalfCheetah-v2/ \\\n  --num_iterations=2000000 \\\n  --alsologtostderr\n```\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\nimport time\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport gin\nfrom six.moves import range\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nfrom tf_agents.agents.ddpg import ddpg_agent\nfrom tf_agents.drivers import dynamic_step_driver\nfrom tf_agents.environments import parallel_py_environment\nfrom tf_agents.environments import suite_mujoco\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.eval import metric_utils\nfrom tf_agents.keras_layers import inner_reshape\nfrom tf_agents.metrics import tf_metrics\nfrom tf_agents.networks import nest_map\nfrom tf_agents.networks import sequential\nfrom tf_agents.replay_buffers import tf_uniform_replay_buffer\nfrom tf_agents.utils import common\n\n\nflags.DEFINE_string(\n    'root_dir',\n    os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),\n    'Root directory for writing logs/summaries/checkpoints.',\n)\nflags.DEFINE_integer(\n    'num_iterations', 100000, 'Total number train/eval iterations to 
perform.'\n)\nflags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')\nflags.DEFINE_multi_string('gin_param', None, 'Gin binding parameters.')\n\n\nFLAGS = flags.FLAGS\n\n\n@gin.configurable\ndef train_eval(\n root_dir,\n env_name='HalfCheetah-v2',\n eval_env_name=None,\n env_load_fn=suite_mujoco.load,\n num_iterations=2000000,\n actor_fc_layers=(400, 300),\n critic_obs_fc_layers=(400,),\n critic_action_fc_layers=None,\n critic_joint_fc_layers=(300,),\n # Params for collect\n initial_collect_steps=1000,\n collect_steps_per_iteration=1,\n num_parallel_environments=1,\n replay_buffer_capacity=100000,\n ou_stddev=0.2,\n ou_damping=0.15,\n # Params for target update\n target_update_tau=0.05,\n target_update_period=5,\n # Params for train\n train_steps_per_iteration=1,\n batch_size=64,\n actor_learning_rate=1e-4,\n critic_learning_rate=1e-3,\n dqda_clipping=None,\n td_errors_loss_fn=tf.compat.v1.losses.huber_loss,\n gamma=0.995,\n reward_scale_factor=1.0,\n gradient_clipping=None,\n use_tf_functions=True,\n # Params for eval\n num_eval_episodes=10,\n eval_interval=10000,\n # Params for checkpoints, summaries, and logging\n log_interval=1000,\n summary_interval=1000,\n summaries_flush_secs=10,\n debug_summaries=False,\n summarize_grads_and_vars=False,\n eval_metrics_callback=None,\n):\n \"\"\"A simple train and eval for DDPG.\"\"\"\n root_dir = os.path.expanduser(root_dir)\n train_dir = os.path.join(root_dir, 'train')\n eval_dir = os.path.join(root_dir, 'eval')\n\n train_summary_writer = tf.compat.v2.summary.create_file_writer(\n train_dir, flush_millis=summaries_flush_secs * 1000\n )\n train_summary_writer.set_as_default()\n\n eval_summary_writer = tf.compat.v2.summary.create_file_writer(\n eval_dir, flush_millis=summaries_flush_secs * 1000\n )\n eval_metrics = [\n tf_metrics.AverageReturnMetric(buffer_size=num_eval_episodes),\n tf_metrics.AverageEpisodeLengthMetric(buffer_size=num_eval_episodes),\n ]\n\n global_step = tf.compat.v1.train.get_or_create_global_step()\n with tf.compat.v2.summary.record_if(\n lambda: tf.math.equal(global_step % summary_interval, 0)\n ):\n if num_parallel_environments > 1:\n tf_env = tf_py_environment.TFPyEnvironment(\n parallel_py_environment.ParallelPyEnvironment(\n [lambda: env_load_fn(env_name)] * num_parallel_environments\n )\n )\n else:\n tf_env = tf_py_environment.TFPyEnvironment(env_load_fn(env_name))\n eval_env_name = eval_env_name or env_name\n eval_tf_env = tf_py_environment.TFPyEnvironment(env_load_fn(eval_env_name))\n\n actor_net = create_actor_network(actor_fc_layers, tf_env.action_spec())\n critic_net = create_critic_network(\n critic_obs_fc_layers, critic_action_fc_layers, critic_joint_fc_layers\n )\n\n tf_agent = ddpg_agent.DdpgAgent(\n tf_env.time_step_spec(),\n tf_env.action_spec(),\n actor_network=actor_net,\n critic_network=critic_net,\n actor_optimizer=tf.compat.v1.train.AdamOptimizer(\n learning_rate=actor_learning_rate\n ),\n critic_optimizer=tf.compat.v1.train.AdamOptimizer(\n learning_rate=critic_learning_rate\n ),\n ou_stddev=ou_stddev,\n ou_damping=ou_damping,\n target_update_tau=target_update_tau,\n target_update_period=target_update_period,\n dqda_clipping=dqda_clipping,\n td_errors_loss_fn=td_errors_loss_fn,\n gamma=gamma,\n reward_scale_factor=reward_scale_factor,\n gradient_clipping=gradient_clipping,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=global_step,\n )\n tf_agent.initialize()\n\n train_metrics = [\n tf_metrics.NumberOfEpisodes(),\n 
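# (aside) later in this file the same buffer is read back as a tf.data\n        # pipeline; sampling two adjacent steps per item yields the (s, s')\n        # transition pairs the DDPG update consumes, exactly via:\n        #   replay_buffer.as_dataset(num_parallel_calls=3,\n        #                            sample_batch_size=batch_size,\n        #                            num_steps=2).prefetch(3)\n        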
tf_metrics.EnvironmentSteps(),\n tf_metrics.AverageReturnMetric(),\n tf_metrics.AverageEpisodeLengthMetric(),\n ]\n\n eval_policy = tf_agent.policy\n collect_policy = tf_agent.collect_policy\n\n replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(\n tf_agent.collect_data_spec,\n batch_size=tf_env.batch_size,\n max_length=replay_buffer_capacity,\n )\n\n initial_collect_driver = dynamic_step_driver.DynamicStepDriver(\n tf_env,\n collect_policy,\n observers=[replay_buffer.add_batch],\n num_steps=initial_collect_steps,\n )\n\n collect_driver = dynamic_step_driver.DynamicStepDriver(\n tf_env,\n collect_policy,\n observers=[replay_buffer.add_batch] + train_metrics,\n num_steps=collect_steps_per_iteration,\n )\n\n if use_tf_functions:\n initial_collect_driver.run = common.function(initial_collect_driver.run)\n collect_driver.run = common.function(collect_driver.run)\n tf_agent.train = common.function(tf_agent.train)\n\n # Collect initial replay data.\n logging.info(\n 'Initializing replay buffer by collecting experience for %d steps with '\n 'a random policy.',\n initial_collect_steps,\n )\n initial_collect_driver.run()\n\n results = metric_utils.eager_compute(\n eval_metrics,\n eval_tf_env,\n eval_policy,\n num_episodes=num_eval_episodes,\n train_step=global_step,\n summary_writer=eval_summary_writer,\n summary_prefix='Metrics',\n )\n if eval_metrics_callback is not None:\n eval_metrics_callback(results, global_step.numpy())\n metric_utils.log_metrics(eval_metrics)\n\n time_step = None\n policy_state = collect_policy.get_initial_state(tf_env.batch_size)\n\n timed_at_step = global_step.numpy()\n time_acc = 0\n\n # Dataset generates trajectories with shape [Bx2x...]\n dataset = replay_buffer.as_dataset(\n num_parallel_calls=3, sample_batch_size=batch_size, num_steps=2\n ).prefetch(3)\n iterator = iter(dataset)\n\n def train_step():\n experience, _ = next(iterator)\n return tf_agent.train(experience)\n\n if use_tf_functions:\n train_step = common.function(train_step)\n\n for _ in range(num_iterations):\n start_time = time.time()\n time_step, policy_state = collect_driver.run(\n time_step=time_step,\n policy_state=policy_state,\n )\n for _ in range(train_steps_per_iteration):\n train_loss = train_step()\n time_acc += time.time() - start_time\n\n if global_step.numpy() % log_interval == 0:\n logging.info(\n 'step = %d, loss = %f', global_step.numpy(), train_loss.loss\n )\n steps_per_sec = (global_step.numpy() - timed_at_step) / time_acc\n logging.info('%.3f steps/sec', steps_per_sec)\n tf.compat.v2.summary.scalar(\n name='global_steps_per_sec', data=steps_per_sec, step=global_step\n )\n timed_at_step = global_step.numpy()\n time_acc = 0\n\n for train_metric in train_metrics:\n train_metric.tf_summaries(\n train_step=global_step, step_metrics=train_metrics[:2]\n )\n\n if global_step.numpy() % eval_interval == 0:\n results = metric_utils.eager_compute(\n eval_metrics,\n eval_tf_env,\n eval_policy,\n num_episodes=num_eval_episodes,\n train_step=global_step,\n summary_writer=eval_summary_writer,\n summary_prefix='Metrics',\n )\n if eval_metrics_callback is not None:\n eval_metrics_callback(results, global_step.numpy())\n metric_utils.log_metrics(eval_metrics)\n\n return train_loss\n\n\ndense = functools.partial(\n tf.keras.layers.Dense,\n activation=tf.keras.activations.relu,\n kernel_initializer=tf.compat.v1.variance_scaling_initializer(\n scale=1.0 / 3.0, mode='fan_in', distribution='uniform'\n ),\n)\n\n\ndef create_identity_layer():\n return tf.keras.layers.Lambda(lambda x: x)\n\n\ndef 
create_fc_network(layer_units):\n return sequential.Sequential([dense(num_units) for num_units in layer_units])\n\n\ndef create_actor_network(fc_layer_units, action_spec):\n \"\"\"Create an actor network for DDPG.\"\"\"\n flat_action_spec = tf.nest.flatten(action_spec)\n if len(flat_action_spec) > 1:\n raise ValueError('Only a single action tensor is supported by this network')\n flat_action_spec = flat_action_spec[0]\n\n fc_layers = [dense(num_units) for num_units in fc_layer_units]\n\n num_actions = flat_action_spec.shape.num_elements()\n action_fc_layer = tf.keras.layers.Dense(\n num_actions,\n activation=tf.keras.activations.tanh,\n kernel_initializer=tf.keras.initializers.RandomUniform(\n minval=-0.003, maxval=0.003\n ),\n )\n\n scaling_layer = tf.keras.layers.Lambda(\n lambda x: common.scale_to_spec(x, flat_action_spec)\n )\n return sequential.Sequential(fc_layers + [action_fc_layer, scaling_layer])\n\n\ndef create_critic_network(\n obs_fc_layer_units, action_fc_layer_units, joint_fc_layer_units\n):\n \"\"\"Create a critic network for DDPG.\"\"\"\n\n def split_inputs(inputs):\n return {'observation': inputs[0], 'action': inputs[1]}\n\n obs_network = (\n create_fc_network(obs_fc_layer_units)\n if obs_fc_layer_units\n else create_identity_layer()\n )\n action_network = (\n create_fc_network(action_fc_layer_units)\n if action_fc_layer_units\n else create_identity_layer()\n )\n joint_network = (\n create_fc_network(joint_fc_layer_units)\n if joint_fc_layer_units\n else create_identity_layer()\n )\n value_fc_layer = tf.keras.layers.Dense(\n 1,\n activation=None,\n kernel_initializer=tf.keras.initializers.RandomUniform(\n minval=-0.003, maxval=0.003\n ),\n )\n\n return sequential.Sequential([\n tf.keras.layers.Lambda(split_inputs),\n nest_map.NestMap({'observation': obs_network, 'action': action_network}),\n nest_map.NestFlatten(),\n tf.keras.layers.Concatenate(),\n joint_network,\n value_fc_layer,\n inner_reshape.InnerReshape([1], []),\n ])\n\n\ndef main(_):\n tf.compat.v1.enable_v2_behavior()\n logging.set_verbosity(logging.INFO)\n gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)\n train_eval(FLAGS.root_dir, num_iterations=FLAGS.num_iterations)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('root_dir')\n app.run(main)\n","sub_path":"tf_agents/agents/ddpg/examples/v2/train_eval.py","file_name":"train_eval.py","file_ext":"py","file_size_in_byte":12269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"452006882","text":"#!/usr/bin/python\n\n\n# Copyright (c) 2018 Juniper Networks, Inc. 
All rights reserved.\n#\n\n\"\"\"\nThis file contains utility functions for sending prouter and job objectlogs\nvia sandesh\n\"\"\"\n\nimport uuid\nimport time\nimport json\n\nfrom job_manager.job_log_utils import JobLogUtils\n\n\ndef validate_job_ctx(job_ctx, results):\n if job_ctx.get('config_args') is None:\n results['msg'] = \"Sandesh args not present in job_ctx\"\n results['failed'] = True\n elif job_ctx.get('job_template_fqname') is None:\n results['msg'] = \"Job template fqname not present in job_ctx\"\n results['failed'] = True\n elif job_ctx.get('job_execution_id') is None:\n results['msg'] = \"Job execution id not present in job_ctx\"\n results['failed'] = True\n elif job_ctx.get('job_input') is None:\n results['msg'] = \"Job input not present in job_ctx\"\n results['failed'] = True\n\n return results\n\n\ndef send_prouter_object_log(prouter_fqname,\n job_ctx,\n os_version,\n serial_num,\n onboarding_state):\n results = {}\n results['failed'] = False\n\n results = validate_job_ctx(job_ctx, results)\n if results['failed']:\n return results\n\n try:\n job_log_util = JobLogUtils(\n sandesh_instance_id=str(\n uuid.uuid4()), config_args=json.dumps(\n job_ctx['config_args']))\n job_log_util.send_prouter_object_log(\n prouter_fqname,\n job_ctx['job_execution_id'],\n json.dumps(job_ctx['job_input']),\n job_ctx['job_template_fqname'],\n onboarding_state,\n os_version,\n serial_num)\n time.sleep(10)\n except Exception as ex:\n msg = \"Failed to create following device log due to error: %s\\n\\t \\\n job name: %s\\n\\t \\\n job execution id: %s\\n\\t \\\n device name: %s\\n\\t \\\n onboarding_state: %s\\n\" \\\n % (str(ex), job_ctx['job_template_fqname'],\n job_ctx['job_execution_id'], str(prouter_fqname),\n onboarding_state)\n results['msg'] = msg\n results['failed'] = True\n\n return results\n\n\ndef send_job_object_log(job_ctx,\n message,\n status,\n result):\n results = {}\n results['failed'] = False\n\n results = validate_job_ctx(job_ctx, results)\n if results['failed']:\n return results\n\n try:\n job_log_util = JobLogUtils(\n sandesh_instance_id=str(\n uuid.uuid4()), config_args=json.dumps(\n job_ctx['config_args']))\n job_log_util.send_job_log(\n job_ctx['job_template_fqname'],\n job_ctx['job_execution_id'],\n message,\n status,\n result)\n time.sleep(10)\n except Exception as ex:\n msg = \"Failed to create following job log due to error: %s\\n\\t \\\n job name: %s\\n\\t \\\n job execution id: %s\\n\\t \\\n job status: %s\\n\\t, \\\n log message: %s\\n\" \\\n % (str(ex), job_ctx['job_template_fqname'],\n job_ctx['job_execution_id'], status, message)\n\n results['msg'] = msg\n results['failed'] = True\n\n return results\n","sub_path":"src/config/fabric-ansible/ansible-playbooks/module_utils/sandesh_log_utils.py","file_name":"sandesh_log_utils.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"614926468","text":"#!/usr/bin/env python3\n\nfrom pathlib import Path\nimport cv2\nimport depthai as dai\nimport numpy as np\nimport time\nfrom datetime import datetime\nimport pickle\nfrom datetime import date\n\nnnPath = str((Path(__file__).parent / Path('./models/OpenVINO_2021_2/fresh_rotten_mobilenetssdv2_openvino_2021.3_6shave.blob')).resolve().absolute())\n\n# MobilenetSSD class labels\nlabelMap = [\"background\", \"Fresh\", \"Rotten\"]\n\nfresh_count = 0\nrotten_count = 0\ndata = {}\ndata['record'] = []\ntoday = date.today()\n\n# Sart defining a pipeline\npipeline = dai.Pipeline()\n\n# 
Define a source - color camera\ncam = pipeline.createColorCamera()\ncam.setPreviewSize(300, 300)\ncam.setInterleaved(False)\n\n# Define a neural network that will make predictions based on the source frames\n# DetectionNetwork class produces ImgDetections message that carries parsed\n# detection results.\nnn = pipeline.createMobileNetDetectionNetwork()\nnn.setBlobPath(nnPath)\n\n#nn.setConfidenceThreshold(0.7)\nnn.setConfidenceThreshold(0.5)\nnn.setNumInferenceThreads(2)\nnn.input.setBlocking(False)\n\ncam.preview.link(nn.input)\n\n# Create XlinkOut nodes\nxoutFrame = pipeline.createXLinkOut()\nxoutFrame.setStreamName(\"rgb\")\ncam.preview.link(xoutFrame.input)\n\nxoutNN = pipeline.createXLinkOut()\nxoutNN.setStreamName(\"nn\")\nnn.out.link(xoutNN.input)\n\n# Pipeline defined, now the device is connected to\nwith dai.Device(pipeline) as device:\n\n    # Start pipeline\n    device.startPipeline()\n\n    # Output queues will be used to get the rgb frames and nn data from the\n    # output streams defined above.\n    qRgb = device.getOutputQueue(name=\"rgb\", maxSize=4, blocking=False)\n    qDet = device.getOutputQueue(name=\"nn\", maxSize=4, blocking=False)\n\n    startTime = time.monotonic()\n    counter = 0\n    detections = []\n    frame = None\n\n    # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height\n    def frameNorm(frame, bbox):\n        normVals = np.full(len(bbox), frame.shape[0])\n        normVals[::2] = frame.shape[1]\n        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)\n\n    def displayFrame(name, frame, fr_count=None):\n        # use None instead of a mutable default argument: a shared default\n        # list would keep growing across calls and inflate the counts below\n        if fr_count is None:\n            fr_count = []\n        for detection in detections:\n            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))\n            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)\n            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n            cv2.putText(frame, f\"{int(detection.confidence * 100)}%\", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n            fr_count.append(labelMap[detection.label])\n        cv2.imshow(name, frame)\n        return fr_count\n\n    while True:\n\n        inRgb = qRgb.tryGet()\n        inDet = qDet.tryGet()\n\n        if inRgb is not None:\n            frame = inRgb.getCvFrame()\n            cv2.putText(frame, \"NN fps: {:.2f}\".format(counter / (time.monotonic() - startTime)),\n                        (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color=(255, 255, 255))\n\n        if inDet is not None:\n            detections = inDet.detections\n            counter += 1\n\n        # if the frame is available, render detection data on frame and display.\n        if frame is not None:\n            fr_count = displayFrame(\"rgb\", frame)\n            for fr in fr_count:\n                if fr == \"Fresh\":\n                    fresh_count += 1\n                elif fr == \"Rotten\":\n                    rotten_count += 1\n            try:\n                frame = None\n                fresh_count_p = fresh_count / (fresh_count + rotten_count) * 100\n                rotten_count_p = rotten_count / (fresh_count + rotten_count) * 100\n                now = datetime.now()\n                current_time = now.strftime(\"%H:%M:%S\")\n                print(\" We have {}% of fresh fruits and {}% of rotten fruits.\"\n                      .format(fresh_count_p,rotten_count_p))\n                #print(now)\n                #print(current_time)\n\n                #save these things in CSV and average the values then send a json\n                data['record'].append({\n                    'date': str(today),\n                    'time': str(current_time),\n                    'Fresh_percentage': fresh_count_p,\n                    'Rotten_percentage': rotten_count_p\n                })\n                #print(data)\n                with open('data.pickle', 'wb') as outfile:\n                    pickle.dump(data, outfile)\n            except ZeroDivisionError:\n                # no detections yet -- both counts are still zero\n                pass\n\n        if cv2.waitKey(1) == ord('q'):\n            
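# (aside) the 'data.pickle' file written above can be read back later, e.g.:\n            #   with open('data.pickle', 'rb') as f:\n            #       data = pickle.load(f)\n            #   print(data['record'][-1])  # newest Fresh/Rotten percentages\n            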
break\n","sub_path":"oakd-files/mobilenetssd_freshrot.py","file_name":"mobilenetssd_freshrot.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"319322185","text":"\"Droplet Helper Funs\"\nimport subprocess\nfrom contextlib import contextmanager\nfrom os import chdir, getcwd\nfrom os.path import dirname, expanduser, expandvars, join\nfrom sys import executable\nfrom tempfile import TemporaryDirectory\nfrom typing import Iterable\n\nfrom digitalocean import Droplet, Manager\n\nANSIBLE = join(dirname(executable), \"ansible-playbook\")\n\n\nclass DropletNotFound(Exception):\n \"Droplet Not Found Error\"\n\n\ndef expandall(string):\n \"Expand user home and env vars in string\"\n return expanduser(expandvars(string))\n\n\ndef find_droplet_by_name(droplet_name: str, access_token: str) -> Droplet:\n \"\"\"\n Find droplet by name. Finds the first one by name, I think from\n any project.\n\n :param droplet_name: The name of the droplet to find.\n :param manager: A Manager object to use.\n\n \"\"\"\n manager = Manager(token=access_token)\n\n try:\n return next(\n droplet\n for droplet in manager.get_all_droplets()\n if droplet.name == droplet_name\n )\n except StopIteration:\n raise DropletNotFound(f\"Droplet {droplet_name} not found.\") from None\n\n\ndef rsync(source, dest, include: Iterable[str] = ()):\n \"Runs rsync in a subprocess\"\n\n if include is None or len(include) == 0:\n inc_params = '--exclude=\"*.part\"'\n else:\n inc_params = \" \".join(f'\"--include={i}\"' for i in include) + ' --exclude=\"*\"'\n\n subprocess.run(\n f\"rsync -ruv '{source}' '{dest}' {inc_params} --progress\",\n check=True,\n shell=True,\n )\n\n\ndef ansible_playbook(inventory_file, playbook_path):\n \"Call ansible-playbook with inventory and playbook files.\"\n subprocess.run(\n f\"{ANSIBLE} -i {inventory_file} -u root {playbook_path}\",\n shell=True,\n check=True,\n )\n\n\n@contextmanager\ndef in_tempdir():\n \"Change to a temporary directory and then back again\"\n go_back_to = getcwd()\n with TemporaryDirectory() as tempdir:\n chdir(tempdir)\n try:\n yield tempdir\n finally:\n chdir(go_back_to)\n","sub_path":"torrentdoer/droplet_helper.py","file_name":"droplet_helper.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"448128009","text":"#####################################\n# Codes from:\n#\n# https://shizenkarasuzon.hatenablog.com/entry/2018/08/27/002812\n#\n# Modified by Chai Jiazheng\n# E-mail: chai.jiazheng.q1@dc.tohoku.ac.jp\n#\n# 01/07/2019\n#\n#######################################\nPI=3.14159265359\nimport numpy as np\n\nclass PID:\n def __init__(self, P=200, I=0.1, D=0.5,delta_time=0.04,target_pos=0):\n self.Kp = P # P-gain (Cartesian space)\n self.Ki = I # D-gain (joint space)\n self.Kd = D # Link 2 length [m]\n\n self.l1 = 0.1\n self.l2 = 0.1\n self.targetPos = target_pos\n self.delta_time=delta_time #Smallest timestep of the simulation\n self.clear()\n #print(delta_time)\n\n def clear(self):\n self.PTerm = 0\n self.ITerm = 0\n self.DTerm = 0\n self.last_error = 0\n\n # Windup Guard\n self.windup_guard = 20.0\n self.output = 0.0\n\n def update(self, feedback_value):\n\n # Feedback value takes the target position as its reference.\n # For example, if target position is 0, then when the pole\n # tilts towards the rightside of the z-axis, its value is positive.\n # Else, its value is negative.\n\n # Extract 
values\n        c1 = feedback_value[0] # cos(theta1)\n        c2 = feedback_value[1] # cos(theta2)\n        s1 = feedback_value[2] # sin(theta1)\n        s2 = feedback_value[3] # sin(theta2)\n        c12 = c1 * c2 - s1 * s2 # cos(theta1 + theta2)\n        s12 = s1 * c2 + c1 * s2 # sin(theta1 + theta2)\n        v = feedback_value[6:8] # joint angular velocity\n        e = feedback_value[8:10] # tracking error in the Cartesian space\n\n        # Calculate hand-tip force command\n        f_ref = -self.Kp * e\n\n        # Compute joint torque command\n        tau_ref = np.zeros(2)\n        tau_ref[0] = f_ref[0] * self.l2 * (-s1 - s12) + f_ref[1] * self.l2 * (c1 + c12)\n        tau_ref[1] = f_ref[0] * self.l1 * (-s12) + f_ref[1] * self.l1 * c12\n\n        # Add a friction term\n        tau_ref -= self.Kd * v\n\n\n\n        #if feedback_value>PI:\n        #    feedback_value=-(PI-feedback_value%PI)\n\n\n        self.output = tau_ref #self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)\n\n        #if self.output >0.2:\n        #    self.output = 0.2\n        #elif self.output <-0.2:\n        #    self.output = -0.2\n\n\n        return -self.output, feedback_value\n\n    def setTargetPosition(self, targetPos):\n        self.targetPos = targetPos","sub_path":"pddm/classic_pollicy/base_programing/PID_vreacher.py","file_name":"PID_vreacher.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"626899445","text":"from . import *\r\n\r\n\r\ndef search(request, model, query_mark, terms, check_acc=True,\r\n           fun_check=None, where='', joins=[]):\r\n    '''\r\n    Set up the query conditions\r\n    model: model object\r\n    query_mark: cache identifier\r\n    terms: list of query conditions\r\n    check_acc: filter data by account-set permissions\r\n    fun_check: custom data check function\r\n    where: custom WHERE clause\r\n    joins: joined fields\r\n    '''\r\n    rc_terms = redis.Redis(connection_pool=pool_terms)  # query-condition pool\r\n    rc_querys = redis.Redis(connection_pool=pool_querys)  # query-result pool\r\n    terms = copy.copy(terms)\r\n    # drop the CSRF token\r\n    if 'csrfmiddlewaretoken' in terms:\r\n        terms.pop('csrfmiddlewaretoken')\r\n    # write the query conditions to redis\r\n    key = get_key(request, model, query_mark)\r\n    rc_terms.set(key, json.dumps(terms), ex=60 * 60 * 12)\r\n    # query all record ids matching the conditions and cache them\r\n    rows = fill(request, model, check_acc, where, joins)\r\n    rows_ids = filter(rows, terms, fun_check)\r\n    rc_querys.set(key, json.dumps(rows_ids), ex=60 * 60 * 12)\r\n    return True\r\n\r\n\r\ndef load(request, model, query_mark, page_num, row_num, joins=[]):\r\n    '''\r\n    Run the cached query and return the (paginated) data\r\n    model: model object\r\n    query_mark: cache identifier\r\n    page_num: page number\r\n    row_num: rows per page\r\n    joins: joined fields\r\n    '''\r\n    # redis connections for the condition and result caches\r\n    rc_terms = redis.Redis(connection_pool=pool_terms)\r\n    rc_querys = redis.Redis(connection_pool=pool_querys)\r\n    key = get_key(request, model, query_mark)\r\n    # a cache miss returns an empty result; the query must be re-run\r\n    if not rc_querys.get(key):\r\n        return '{\"total\": 0, \"rows\": []}'\r\n    rows_ids = json.loads(rc_querys.get(key))\r\n\r\n    # paginate; a total below the page size is not paginated\r\n    if page_num and row_num:\r\n        if len(rows_ids) > row_num:\r\n            pnum = math.ceil(len(rows_ids) / row_num)\r\n            if page_num > pnum:\r\n                page_num = pnum\r\n            p_start = int((page_num - 1) * row_num)\r\n            p_end = int(page_num * row_num)\r\n            if p_end > len(rows_ids):\r\n                p_end = len(rows_ids)\r\n            ids_cur_page = rows_ids[p_start: p_end]\r\n        else:\r\n            ids_cur_page = rows_ids\r\n    else:\r\n        ids_cur_page = rows_ids\r\n\r\n    # fetch the full rows for the ids on the current page\r\n\r\n    tb_main = model._meta.db_table\r\n    sql_idin = '%s.id IN (%s)' % (tb_main, ','.join(ids_cur_page))\r\n    rows_cur_page = fill(request, model, False, sql_idin, joins)\r\n\r\n    # return json data\r\n    rows_page_json = str(json.dumps(rows_cur_page))\r\n    rows_page_json = rows_page_json.replace('\"None\"', '\"\"')\r\n    context = '{\"total\": %d, \"rows\": %s}'\r\n    context = context % (len(rows_ids), rows_page_json)\r\n    return context\r\n\r\n\r\ndef fill(request, model, check_acc=False, where='', joins=[]):\r\n    '''\r\n    Fill in complete rows with JOINs (multi-condition query translation!)\r\n    model: model object\r\n    check_acc: filter data by account-set permissions\r\n    where: custom WHERE clause\r\n    joins: joined fields\r\n    :return: list of filled rows\r\n    '''\r\n    tb_main = model._meta.db_table  # main table name\r\n\r\n    # get the user's account-set permissions\r\n    if check_acc:\r\n        accper = request.session['user_info']['permission_account']\r\n        accs = accper.split('; ')\r\n        if type(accs) is not list:\r\n            accs = [accs]\r\n        ins = str(tuple(accs))\r\n\r\n    # get all model fields\r\n    mfields = [str(m.verbose_name) for m in model._meta.fields]\r\n    wheres_acc = []\r\n    # get the construction units and turnover warehouses the user may access\r\n    if 'cons_mark' in mfields:\r\n        sql = '(%s.cons_mark IN %s OR %s.cons_mark IS NULL)' % (\r\n            tb_main, ins, tb_main)\r\n        wheres_acc.append(sql)\r\n    if 'wh_mark' in mfields:\r\n        sql = '(%s.wh_mark IN %s OR %s.wh_mark IS NULL)' % (\r\n            tb_main, ins, tb_main)\r\n        wheres_acc.append(sql)\r\n    # account-set permission filter\r\n    sql_acc = ' AND '.join(wheres_acc)\r\n\r\n    # visibility filter\r\n    wheres = ['%s.is_visible = 1' % tb_main]\r\n    if check_acc:\r\n        wheres.append(sql_acc)\r\n    # custom where condition\r\n    if where:\r\n        wheres.append(where)\r\n    # all pre-filter conditions\r\n    sql_where = '(SELECT * FROM %s WHERE %s)' % (tb_main, ' AND '.join(wheres))\r\n\r\n    # load related-table data with LEFT JOINs\r\n    join_sqls = []\r\n    if joins:\r\n        for j in joins:\r\n            sql_join = 'LEFT JOIN %s ON %s'\r\n            join_tb = j['join_model']._meta.db_table  # joined table name\r\n            on_sqls = []\r\n            # build the join conditions\r\n            for f in j['fields']:\r\n                sql_on = '%s.%s = %s.%s'\r\n                params = ('a', f['from'], join_tb, f['to'])\r\n                sql_on = sql_on % params\r\n                on_sqls.append(sql_on)\r\n            sql_join = sql_join % (join_tb, ' AND '.join(on_sqls))\r\n            join_sqls.append(sql_join)\r\n\r\n    # run the query\r\n    try:\r\n        sql = 'SELECT * FROM %s a %s' % (sql_where, ' '.join(join_sqls))\r\n        querys = model.objects.raw(sql)\r\n        rows_filled = [q.todict() for q in querys]\r\n        return rows_filled\r\n    except Exception as e:\r\n        return []\r\n\r\n\r\ndef filter(rows, terms, fun_check=None):\r\n    '''\r\n    Filter the rows against the query conditions.\r\n    1. Accepts multiple query conditions (json format), one condition per\r\n       field; each condition may in turn contain several keywords.\r\n    2. A plain-text keyword is matched as a regular expression.\r\n    3. A keyword starting with one of ['>','>=','<','<=','=','!='] is\r\n       treated as a relational expression; field values are converted to\r\n       numbers or dates for the comparison. The conditions for different\r\n       fields are ANDed; the keywords inside one condition are ORed.\r\n    rows: the data to filter, as returned by fill()\r\n    terms: list of query conditions\r\n    fun_check: custom data check function\r\n    :return list\r\n    '''\r\n    # NOTE: this helper shadows the builtin filter() inside this module\r\n\r\n    def is_number(s):\r\n        '''Return True if s is numeric'''\r\n        try:\r\n            a = float(s)\r\n            return True\r\n        except Exception as e:\r\n            return False\r\n\r\n    def if_match(value, term):\r\n        '''Check whether a field value matches one condition'''\r\n        # normalise the condition format\r\n        keywords = None\r\n        try:\r\n            keywords = json.loads(term)\r\n        except Exception as e:\r\n            pass\r\n        finally:\r\n            if type(keywords) is not list:\r\n                keywords = [str(term)]\r\n\r\n        # check the condition keywords one by one\r\n        for i in range(len(keywords)):\r\n            keyword = str(keywords[i])\r\n            # look for a relational operator first; a keyword starting with\r\n            # one of ['>','>=','<','<=','=','!='] is treated as an expression\r\n            ops = ['>', '>=', '<', '<=', '=', '!=']\r\n            cur_op = ''\r\n            for op in ops:\r\n                if keyword.startswith(op):\r\n                    cur_op = op\r\n            # relational expressions compare as numbers or dates\r\n            if cur_op:\r\n                keyword = keyword.replace(' ', '')\r\n                v = keyword[len(cur_op):]\r\n                # map the query syntax's single '=' onto Python's '=='\r\n                py_op = '==' if cur_op == '=' else cur_op\r\n                # numeric comparison for floats\r\n                if is_number(v) and is_number(value):\r\n                    exp = 'float(value) %s float(v)' % py_op\r\n                    return eval(exp)\r\n                else:\r\n                    # date comparison otherwise\r\n                    try:\r\n                        v_date = datetime.datetime.strptime(v,\r\n                                                            '%Y-%m-%d')  # condition date\r\n                        f_date = datetime.datetime.strptime(value[:10],\r\n                                                            '%Y-%m-%d')  # field-value date\r\n                        delta = f_date - v_date\r\n                        exp = 'delta.days %s 0' % py_op\r\n                        return eval(exp)\r\n                    except Exception as e:\r\n                        pass\r\n            else:\r\n                # skip empty keywords\r\n                if keyword == '':\r\n                    return True\r\n                # regex match\r\n                keyword = '%s' % keyword  # .replace('*', '(.+?)')\r\n                if re.search(keyword, value):\r\n                    return True\r\n        return False\r\n\r\n    # filter by the conditions\r\n    rows_filted_ids = []  # matching records\r\n\r\n    for row in rows:\r\n        matched_num = 0  # number of satisfied conditions\r\n        for field in terms:\r\n            keyword = str(terms.get(field, ''))\r\n            # skip empty conditions\r\n            if not keyword:\r\n                matched_num += 1\r\n                continue\r\n            # check whether the field value matches the query condition\r\n            if if_match(row.get(field, ''), keyword):\r\n                matched_num += 1\r\n        # custom-function filter\r\n        if fun_check:\r\n            if not fun_check(row):\r\n                continue\r\n        # all conditions matched: the record qualifies\r\n        if matched_num == len(terms):\r\n            rows_filted_ids.append(row['id'])\r\n\r\n    return rows_filted_ids\r\n\r\n\r\nclass ExportField(object):\r\n    '''Export field descriptor'''\r\n\r\n    def __init__(self, name, title, cwidth=15):\r\n        '''Store the field info'''\r\n        self.name = name  # field name\r\n        self.title = title  # column header\r\n        self.cwidth = cwidth  # column width\r\n\r\n\r\ndef export_excel(request, model, query_mark, fields_export, joins=[]):\r\n    '''\r\n    Export the query result to an excel file\r\n    model: model object\r\n    query_mark: cache identifier\r\n    fields_export: list of ExportField objects\r\n    joins: joined fields\r\n    '''\r\n    max_row_num = 100000  # maximum number of exported rows\r\n\r\n    # load the cached ids\r\n    rc = redis.Redis(connection_pool=pool_querys)\r\n    key = get_key(request, model, query_mark)\r\n    ids = json.loads(rc.get(key))\r\n    # fill in the full rows\r\n    tb_main = model._meta.db_table  # table name\r\n    sql_idin = '%s.id IN (%s)' % (tb_main, ','.join(ids))\r\n    rows = fill(request, model, False, sql_idin, joins)\r\n    # cap the export size\r\n    if len(rows) > max_row_num:\r\n        rows = rows[: max_row_num]\r\n\r\n    # write the header row\r\n    book = openpyxl.Workbook()\r\n    sheet = book.active\r\n    sheet.cell(row=1, column=1).value = 'No.'\r\n    # header background colour\r\n    fp = PatternFill(fill_type='solid', fgColor=\"77DDFF\")  # light blue\r\n    sheet.cell(row=1, column=1).fill = fp\r\n    c = 2\r\n\r\n    for field in fields_export:\r\n        # write the header cell\r\n        cell = sheet.cell(row=1, column=c)\r\n        cell.value = field.title\r\n        # background colour and column width\r\n        cell.fill = fp\r\n        cm = get_column_letter(c)  # column letter\r\n        sheet.column_dimensions[cm].width = field.cwidth\r\n        c += 1\r\n\r\n    # write the data\r\n    r = 2\r\n    c = 2\r\n    for item in rows:\r\n        # row number\r\n        sheet.cell(row=r, column=1).value = r - 1\r\n        # write the data row by row, field by field\r\n        for field in fields_export:\r\n            cell = sheet.cell(row=r, column=c)\r\n            # write the value\r\n            value = item.get(field.name, '')\r\n            value = value.replace('None', '')\r\n            cell.value = value\r\n            c += 1\r\n        c = 2\r\n        r += 1\r\n\r\n    # save to the temp folder\r\n    file_name = get_ufn(request) + '.xlsx'\r\n    file_path = os.path.join(TEMP_DIR, file_name)\r\n    book.save(file_path)\r\n    book.close()\r\n\r\n    # return a FileResponse\r\n    file = open(file_path, 'rb')\r\n    response = FileResponse(file)\r\n    response['Content-Type'] = 'application/octet-stream'\r\n    response['Content-Disposition'] = 'attachment;filename=\"%s\"' % 'export.xlsx'\r\n    return response\r\n","sub_path":"lhwms/lhwms/operator/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":11592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"297870876","text":"\"\"\"\nCreate all of the assemblies of objects here\n\"\"\"\nfrom math import pi, floor\nimport numpy as np\nfrom PIL import Image, ImageDraw\nfrom collections import namedtuple\n\nData = namedtuple('Data', ['extent', 'data'])\n\ndef cartesian_to_image(x, y, extent, nx, ny):\n    i = floor((x - extent[0]) / (extent[1] - extent[0]) * nx)\n    j = floor((y - extent[2]) / (extent[3] - extent[2]) * ny)\n    return i, j\n\n\ndef shielded_true_images(supersample=4):\n    extent = np.array([-12, 12, -8, 8], dtype=np.double)\n    delta = 0.2\n    nx = int((extent[1] - extent[0]) / delta)\n    ny = int((extent[3] - extent[2]) / delta)\n\n    u235 = 0.2\n    steel = 0.15\n    poly = 0.3\n\n    origin = -9 + 3.8 + 0.3\n    outer_radius = 3.8\n    inner_radius = 2.8\n\n    snx, sny = nx*supersample, ny*supersample\n\n    # transmission\n    trans_im = Image.new('F', (nx*supersample, ny*supersample), color=0)\n    draw = ImageDraw.Draw(trans_im)\n    draw.rectangle([cartesian_to_image(-10, -5, extent, snx, sny),\n                    cartesian_to_image(10, 5, extent, snx, sny)], fill=steel)\n    draw.rectangle([cartesian_to_image(-9, -4, extent, snx, sny),\n                    cartesian_to_image(9, 4, extent, snx, sny)], fill=0)\n\n    draw.ellipse([cartesian_to_image(origin - outer_radius, -outer_radius, extent, snx, sny),\n                  cartesian_to_image(origin + outer_radius, outer_radius, extent, snx, sny)], fill=u235)\n    draw.ellipse([cartesian_to_image(origin - inner_radius, -inner_radius, extent, snx, sny),\n                  cartesian_to_image(origin + inner_radius, inner_radius, extent, snx, sny)], fill=0)\n\n    draw.rectangle([cartesian_to_image(5, 3, extent, snx, sny),\n                    cartesian_to_image(7, 1, extent, snx, sny)], fill=steel)\n    draw.rectangle([cartesian_to_image(5, -3, extent, snx, sny),\n                    cartesian_to_image(7, -1, extent, snx, sny)], fill=poly)\n    del draw\n    trans_im = trans_im.resize((nx, ny), Image.BILINEAR)\n    trans_arr = np.array(trans_im, dtype=np.double)\n\n    # fission\n    fission_im = Image.new('F', (snx, sny), color=0)\n    draw = ImageDraw.Draw(fission_im)\n\n    draw.ellipse([cartesian_to_image(origin - outer_radius, -outer_radius, extent, snx, sny),\n                  cartesian_to_image(origin + outer_radius, outer_radius, extent, snx, sny)], fill=0.1)\n    draw.ellipse([cartesian_to_image(origin - inner_radius, -inner_radius, extent, snx, sny),\n                  cartesian_to_image(origin + inner_radius, inner_radius, extent, snx, sny)], fill=0)\n    del draw\n    fission_im = fission_im.resize((nx, ny), Image.BILINEAR)\n    fission_arr = np.array(fission_im, dtype=np.double)\n\n    # p\n    p_im = Image.new('F', (snx, sny), color=0)\n    draw = ImageDraw.Draw(p_im)\n    draw.ellipse([cartesian_to_image(origin - outer_radius, -outer_radius, extent, snx, sny),\n                  cartesian_to_image(origin + outer_radius, 
outer_radius, extent, snx, sny)], fill=1.0)\n draw.ellipse([cartesian_to_image(origin - inner_radius, -inner_radius, extent, snx, sny),\n cartesian_to_image(origin + inner_radius, inner_radius, extent, snx, sny)], fill=0)\n del draw\n p_im = p_im.resize((nx, ny), Image.BILINEAR)\n p_mask = np.array(p_im, dtype=np.double)\n\n xs = np.arange(extent[0], extent[1], delta) + delta / 0.5\n ys = np.arange(extent[2], extent[3], delta) + delta / 0.5\n xs -= origin + 0.1\n ys -= 0\n ring_center_radius = (outer_radius - inner_radius) / 2 + inner_radius\n xv, yv = np.meshgrid(xs, ys)\n radius = np.sqrt(xv**2 + yv[::-1]**2)\n p_arr = -0.5 * (radius - ring_center_radius)**2 + 0.3\n slope = -0.05 / (1.1*3.8)\n p_arr += slope * xv - 0.05\n\n p_arr[p_mask <= 0] = 0\n p_arr[p_arr <= 0] = 0\n\n # p_arr = np.array(p_im, dtype=np.double)\n\n return [Data(extent, trans_arr), Data(extent, fission_arr), Data(extent, p_arr)]\n\n\ndef ut_logo():\n extent = np.array([-12, 12, -8, 8], dtype=np.double)\n\n im = Image.open(\"ut-icon-mono.bmp\")\n rot_im = im.transpose(Image.FLIP_TOP_BOTTOM)\n\n ut_image = np.array(rot_im, dtype=np.double)\n ut_image = 1.0 - ut_image\n ut_image *= 0.1\n\n return [Data(extent, ut_image), Data(extent, ut_image), Data(extent, ut_image)]","sub_path":"scripts/refactor2/assemblies.py","file_name":"assemblies.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"501456767","text":"from setuptools import setup, find_packages\n\nNAME = \"ainainain\"\nVERSION = \"1.1\"\n\npackages = [\n \"setuptools\",\n \"wheel\",\n \"python-dotenv==0.10.1\",\n \"docker==3.7.0\",\n \"requests-unixsocket==0.1.5\",\n \"click==7.0\"\n]\n\nsetup(\n package_data = {\n '': ['share/*.env','share/log/*.env'],\n 'share/log': ['*.env'],\n 'share': ['*.env'],\n },\n name = NAME,\n version = VERSION,\n python_requires='>=3.6',\n description = f\"CLI of {NAME} a Python script\",\n license = \"BSD\",\n url = \"https://bitbucket.org/comcomai/ain-worker\",\n packages=find_packages(),\n install_requires=packages,\n entry_points = {\n 'console_scripts' : [f'ain = ain.ain:call']\n },\n classifiers=[\n \"License :: OSI Approved :: BSD License\"\n ]\n)\n","sub_path":"pypi_install_script/ainainain-1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"158345912","text":"#!/usr/bin/python3\n# scraper.py - the module is used to scrape web sites.\n# The module combines both asynchronous and multiprocessing approaches.\n# aiohttp is used to obtain data from web sites.\n# multiprocessing is used to process the obtained data.\n# Current parsers: news, exchange rates, covid rates, weather.\n\nimport os\nimport aiohttp\nimport asyncio\nimport logging\nimport time\nimport bs4, lxml\nfrom multiprocessing import Process, Queue\nimport requests\nimport re\n\nclass Scraper:\n \n def __init__(self, asyncloop):\n self.logger = logging.getLogger('SM2.scraper')\n self.loop = asyncloop\n if __name__ == '__main__': # Creates a logger if the module is called directly.\n ch = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n self.logger.setLevel(logging.DEBUG)\n self.logger.addHandler(ch)\n self.url_yandex = 'https://yandex.ru/'\n self.url_news = 'https://www.newsru.com'\n self.url_covid = 'https://www.worldometers.info/coronavirus/'\n 
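# (aside) the fetch/parse split the module docstring describes, as a\n        # commented sketch: coroutines download, a separate process parses --\n        #   res = await self.get_page(url)                     # aiohttp, async\n        #   Process(target=parser, args=(res, queue)).start()  # CPU-bound parse\n        #   result = queue.get()                               # handed back via the Queue\n        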
self.url_weather = 'https://api.weather.yandex.ru/v1/informers?'\n        self.logger.debug('An instance of Scraper has been created.')\n        self.news_list = ['Loading news']\n        self.rates_string = '*** Updating exchange rate data ***'\n        self.covid_figures = [\n            '*** Loading coronavirus data ***', \n            '', \n            '',\n            '',\n            '',\n            '',\n            '',\n            ''\n        ]\n        self.forecast_json = None\n\n        self.yandex_weather_token = os.environ.get('YANDEX_WEATHER_TOKEN')\n        if self.yandex_weather_token is None:\n            self.logger.debug('Cannot get YANDEX_WEATHER_TOKEN environment variable')\n        else:\n            self.logger.debug('YANDEX_WEATHER_TOKEN environment variable found')\n\n        if __name__ == '__main__':\n            self.yandex_weather_token = ''\n            self.logger.debug('Yandex weather token will not be used (__main__)')\n        \n        queue = Queue()\n        rates_loop = self.loop.create_task(self.ratesbot(queue))\n        news_loop = self.loop.create_task(self.newsbot(queue))\n        covid_loop = self.loop.create_task(self.covidbot(queue)) \n        if self.yandex_weather_token is not None:\n            weather_loop = self.loop.create_task(self.weatherbot(queue))\n        receiver_loop = self.loop.create_task(self.process_receiver(queue))\n\n    async def get_page(self, link: str, response_type='text', payload='', headers=''):\n        \"\"\" Loads the provided link asynchronously.\n        Returns the page data on success, otherwise returns False.\"\"\"\n        try:\n            async with aiohttp.ClientSession() as session:\n                async with session.get(link, params=payload, headers=headers) as resp:\n                    if resp.status != 200:\n                        self.logger.error(f'Cannot get {link}\\n Status {resp.status}')\n                        return False\n                    else:\n                        self.logger.debug(f'{link} loaded')\n                        if response_type == 'text':\n                            return await resp.text()\n                        elif response_type == 'json':\n                            return await resp.json()\n        except Exception as error:\n            self.logger.error(f'Cannot load the page, the following error occurred: {error}')\n            return False\n\n    def rates_parser(self, res: str, queue):\n        \"\"\" Arguments: res - page data in the string format;\n            queue - multiprocessing Queue instance.\n        Processes the data to find the exchange rates and their deltas.\n        Forms a string that represents the exchange rates of USD to RUB,\n        EUR to RUB, oil price, and their deltas. 
On failure returns None.\n On success puts a dictionary with the key 'rates_string' and \n the string as the value into the queue.\"\"\"\n values = []\n deltas = []\n try:\n soup = bs4.BeautifulSoup(res, features='html.parser') \n rates = soup.find_all('span', class_='inline-stocks__value_inner')\n if len(rates) < 3:\n self.logger.warning('Cannot find the rates on the page.')\n return None\n for rate in rates:\n values.append(rate.getText())\n directions = soup.find_all('span', class_='a11y-hidden')\n if len(directions) < 3:\n self.logger.warning('Cannot find the rates delta on the page.')\n return None\n else:\n for direction in directions:\n if direction.getText()[0] == '+':\n deltas.append('↑')\n elif direction.getText()[0] == '-':\n deltas.append('↓')\n else:\n deltas.append('')\n queue.put({'rates_string': f'$ {values[0]}{deltas[0]} € {values[1]}{deltas[1]} Brent {values[2]}{deltas[2]}'})\n self.logger.info('Got the string for the latest exchange rates.')\n except Exception as exc:\n self.logger.error(f'Cannot update the exchange rates: {exc}')\n\n def news_parser(self, res: str, queue):\n \"\"\" Arguments: res - page data in the string format;\n queue - multiprocessing Queue instanse.\n Processes the data to find the main news on the page.\n Forms a string that represents the news separated by the three\n asterisks. \n On success puts a dictionary with the key 'news_list' and \n the list as the value into the queue.\"\"\"\n new_news = []\n try:\n soup = bs4.BeautifulSoup(res, features='html.parser')\n mainNewsTitle = soup.find('div', class_='sp-main-title')\n mainNewsText = soup.find('div', class_='sp-main-text')\n new_news.append('. '.join([mainNewsTitle.getText().strip(), mainNewsText.getText().strip()]) + ' *** ')\n newsTags = soup.find_all('div', class_='left-feed-text')\n number_of_news = 1\n for tag in newsTags:\n tagTitle = tag.find('div', class_ = 'left-feed-title')\n #tagText = tag.find('div', class_= 'left-feed-anons')\n new_news.append(tagTitle.getText().strip() + ' *** ')\n number_of_news += 1\n if number_of_news == 10:\n break\n if len(new_news) > 0:\n queue.put({'news_list': new_news})\n self.logger.info('Got the latest news.')\n else:\n self.logger.warning('Cannot find the news on the page')\n except Exception as exc:\n self.logger.error(f'Cannot update the news: {exc}')\n\n def covid_parser(self, res: str, queue):\n \"\"\" Arguments: res - page data in the string format;\n queue - multiprocessing Queue instanse.\n Processes the data to find the number of COVID-19 cases.\n Forms a list that represents the number of cases, deaths,\n and recoveries in the world, and in Russia, as well as\n the number of recent cases and deaths.\n On success puts a dictionary with the key 'covid_figures' and \n the list as the value into the queue.\"\"\"\n try:\n #st_time = time.perf_counter() \n \n temp_figures = []\n soup = bs4.BeautifulSoup(res, features='lxml')\n\n figures = soup.find_all('div', class_='maincounter-number')\n for figure in figures:\n temp_figures.append(figure.getText().replace('\\n',''))\n\n nation = re.compile('Russia')\n national_figures = soup.find('a', text=nation, attrs = {'class': 'mt_a'}).parent.parent\n\n temp_figures.extend(national_figures.getText().split()[2:7])\n\n # The old approach.\n #national_figures = soup.find_all('tr')\n #for nation in national_figures:\n #country = nation.getText()\n #if country.find('Russia') != -1:\n #temp_figures.extend(country.split()[2:7])\n #break\n if len(temp_figures) > 7:\n queue.put({'covid_figures': temp_figures})\n 
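# (aside) every parser hands its result back as a single-key dict;\n                # process_receiver() below dispatches on that key, e.g.\n                #   {'news_list': [...]}, {'rates_string': '...'},\n                #   {'covid_figures': [...]}\n                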
self.logger.info('Got the Covid-19 latest figures.')\n else:\n self.logger.warning('Cannot find the Covid-19 figures on the page')\n #self.logger.debug(f'Covid parser runtime: {time.perf_counter() - st_time} seconds.')\n except Exception as exc:\n self.logger.error(f'Cannot update the Covid-19 figures: {exc}')\n\n async def ratesbot(self, queue):\n \"\"\" Argument: queue - multiprocessing Queue instanse.\n The method is async. It periodically loads 'https://yandex.ru/'\n and passes the results to a separate process that parses the data.\"\"\"\n while True:\n res = await self.get_page(self.url_yandex)\n if res == False:\n await asyncio.sleep(600)\n else:\n rates_parser_process = Process(target=self.rates_parser, args=(res, queue))\n rates_parser_process.start()\n await asyncio.sleep(3600)\n\n async def newsbot(self, queue):\n \"\"\" Argument: queue - multiprocessing Queue instanse.\n The method is async. It periodically loads 'https://www.newsru.com'\n and passes the results to a separate process that parses the data.\"\"\"\n while True:\n res = await self.get_page(self.url_news)\n if res == False:\n await asyncio.sleep(600)\n else:\n news_parser_process = Process(target=self.news_parser, args=(res, queue))\n news_parser_process.start()\n \n await asyncio.sleep(3600)\n\n async def covidbot(self, queue):\n \"\"\" Argument: queue - multiprocessing Queue instanse.\n The method is async. It periodically loads \n 'https://www.worldometers.info/coronavirus/'\n and passes the results to a separate process that parses the data.\"\"\"\n while True:\n res = await self.get_page(self.url_covid)\n if res == False:\n await asyncio.sleep(600)\n else:\n covid_parser_process = Process(target=self.covid_parser, args=(res, queue))\n covid_parser_process.start()\n await asyncio.sleep(43200)\n\n async def weatherbot(self, queue):\n \"\"\" Argument: queue - multiprocessing Queue instanse.\n The method is async. 
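The request parameters\n        (lat, lon, lang) and the X-Yandex-API-Key header are built just below\n        from the hard-coded Moscow coordinates. 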
It periodically gets \n 'https://api.weather.yandex.ru/v1/informers?'\"\"\"\n lat = '55.716848' # latitude of the forecast (Moscow)\n lon = '37.882962' # longitude of the forecast (Moscow)\n lang = 'ru_RU' # language of the reply (Russian)\n payload = {'lat': lat, 'lon': lon, 'lang': lang}\n headers = {'X-Yandex-API-Key': self.yandex_weather_token}\n while True:\n res = await self.get_page(self.url_weather, 'json', payload, headers)\n if res == False:\n await asyncio.sleep(600)\n else:\n self.logger.debug('Got the latest weather forecast.')\n self.forecast_json = res\n await asyncio.sleep(43200)\n\n async def process_receiver(self, queue):\n \"\"\" Argument: queue - multiprocessing Queue instanse.\n The method is asynchronously waits for dictionaries in the queue.\n If there are expected keys it assigns the values to the\n dedicated variables.\"\"\"\n while True:\n if queue.empty():\n await asyncio.sleep(1)\n else:\n data = queue.get()\n try:\n for key in data.keys():\n if key == 'news_list':\n self.news_list = data[key]\n elif key == 'rates_string':\n self.rates_string = data[key]\n elif key == 'covid_figures':\n self.covid_figures = data[key]\n else:\n self.logger.warning(f'Got unknown key from a proceess: {key}')\n except Exception as exc:\n self.logger.warning(f'Cannot process the data in the queue {exc}')\n\nif __name__ == '__main__': \n loop = asyncio.get_event_loop()\n scraper = Scraper(loop)\n loop.run_forever()\n\n__version__ = '0.96' # 10th September 2020\n__author__ = 'Dmitry Kudryashov'\n__maintainer__ = 'Dmitry Kudryashov'\n__email__ = \"dmitry-kud@yandex.ru\" \n__status__ = \"Development\"\n","sub_path":"smartmirror2/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":13066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"244978717","text":"import os\n\n#Application Debug Settings\nDEBUG = True\n\n# Builtin development application settings\nPORT = 8080\nHOST = '127.0.0.1'\n\n# application base directory setup\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n#Session Key\nSECRET_KEY = 'se2ruWr5chubru4ap_ju#r4C'\n\n#CSRF saftey\nCSRF_ENABLED = True\nCSRF_KEY = 'tHUneKuWrAP8SWaH4&t&tAPh'\n\n# Database Connection Information\nDATABASE_URI = os.path.join(BASE_DIR, 'blog/flaskr.db')\nDATABASE_CONNECT_OPTIONS = {}\n\n# User Password Hashing information\nSALT_KEY = b'tHUneKuWrAP8SWaH4&t&tAPh'\nROUNDS = 100000\nHASH_LIBRARY = 'sha256'","sub_path":"webconfig.py","file_name":"webconfig.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"37873635","text":"# Imports\r\n\r\n# Defines\r\nEQUAL_TO = \"1\"\r\nGREATER_THAN = \"2\"\r\nLESS_THAN = \"4\"\r\nNOT_EQUAL_TO = \"6\"\r\nIN = \"8\"\r\nTYPE = \"9\"\r\n\r\n\r\nclass Criteria:\r\n def __init__(self):\r\n self.conditions = []\r\n\r\n def add_condition(self, condition):\r\n self.conditions.append(condition)\r\n\r\n def remove_condition(self, condition):\r\n unwanted_condition_string = str(condition)\r\n self.remove_condition_given_string(unwanted_condition_string)\r\n\r\n def remove_condition_given_string(self, unwanted_condition_string):\r\n i = 0\r\n while i < len(self.conditions):\r\n if str(self.conditions[i]) == unwanted_condition_string:\r\n del self.conditions[i]\r\n break\r\n i += 1\r\n\r\n\r\n def __str__(self):\r\n string = \"This criteria has the following conditions:\\n\"\r\n for condition in self.conditions:\r\n string += \" - \" + str(condition) + 
\"\\n\"\r\n\r\n return string\r\n\r\n\r\nclass Condition:\r\n def __init__(self, column, condition, value):\r\n self.column = column\r\n self.condition = condition\r\n self.value = value\r\n\r\n def __str__(self):\r\n string = self.column + \" \"\r\n if self.condition == GREATER_THAN:\r\n string += \">\"\r\n elif self.condition == LESS_THAN:\r\n string += \"<\"\r\n elif self.condition == GREATER_THAN + EQUAL_TO:\r\n string += \">=\"\r\n elif self.condition == LESS_THAN + EQUAL_TO:\r\n string += \"<=\"\r\n elif self.condition == EQUAL_TO:\r\n string += \"=\"\r\n elif self.condition == NOT_EQUAL_TO:\r\n string += \"!=\"\r\n elif self.condition == IN:\r\n string += \"in\"\r\n elif self.condition == TYPE:\r\n string += \"has type in\"\r\n string += \" \"\r\n string += str(self.value)\r\n\r\n return string\r\n\r\n\r\nclass CriteriaEvaluator:\r\n def __init__(self):\r\n pass\r\n\r\n def evaluate_criteria(self, criteria, beam):\r\n check = False\r\n for condition in criteria.conditions:\r\n if condition.column in beam.columns:\r\n beam_value = beam.get_value(condition.column)\r\n if condition.condition == EQUAL_TO:\r\n check = (beam_value == condition.value)\r\n elif condition.condition == NOT_EQUAL_TO:\r\n check = (beam_value != condition.value)\r\n elif condition.condition == GREATER_THAN:\r\n check = float(beam_value) > float(condition.value)\r\n elif condition.condition == LESS_THAN:\r\n check = float(beam_value) < float(condition.value)\r\n elif condition.condition == (GREATER_THAN + EQUAL_TO):\r\n check = float(beam_value) >= float(condition.value)\r\n elif condition.condition == (LESS_THAN + EQUAL_TO):\r\n check = float(beam_value) <= float(condition.value)\r\n elif condition.condition == IN:\r\n check = beam_value in condition.value\r\n elif condition.condition == TYPE:\r\n check = beam.get_type() in condition.value\r\n else:\r\n print(\"Beam: %s\\ndoes not specify a value for %s\" % (beam, condition.column))\r\n\r\n if not check:\r\n return False\r\n\r\n return True\r\n","sub_path":"criteria.py","file_name":"criteria.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"289043500","text":"from django.shortcuts import render, redirect\n\nfrom django.contrib import messages\n\nfrom btlplcontact.forms import ContactDataForm\n\ndef index(request):\n if request.method == \"POST\":\n form = ContactDataForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request,'VALID');\n return redirect('/')\n else:\n messages.error(request, 'NOT VALID');\n else:\n form = ContactDataForm()\n template_vars = {\n \"form\": form\n }\n return render(request, \"index.html\", template_vars)\n","sub_path":"btlplcontact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"190227475","text":"#This python script will create graphs for each problem based on selection scheme\r\n#python3\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom pathlib import Path\r\n\r\nDATA_DIR = '../Data/Polished/Generations/'\r\nPROBLEMS = ['for-loop-index', 'median', 'small-or-large', 'smallest']\r\nDIMS = ['CN_1-CS_512', 'CN_2-CS_256', 'CN_4-CS_128', 'CN_8-CS_64', 'CN_16-CS_32', 'CN_32-CS_16', 'CN_64-CS_8' ,'CN_128-CS_4', 'CN_256-CS_2']\r\nDIM_DICT = {'CN_1-CS_512': 'CN:1-CS:512', 'CN_2-CS_256':'CN:2-CS:256', 'CN_4-CS_128':'CN:4-CS:128' \\\r\n , 'CN_8-CS_64': 'CN:8-CS:64', 
'CN_16-CS_32':'CN:16-CS:32', 'CN_32-CS_16':'CN:32-CS:16', \\\r\n 'CN_64-CS_8': 'CN:64-CS:8' ,'CN_128-CS_4' : 'CN:128-CS:4', 'CN_256-CS_2': 'CN:256-CS:2'}\r\nSELECTION = ['COHORT_LEX', 'DOWN_SAMPLE_TESTS' ,'PROG_ONLY_COHORT_LEX', 'TRUNCATED']\r\n\r\nfor prob in PROBLEMS:\r\n for sele in SELECTION:\r\n for dim in DIMS:\r\n \r\n PATH = DATA_DIR + prob + '__' + sele + '__' + dim + '.csv'\r\n file = Path(PATH)\r\n \r\n if file.is_file():\r\n data = pd.read_csv(PATH)\r\n gens = list(data['Generations'])\r\n cnt = list(data['Solution_Count'])\r\n \r\n plt.step(gens, cnt, where='post', label=DIM_DICT[dim])\r\n plt.xlabel('Generations')\r\n plt.ylabel('Solution Count')\r\n \r\n \r\n plt.title(prob+'_'+sele)\r\n plt.legend()\r\n plt.savefig('../Data/Figs/Generations/' + prob+'__'+sele+'.png')\r\n plt.clf()","sub_path":"Summarize/graph_timeseries_GEN.py","file_name":"graph_timeseries_GEN.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"626899445","text":"import time\n\n\ndef Fib(x, memo = {}):\n if x in memo:\n return memo[x]\n if x <= 2:\n return 1\n memo[x] = Fib(x - 1, memo) + Fib(x - 2, memo)\n return memo[x]\n\ntime_start = time.time()\ninp = int(input(\"Choose value: \"))\nsol = Fib(inp)\nprint(f\"The {inp}th Fibonacci value is {float(sol)}\")\nprint(f\"And it took {time.time()-time_start:.3f} seconds to calculate\")\n","sub_path":"Fib.py","file_name":"Fib.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"533320111","text":"from flask import render_template, session, redirect, url_for, flash, request, \\\n current_app, send_from_directory, flash, jsonify, abort\nfrom flask import make_response\nfrom ..models import User, Role, Post, Permission, Comment, Category, Reply, \\\n Message, MessageReply, Praise\nfrom ..models import Album, Photo\nfrom .forms import EditProfileForm, PostForm, CommentForm, EditProfileAdminForm, ReplyForm, \\\n MessageForm, AlbumForm, MessageReplyForm\nfrom flask_login import login_required, current_user, login_user\nfrom ..decorators import admin_required, permission_required\nfrom flask_dropzone import random_filename\nfrom ..utils import resize_image\n\nfrom .. import db, photosSet\n\nfrom .. import moment\n\nfrom . 
import main \nimport os\nfrom ..auth.forms import LoginForm\n\n@main.route('/', methods=['GET','POST'])\ndef index():\n form1 = LoginForm()\n if form1.validate_on_submit():\n user = User.query.filter_by(email=form1.email.data).first()\n if user is not None and user.verify_password(form1.password.data):\n login_user(user, form1.remember_me.data)\n return redirect(url_for('main.index'))\n flash('Invalid username or password')\n page = request.args.get('page', 1, type=int)\n show_followed = False\n #如果不是pjax请求,就判断 cookie show_followed的值, 以确定首页显示全部文章还是关注的文章\n if not \"X-PJAX\" in request.headers:\n if current_user.is_authenticated:\n show_followed = bool(request.cookies.get('show_followed', ''))\n if show_followed:\n return redirect(url_for('.show_followed'))\n query = Post.query\n pagination = query.order_by(Post.timestamp.desc()).paginate(page, \\\n per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], \\\n error_out=False)\n posts = pagination.items\n resp = make_response(render_template('index_all.html', posts=posts, pagination=pagination, form1=form1, \\\n show_followed=show_followed))\n if current_user.is_authenticated and 'X-PJAX' in request.headers:\n resp.set_cookie('show_followed', '', max_age=10*24*60*60)\n return resp\n\n@main.route('/show-followed', methods=['GET', 'POST'])\n@login_required\ndef show_followed():\n query = current_user.followed_posts\n page = request.args.get('page', 1, type=int)\n pagination = query.order_by(Post.timestamp.desc()).paginate(page,\n per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)\n posts = pagination.items\n resp = make_response(render_template('index_follow.html', posts=posts, pagination=pagination, show_followed=show_followed)) \n resp.set_cookie('show_followed', '1', max_age=10*24*60*60)\n return resp\n\n\n@main.route('/user/')\ndef user(username):\n user = User.query.filter_by(username=username).first()\n if user is None:\n abort(404)\n return render_template('user/user.html', user=user)\n\n@main.route('/user//posts')\ndef user_posts(username):\n user = User.query.filter_by(username=username).first_or_404()\n query = Post.query.filter_by(author=user)\n page = request.args.get('page', 1, type=int)\n pagination = query.order_by(Post.timestamp.desc()).paginate(page,\n per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)\n posts = pagination.items\n return render_template('user/user_post.html', user=user, posts=posts, pagination=pagination)\n \n@main.route('/edit-profile', methods=['GET','POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm()\n if form.validate_on_submit():\n current_user.name = form.name.data\n current_user.location = form.location.data\n current_user.about_me = form.about_me.data\n db.session.add(current_user)\n db.session.commit()\n flash('Your profile has been updated.')\n return render_template('edit_profile.html', form=form)\n form.name.data = current_user.name\n form.location.data = current_user.location\n form.about_me.data = current_user.about_me\n return render_template('edit_profile.html', form=form)\n\n@main.route('/edit-profile/', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_profile_admin(id):\n user = User.query.get_or_404(id)\n form = EditProfileAdminForm(user=user)\n if form.validate_on_submit():\n user.email = form.email.data\n user.username = form.username.data\n user.confirmed = form.confirmed.data\n user.role = Role.query.get(form.role.data)\n user.name = form.name.data\n user.location = form.location.data\n user.about_me = 
form.about_me.data\n db.session.add(user)\n flash('The profile has been updated.')\n return redirect(url_for('.user', username=user.username))\n form.email.data = user.email\n form.username.data = user.username\n form.confirmed.data = user.confirmed\n form.role.data = user.role_id\n form.name.data = user.name\n form.location.data = user.location\n form.about_me.data = user.about_me\n return render_template('edit_profile.html', form=form, user=user)\n \n@main.route('/post/', methods=['GET', 'POST'])\ndef post(id):\n post = Post.query.get_or_404(id)\n form = CommentForm()\n form1 = ReplyForm()\n if request.method == 'POST':\n comment = Comment(body=form.body.data,\n post=post,\n author=current_user._get_current_object())\n db.session.add(comment)\n db.session.commit()\n timestamp = moment.create(comment.timestamp).format('YY-MM-DD HH:mm') #在视图函数中渲染时间戳\n return render_template('_comment.html', comment=comment, timestamp=timestamp) \n page = request.args.get('page', 1, type=int)\n if page == -1:\n page = (post.comments.count() - 1) / \\\n current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1\n pagination = post.comments.order_by(Comment.timestamp.desc()).paginate(\n page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],\n error_out=False)\n comments = pagination.items\n return render_template('post.html', form=form, post=post,\n comments=comments, pagination=pagination, \n form1=form1)\n \n@main.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit(id):\n post = Post.query.get_or_404(id)\n if current_user != post.author and \\\n not current_user.can(Permission.ADMINISTER):\n abort(403)\n form = PostForm()\n if form.validate_on_submit():\n post.title = form.title.data\n post.body = form.body.data\n db.session.add(post)\n flash('The post has been updated.')\n return redirect(url_for('.post', id=post.id))\n form.title.data = post.title\n form.body.data = post.body\n return render_template('edit_post.html', form=form)\n \n@main.route('/post//delete')\n@login_required\ndef post_delete(id):\n post = Post.query.get_or_404(id)\n if current_user == post.author or current_user.can(Permission.ADMINISTER):\n post.post_delete(id)\n flash('文章删除成功!')\n return redirect(url_for('.index'))\n\n\n@main.route('/comment//delete', methods=['POST'])\n@login_required\ndef comment_delete(id):\n comment = Comment.query.get_or_404(id)\n if current_user == comment.author or current_user.can(Permission.ADMINISTER):\n comment.comment_delete()\n return jsonify(message=\"评论已删除!\")\n\n@main.route('/reply//delete', methods=['POST'])\n@login_required\ndef reply_delete(id):\n reply = Reply.query.get_or_404(id)\n if current_user == reply.author or current_user.can(Permission.ADMINISTER):\n reply.reply_delete()\n return jsonify(message=\"回复已删除!\")\n\n@main.route('/update-post-comments-count/')\ndef update_post_comments_count(post_id):\n post = Post.query.get_or_404(post_id)\n count = post.comments_count()\n return jsonify(count=count)\n\n@main.route('/category/')\ndef category(name):\n category = Category.query.filter_by(name=name).first()\n posts = Post.query.filter_by(category=category).order_by(Post.timestamp.desc()).all()\n return render_template('category.html', posts=posts, category=category)\n\n#回复评论\n@main.route('/reply-comment/', methods=['POST'])\ndef reply_comment(id):\n comment = Comment.query.get_or_404(id)\n if not current_user.is_authenticated:\n return jsonify(message='请先登陆!'), 403\n if request.method == 'POST':\n reply = Reply(body=request.form.get('body'),\n comment=comment,\n 
author=current_user._get_current_object(),\n replyto_id=comment.id,\n replyto_user=comment.author,\n reply_type = 'comment'\n )\n db.session.add(reply)\n db.session.commit()\n timestamp = moment.create(reply.timestamp).format('YY-MM-DD HH:mm:ss')\n return render_template(\"_comment_reply.html\", reply=reply, timestamp=timestamp)\n\n#回复评论下的回复\n@main.route('/reply-reply/', methods=['GET', 'POST'])\ndef reply_reply(id):\n reply = Reply.query.get_or_404(id)\n if request.method == 'POST':\n reply1 = Reply(body=request.form.get('body'),\n comment=reply.comment,\n author=current_user._get_current_object(),\n replyto_id=reply.id,\n replyto_user=reply.author,\n reply_type='reply')\n db.session.add(reply1)\n db.session.commit()\n timestamp = moment.create(reply1.timestamp).format('YY-MM-DD HH:mm:ss')\n return render_template(\"_comment_reply.html\", reply=reply1, timestamp=timestamp)\n\n@main.route('/write_post/', methods=['GET', 'POST'])\n@login_required\ndef write_post():\n form = PostForm()\n if current_user.can(Permission.WRITE_ARTICLES) and form.validate_on_submit():\n post = Post(title=form.title.data,\n body=form.body.data,\n category=Category.query.get(form.category.data),\n author=current_user._get_current_object())\n db.session.add(post)\n db.session.commit()\n return redirect(url_for('main.index'))\n return render_template('write_post.html', form=form)\n\n@main.route('/follow/', methods=['GET', 'POST'])\ndef follow(username):\n if not current_user.is_authenticated:\n return jsonify(message='Login Required'), 403\n if not current_user.confirmed:\n return jsonify(message='Confirmed account required.'), 400\n if not current_user.can(Permission.FOLLOW):\n return jsonify(message='No permission.'), 403\n\n user = User.query.filter_by(username=username).first_or_404()\n if current_user.is_following(user):\n return jsonify(message='Already followed.'), 400\n current_user.follow(user)\n return jsonify(message=('已关注 ' + user.username))\n\n\n@main.route('/unfollow/', methods=['GET', 'POST'])\ndef unfollow(username):\n if not current_user.is_authenticated:\n return jsonify(message='Login Required'), 403\n if not current_user.confirmed:\n return jsonify(message='Confirmed account required.'), 400\n if not current_user.can(Permission.FOLLOW):\n return jsonify(message='No permission.'), 403\n\n user = User.query.filter_by(username=username).first_or_404()\n if not current_user.is_following(user):\n return jsonify(message='Not follow yet.'), 400\n current_user.unfollow(user)\n return jsonify(message=('已取消关注对 ' + user.username + ' 的关注'))\n\n@main.route('/update-followers-count/')\ndef update_followers_count(user_id):\n user = User.query.get_or_404(user_id)\n count = user.followers.count() - 1 #用户关注自己要减去\n return jsonify(count=count)\n\n\n@main.route('/followers/')\ndef followers(username):\n user = User.query.filter_by(username=username).first()\n if user is None:\n flash('Invalid user.')\n return redirect(url_for('.index'))\n page = request.args.get('page', 1, type=int)\n pagination = user.followers.paginate(\n page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],\n error_out=False)\n follows = [{'user': item.follower, 'timestamp': item.timestamp}\n for item in pagination.items]\n return render_template('followers.html', user=user, title=\"Followers of\",\n endpoint='.followers', pagination=pagination,\n follows=follows)\n\n\n@main.route('/followed-by/')\ndef followed_by(username):\n user = User.query.filter_by(username=username).first()\n if user is None:\n flash('Invalid user.')\n return 
redirect(url_for('.index'))\n page = request.args.get('page', 1, type=int)\n pagination = user.followed.paginate(\n page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],\n error_out=False)\n follows = [{'user': item.followed, 'timestamp': item.timestamp}\n for item in pagination.items]\n return render_template('followers.html', user=user, title=\"Followed by\",\n endpoint='.followed_by', pagination=pagination,\n follows=follows)\n\n\n@main.route('/avatars/')\ndef get_avatar(filename):\n return send_from_directory(current_app.config['AVATARS_SAVE_PATH'], filename)\n\n@main.route('/images/')\ndef get_image(filename):\n return send_from_directory(current_app.config['IMAGE_SAVE_PATH'], filename)\n\n@main.route('/upload', methods=['GET', 'POST']) #文章图片上传\n@login_required\ndef upload():\n if request.method == 'POST':\n f = request.files.get('file')\n filename = random_filename(f.filename)\n f.save(os.path.join(current_app.config['IMAGE_SAVE_PATH'], filename))\n url = url_for('main.get_image', filename=filename)\n return jsonify( location = url )\n\n@main.route('/album/', methods=['GET', 'POST'])\n@login_required\ndef album(user_id):\n user = User.query.filter_by(id=user_id).first_or_404()\n albums = Album.query.filter_by(author=user).all()\n form = AlbumForm()\n if form.validate_on_submit():\n album = Album(albumname=form.albumname.data,\n decription=form.decription.data,\n author=current_user._get_current_object())\n db.session.add(album)\n db.session.commit()\n return redirect(url_for('main.upload_photos', album_id=album.id))\n return render_template('album.html', form=form, albums=albums, user=user)\n\n@main.route('/upload-photos/', methods=['GET', 'POST'])\n@login_required\ndef upload_photos(album_id):\n album = Album.query.filter_by(id=album_id).first_or_404()\n if request.method == 'POST' and 'file' in request.files:\n f = request.files.get('file')\n filename = random_filename(f.filename)\n folder = album.author.username + '/' + album.albumname\n fname = photosSet.save(f, folder=folder, name=filename)\n filename_s = resize_image(f, fname, 200)\n filename_m = resize_image(f, fname, 800)\n photo = Photo(filename=fname,\n album=album,\n filename_s=filename_s,\n filename_m=filename_m)\n db.session.add(photo)\n db.session.commit()\n return render_template('upload_photos.html', album=album)\n\n@main.route('/album_show/', methods=['GET', 'POST'])\n@login_required\ndef album_show(album_id):\n album = Album.query.filter_by(id=album_id).first_or_404()\n photos = Photo.query.filter_by(album_id=album_id).all()\n form = AlbumForm()\n if request.method == 'POST':\n album.albumname = form.albumname.data\n album.decription = form.decription.data\n db.session.add(album)\n db.session.commit()\n albumname = album.albumname\n decription = album.decription\n message = \"修改成功!\"\n return jsonify(albumname=albumname, decription=decription, message=message)\n return render_template('album_show.html', album=album, photos=photos, form=form)\n\n@main.route('/delete/photo/', methods=['GET', 'POST'])\n@login_required\ndef delete_photo(photo_id):\n photo = Photo.query.get_or_404(photo_id)\n if current_user != photo.album.author:\n abort(404)\n db.session.delete(photo)\n db.session.commit()\n flash('图片已删除!')\n return redirect(url_for('.album_show', album_id = photo.album.id))\n\n@main.route('/delete/album/', methods=['POST', 'GET'])\n@login_required\ndef delete_album(album_id):\n album = Album.query.get_or_404(album_id)\n if current_user != album.author:\n abort(404)\n db.session.delete(album)\n 
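# Commit so the album row is actually removed before flashing the confirmation.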
db.session.commit()\n flash('相册已删除!')\n return redirect(url_for('.album', user_id=album.author.id))\n\n@main.route('/user/message/', methods=['GET', 'POST'])\ndef message(username):\n form = MessageForm()\n form1 = MessageReplyForm()\n user = User.query.filter_by(username=username).first_or_404()\n if request.method == \"POST\":\n message = Message(body=form.body.data,\n author=current_user._get_current_object(),\n owner=user)\n db.session.add(message)\n db.session.commit()\n timestamp = moment.create(message.timestamp).format('YY-MM-DD HH:mm')\n return render_template('_message.html', message=message, timestamp=timestamp)\n page = request.args.get('page', 1, type=int)\n if page == -1:\n page = (user.own_messages.count() - 1) / \\\n current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1\n pagination = user.own_messages.order_by(Message.timestamp.asc()).paginate(\n page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],\n error_out=False)\n messages = pagination.items\n return render_template('message.html', user=user, form=form, pagination=pagination,\n messages=messages, form1=form1)\n\n@main.route('/update-user-messages-count/')\ndef update_user_messages_count(user_id):\n user = User.query.get_or_404(user_id)\n count = user.own_messages.count()\n return jsonify(count=count)\n\n#回复留言\n@main.route('/user/message/reply-message/', methods=['GET', 'POST'])\ndef message_replyto_message(message_id):\n message = Message.query.get_or_404(message_id)\n if request.method == 'POST':\n reply = MessageReply(body=request.form.get('body'),\n message=message,\n author=current_user._get_current_object(),\n to_user=message.author,\n reply_type=\"message\")\n db.session.add(reply)\n db.session.commit()\n timestamp = moment.create(reply.timestamp).format('YY-MM-DD HH:mm:ss')\n return render_template(\"_message_reply.html\", reply=reply, timestamp=timestamp)\n\n#回复留言中的回复\n@main.route('/user/message/reply-reply/', methods=['GET', 'POST'])\ndef message_replyto_reply(reply_id):\n reply = MessageReply.query.get_or_404(reply_id)\n message = reply.message\n to_user = reply.author\n if request.method == 'POST':\n newreply = MessageReply(body=request.form.get('body'),\n message=message,\n author=current_user._get_current_object(),\n to_user=to_user,\n reply_type=\"reply\")\n db.session.add(newreply)\n db.session.commit()\n timestamp = moment.create(newreply.timestamp).format('YY-MM-DD HH:mm:ss')\n return render_template(\"_message_reply.html\", reply=newreply, timestamp=timestamp)\n\n@main.route('/user/message//delete', methods=['POST'])\n@login_required\ndef delete_message(id):\n message = Message.query.get_or_404(id)\n if current_user != message.author and current_user != message.owner:\n return jsonify(message='请求错误!'), 403\n db.session.delete(message)\n db.session.commit()\n return jsonify(message='留言已删除!')\n\n@main.route('/user/message/reply//delete', methods=['POST'])\n@login_required\ndef delete_message_reply(id):\n reply = MessageReply.query.get_or_404(id)\n if current_user != reply.author and current_user != reply.message.owner:\n return jsonify(message='请求错误!'), 403\n db.session.delete(reply)\n db.session.commit()\n return jsonify(message='回复已删除!')\n\n#文章点赞\n@main.route('/post/praise/', methods=['GET', 'POST'])\ndef post_praise(id):\n post = Post.query.get_or_404(id)\n if not current_user.is_authenticated:\n return jsonify(message='请先登陆!'), 403\n if current_user.has_praised(post):\n return jsonify(message='你已经点过赞了!'), 400\n praise = Praise(post=post,\n user=current_user._get_current_object())\n 
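# Persist the new praise record; the JSON response below confirms it to the client.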
db.session.add(praise)\n db.session.commit()\n return jsonify(message='已赞!')\n\n#文章取消赞\n@main.route('/post/cancel_praise/', methods=['GET', 'POST'])\ndef post_cancel_praise(id):\n post = Post.query.get_or_404(id)\n praise = Praise.query.filter(Praise.post == post, Praise.user == current_user).first()\n db.session.delete(praise)\n db.session.commit()\n return jsonify(message='已取消!')\n\n#更新文章点赞数量\n@main.route('/update-post-praise-counts/')\ndef update_post_praise_counts(id):\n post = Post.query.get_or_404(id)\n counts = post.praises.count()\n return jsonify( counts = counts )\n\n\n \n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"111510595","text":"from sys import settrace\nimport inspect\nimport pickle\nimport nbformat as nbf\nimport copy\nimport linecache\n\nnb = nbf.read(\"./astIPYNB.ipynb\", 4)\nstack = []\nwith open(\"dump.txt\", \"rb\") as fp:\n lineNo = pickle.load(fp)\nwith open(\"visited.txt\", \"rb\") as fp:\n visited = pickle.load(fp)\n\nsearchLinoNo = list(zip(*lineNo[::-1]))\nsearchLinoNo = [list(searchLinoNo[0]),list(searchLinoNo[1])]\nsearchLinoNo[0].reverse()\nsearchLinoNo[1].reverse()\nprint(searchLinoNo)\nexecTrace = []\n# print(nb['cells'][0])\n\n# stack.append(stack[-1])\ncurrline = 0\nhistory = []\ncurrent_variables = {}\n\n\ndef my_tracer(frame, event, arg=None):\n global currline\n global current_variables\n global stack\n global execTrace\n global visited\n whitespace = \" \"*4\n code = frame.f_code\n func_name = code.co_name\n currInd = 0\n cellNo = 0\n if func_name == 'encode' or func_name == 'main' or func_name[0] == \"<\":\n return\n line_no = frame.f_lineno-124\n if line_no in searchLinoNo[0]:\n currInd = searchLinoNo[0].index(line_no)\n # print(currInd)\n line = linecache.getline(\"combine.py\", line_no+124)\n # print(\"line %d: %s\" % (line_no, line))\n execTrace.append(currInd)\n # currInd = searchLinoNo[0].index(line_no)\n # print(\"currind \",currInd)\n else:\n return\n \n\n if event == 'call':\n # print(currInd)\n # print(\"call lineno\",line_no)\n call_entry = \"Enter a function \" + func_name + \" with arguments\"\n for j, k in frame.f_locals.items():\n call_entry += \" \" + str(j) + \" -> \" + str(k)\n cellNo = (currInd)*2\n # print(cellNo)\n # print(nb['cells'][cellNo]['source'])\n nb['cells'][cellNo]['source'] += call_entry + \"\\n\\n\"\n\n if event == 'line':\n new_variables = inspect.stack()[1][0].f_locals\n for var in new_variables:\n if var not in current_variables:\n text = \"Introduce a variable :- \" + \\\n var + \" = \" + str(new_variables[var])\n # if currLine == len(output):\n # output.append([line_no, text])\n # else:\n # output[currLine][1] = output[currLine][1] + \\\n # \" -> \" + str(new_variables[var])\n # print(currLine,output)\n cellNo = (execTrace[-2])*2\n # print(\"cellNo \" ,cellNo)\n # if cellNo not in visited:\n nb['cells'][cellNo]['source'] += whitespace*lineNo[execTrace[-2]][1] + text + \"\\n\\n\"\n # visited.append(cellNo)\n\n\n # nb['cells'].append(nbf.v4.new_markdown_cell(text))\n # nb['cells'].append(nbf.v4.new_code_cell())\n # currLine += 1\n # print(\"
\", \"
%s
\" % (var + \" = \" + str(new_variables[var]) + \" is introduced.\"),\"
\")\n\n else:\n if new_variables[var] != current_variables[var]:\n # print(\"var \", var)\n text = var + \" = \" + \\\n str(current_variables[var]) + \\\n \" -> \" + str(new_variables[var])\n cellNo = (execTrace[-2])*2\n # print(\"hello else\", currInd)\n # print(\"cellNo \" ,cellNo)\n # if cellNo not in visited:\n nb['cells'][cellNo]['source'] += whitespace*lineNo[execTrace[-2]][1] + text + \"\\n\\n\"\n # visited.append(cellNo)\n # if currLine == len(output):\n # output.append([line_no, text])\n # else:\n # output[currLine][1] = output[currLine][1] + \\\n # \" -> \" + str(new_variables[var])\n # nb['cells'].append(nbf.v4.new_markdown_cell(text))\n # nb['cells'].append(nbf.v4.new_code_cell())\n # currLine += 1\n # print(\"
\", \"
%s
\" % (var + \" = \" + str(current_variables[var]) + \" -> \" + str(new_variables[var])),\"
\")\n\n # curr_indent = 0\n # for c in curr_code:\n # if c == \" \":\n # curr_indent += 1\n # else:\n # break\n\n current_variables = copy.deepcopy(new_variables)\n stack.append({copy.deepcopy((execTrace[-2])*2):copy.deepcopy(current_variables)})\n\n return my_tracer\n\nsettrace(my_tracer)\ndef fun():\n arr = [int(x) for x in input().split()]\n print(arr)\n i = 1\n while i < len(arr):\n \n key = arr[i]\n \n j = i-1\n while j >= 0 and key < arr[j] :\n arr[j + 1] = arr[j]\n j -= 1\n arr[j + 1] = key\n i+=1\n \n print(arr)\n return 0\nfun()\n\nsettrace(None)\n# print(history)\nnbf.write(nb, 'result.ipynb')\nwith open(\"stack.txt\", \"wb\") as fp:\n print(\"dump\")\n pickle.dump(stack, fp)\n print(\"dumped\")\nwith open(\"visited.txt\", \"wb\") as fp:\n print(\"visited 1\",visited)\n pickle.dump(visited, fp)","sub_path":"flaskr/combine_modified.py","file_name":"combine_modified.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"223116410","text":"import dpath.util\nfrom requests import get\nimport speech_recognition as sr\nimport vk,os,sys,subprocess,wave,contextlib,math,psutil\nfrom time import *\nimport datetime\nimport urllib.request\nfrom pydub import AudioSegment\nlength = 25\n\ndef pozdr(name):\n return '''$name, с днем рождения тебя!!!\nЖелаю тебе творческой реализации, пусть в твоей жизни всегда будет радость и счастье!'''.replace('$name',name)\n\ndef a_length(input_file):\n with contextlib.closing(wave.open(input_file,'r')) as f:\n frames = f.getnframes()\n rate = f.getframerate()\n duration = frames / float(rate)\n return math.ceil(duration / length)\n\ndef cut_audio(input_file,num_pieces): \n for i in range(num_pieces):\n output = 'output/'+input_file.replace('.','('+str(i)+').')\n if i==num_pieces-1:\n s = \"ffmpeg -i \"+input_file+\" -ss \"+str(i * length)+\" -t \"+str(a_length(input_file)%length)+\" -acodec copy \"+output\n else:\n s = \"ffmpeg -i \"+input_file+\" -ss \"+str(i * length)+\" -t \"+str(length)+\" -acodec copy \"+output\n print(s)\n subprocess.call(s,shell=True)\n for proc in psutil.process_iter():\n # check whether the process name matches\n if proc.name() == 'ffmpeg.exe':\n proc.kill()\n\ndef download(url, file_name, wavname):\n r = get(url, allow_redirects=True)\n open(file_name, 'wb').write(r.content)\n sound = AudioSegment.from_mp3(file_name)\n sound.export(wavname, format=\"wav\")\n\ndef links(msg):\n result = []\n ttemp = ''.join(msg)\n while 'link_mp3' in ttemp:\n start = ttemp.index(\"link_mp3\")+12\n end = ttemp.index('.mp3')+4\n result.append(ttemp[start:end])\n ttemp = ttemp[end:]\n return result\n \n\ngroup = '62884807'\nlogin = '+79969127739'\npassword = '01qi1976'\nvk_id = '6658198'\nt1 = 'c02473bf25903bbb563e04e525a21556dfae97dc52702db07f196f1842977f944adeb0fdf3155226d07ed'\nsession = vk.AuthSession(access_token=t1)\nvk_api = vk.API(session,v='5.82')\n\ndef write(ids,message):\n vk_api.messages.send(user_id=ids,message=message)\n sleep(1)\n\ndef respond_audio(link,ans_id,vk_api):\n name = link.split('/')[-1]\n wavname = name.split('.')[0]+'.wav'\n download(link,name,wavname)\n r = sr.Recognizer()\n parts = a_length(wavname)\n print(wavname,parts)\n if parts==1: \n with sr.AudioFile(wavname) as source:\n audio = r.record(source) # read the entire audio file\n try:\n text = \"Я считаю, что сказали: \" + r.recognize_google(audio, language=\"ru_RU\")\n print(ans_id,text)\n vk_api.messages.send(user_id=ans_id, message=text)\n os.remove(name)\n 
os.remove(wavname)\n except:\n vk_api.messages.send(user_id=ans_id, message='Моя твоя не понимать')\n print(ans_id,'Моя твоя не понимать')\n os.remove(name)\n os.remove(wavname)\n else:\n text = \"Я считаю, что сказали:\"\n cut_audio(wavname,parts)\n for i in range(parts):\n with sr.AudioFile('output/'+wavname.replace('.','('+str(i)+').')) as source:\n audio = r.record(source) # read the entire audio file\n try:\n text += ' '+r.recognize_google(audio, language=\"ru_RU\")\n print(ans_id,text)\n except:\n text='Моя твоя не понимать'\n print(ans_id,'Моя твоя не понимать')\n os.remove('output/'+wavname.replace('.','('+str(i)+').'))\n os.remove(name)\n os.remove(wavname)\n vk_api.messages.send(user_id=ans_id, message=text)\n\na = True\nwhile a:\n sleep(5)\n try:\n for msg in vk_api.messages.getDialogs(count=10, unread=1)['items']:\n if '!др' in str(msg['message']['body']):\n vk_api.messages.send(user_id=msg['message']['user_id'], message=pozdr(msg['message']['body'][3:]))\n if 'fwd_messages' in str(msg) and 'link_mp3' in str(msg):\n ans_id = msg['message']['user_id']\n for i in links(str(msg)):\n respond_audio(i,ans_id,vk_api)\n \n except Exception as e:\n print(e,'restart')\n session = vk.AuthSession(access_token=t1)\n vk_api = vk.API(session, v='5.82')\n \n \n \n\n \n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"241534442","text":"import cv2 as cv\nimport numpy as np\nimport pandas\nfrom skimage.util import *\nimport pandas as pd\n\ndef apply_median_filter(img,window_size):\n w,l = img.shape\n result = np.zeros((w ,l) , np.uint8)\n hw = int(window_size / 2)\n\n for i in range(hw , w - hw):\n for j in range( hw,l - hw):\n result[i - hw][j - hw] = np.median(np.squeeze(np.asarray(img[i-hw:i+hw+1,j-hw:j+hw+1])))\n print(result)\n return result\n\ndef save_result(size , variance , img , n_img):\n path = \"result/3.2.1/var\" + str(variance) + \"_size\" + str(size) + \".jpg\"\n mse = np.square(n_img - img).mean()\n result.set_value(str(variance), str(size) , mse)\n cv.imwrite(path , n_img)\n\n\nimg = cv.imread(\"image/Lena.bmp\",cv.IMREAD_GRAYSCALE)\n\nnoise_var = np.array([0.05 , 0.1 , 0.2])\nwindow_sizes = np.array([3,5,7,9])\n\nresult = pd.DataFrame(index=['0.05' , '0.1' , '0.2'],columns=['nf' , '3' , '5' , '7' , '9'])\n\nfor noise in noise_var:\n noise_img = random_noise(img , mode=\"s&p\" , salt_vs_pepper = 0.1)\n noise_img = np.array(noise_img * 255 , np.uint8) \n save_result(0 , noise , img, noise_img)\n for win in window_sizes:\n n_img = apply_median_filter(noise_img,win)\n save_result(win , noise , img , n_img)\n\n\n\nresult.to_csv(\"result1.csv\")\nprint(result)\n","sub_path":"HW3/321.py","file_name":"321.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"503079782","text":"#!/usr/bin/env python2.7\n# \n\nimport os, sys\nfrom CrabTable import *\nfrom pprint import pprint\n\ndatatable_ascii = CrabTable(os.path.dirname(sys.argv[0])+os.sep+'CrabTableTest.ascii')\ndatatable_fits = CrabTable(os.path.dirname(sys.argv[0])+os.sep+'CrabTableTest.fits')\n\n\n#asciitable.write(datatable_ascii.TableData, sys.stdout, Writer=asciitable.FixedWidthTwoLine)\n#asciitable.write(datatable_fits.TableData, sys.stdout, 
Writer=asciitable.FixedWidthTwoLine)\npprint(datatable_ascii.TableData)\npprint(datatable_fits.TableData)\n\n\ndatatable_ascii.setCell(1,1,99.0)\ndatatable_fits.setCell(1,1,99.0)\n\ndatatable_ascii.setCell('str',3,'test')\ndatatable_fits.setCell('str',3,'test')\n\n\npprint(datatable_ascii.TableData)\npprint(datatable_fits.TableData)\n\n\ndatatable_ascii.saveAs(os.path.dirname(sys.argv[0])+os.sep+'CrabTableTest.ascii.2.ascii', overwrite=True)\ndatatable_fits.saveAs(os.path.dirname(sys.argv[0])+os.sep+'CrabTableTest.fits.2.fits', overwrite=True)\n\n\n\n\n\n","sub_path":"lib/crab/crabtable/CrabTableTest.py","file_name":"CrabTableTest.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"22084348","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport openmdao.api as om\n\n\ndef make_defect_convergence_animation():\n\n cr = om.CaseReader('min_time_climb_solution_gauss-lobatto.sql')\n\n # for key in cr.system_metadata['traj.phases.phase0']:\n # print(key)\n #\n # print(cr.system_metadata['traj.phases.phase0']['component_options']['transcription'])\n # exit(0)\n\n transcription = cr.system_metadata['traj.phases.phase0']['component_options']['transcription']\n gd = transcription.grid_data\n idxs_disc = gd.subset_node_indices['state_disc']\n idxs_col = gd.subset_node_indices['col']\n\n state = 'gam'\n\n for i, case_name in enumerate(cr.list_cases()):\n fig, ax = plt.subplots(1, 1, figsize=(8, 3.5))\n fig.suptitle('Convergence of state defects over iteration history')\n ax.set_xlim(-50, 450)\n ax.set_ylim(-20, 50)\n plt.text(350, 30, f'iteration: {i}')\n\n case = cr.get_case(case_name)\n case.list_outputs(out_stream=None)\n\n # Plot the high-density interpolated solution\n ax.plot(case.get_val('traj.phase0.timeseries2.time'),\n case.get_val(f'traj.phase0.timeseries2.states:{state}', units='deg'),\n color='lightgray',\n linestyle=':')\n\n # Plot the discretization nodes\n ax.plot(case.get_val('traj.phase0.timeseries.time')[idxs_disc, ...],\n case.get_val(f'traj.phase0.timeseries.states:{state}', units='deg')[idxs_disc, ...], 'ko')\n\n # Plot the collocation nodes\n ax.plot(case.get_val('traj.phase0.timeseries.time')[idxs_col, ...],\n case.get_val(f'traj.phase0.timeseries.states:{state}', units='deg')[idxs_col, ...], 'k^')\n\n # Plot the evaluated state rates\n dgam_dt = case.get_val(f'traj.phase0.timeseries.state_rates:{state}', units='deg/s')[idxs_col, ...].ravel()\n\n dt = np.ones_like(dgam_dt)\n s = 3\n angles = 'xy'\n units='inches'\n scale_units='inches'\n w = 0.03\n\n ax.quiver(case.get_val('traj.phase0.timeseries.time')[idxs_col, ...],\n case.get_val(f'traj.phase0.timeseries.states:{state}', units='deg')[idxs_col, ...],\n dt, dgam_dt,\n units=units, angles=angles, scale=s,\n scale_units=scale_units, color='r', width=w)\n\n # Plot the interpolated state rates\n dgam_dt = case.get_val(f'traj.phase0.state_interp.staterate_col:{state}', units='deg/s').ravel()\n\n ax.quiver(case.get_val('traj.phase0.timeseries.time')[idxs_col, ...],\n case.get_val(f'traj.phase0.timeseries.states:{state}', units='deg')[idxs_col, ...],\n dt, dgam_dt,\n units=units, angles=angles, scale=s,\n scale_units=scale_units, color='b', width=w)\n\n plt.savefig(f'frames/frame_{i:02d}.pdf')\n\n\n # case.list_outputs()\n #\n #\n # fig, ax = plt.subplots(1, 1)\n\n\n\nif __name__ == '__main__':\n 
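# Generate the per-iteration defect plots when the script is run directly.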
make_defect_convergence_animation()\n","sub_path":"slides/SourceCodes/dymos_animations/defect_convergence_lgl/dymos_defect_convergence_lgl.py","file_name":"dymos_defect_convergence_lgl.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"28713334","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 18 14:23:27 2017\n\n@author: Rebecca\n\"\"\"\n\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD,RMSprop,Adam\nfrom NNet import NNet\nfrom importarVariavel import importarDados\nfrom normalizarCaracteristica import normalizarCaracteristica\nfrom confusion_matrix import confusion_matrix\n\n#Importing data\nfilepath = r\"C:\\Users\\Rebecca\\Desktop\\credtrain.txt\"\nX,Y = importarDados(filepath,['ESTC', 'NDEP', 'RENDA', 'TIPOR', 'VBEM', 'NPARC',\n 'VPARC', 'TEL', 'IDADE', 'RESMS', 'ENTRADA', 'CLASSE'])\nfilepath = r\"C:\\Users\\Rebecca\\Desktop\\credtest.txt\"\nX_test,Y_test = importarDados(filepath,['ESTC', 'NDEP', 'RENDA', 'TIPOR', 'VBEM', 'NPARC',\n 'VPARC', 'TEL', 'IDADE', 'RESMS', 'ENTRADA', 'CLASSE'])\n\n#Normalization of input data\nX = normalizarCaracteristica(X)[0]\nX_test = normalizarCaracteristica(X_test)[0]\n\n#Set parameters\ninput_shape=(11,)\nlayers=[Dense(5,activation='sigmoid',input_shape=input_shape),\n Dense(1,activation='sigmoid')]\nloss='mean_squared_error'\noptimizer=SGD(lr=0.1) #Test\nmetrics=['accuracy']\nepochs=250\nbatch_size=100\nvalidation_data=(X_test, Y_test)\nshuffle=True\nplot=True\n\nmodel,history = NNet(X,Y,layers=layers,loss=loss,optimizer=optimizer,\n metrics=metrics,epochs=epochs,batch_size=batch_size, verbose=2,\n validation_data=validation_data,shuffle=shuffle, plot=plot)\nmodel.summary()\n\nscore = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nprint('Confusion matrix:\\n', str(confusion_matrix(Y_test, model.predict(X_test).round())))","sub_path":"Aprendizado_de_Maquina/Trabalho_3/Trabalho 3/DenseNet_1.2.py","file_name":"DenseNet_1.2.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"518348218","text":"'''NESTE EXERCÍCIO ALÉM DE USARMOS O FOR USAMOS TB A CRIANÇÃO DE UMA FUNÇÃO CONTADORA'''\nfrom time import sleep\nlista = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\ndef contadora_2(x):\n for i in lista:\n print(i)\n sleep(1)\n\n\ncontadora_2(lista)\nprint('\\033[1;35m'+'boommmmmm'.upper()+'\\033[m')\nsleep(1)\nprint('\\033[1;34m'+'boommmmmmmmmmmm'.upper()+'\\033[m')","sub_path":"Curso_em_video_mundo_2/Exercícios_mundo_2/Exercicio_46_for_e_CRIANDO_FUNCOES.py","file_name":"Exercicio_46_for_e_CRIANDO_FUNCOES.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"346787718","text":"__author__ = 'aleksandr'\n\nfrom django.conf import settings\nfrom django.db.models.query import EmptyQuerySet\nfrom django.contrib import admin\nfrom django.contrib.auth.models import Group\nfrom main.models import Player\n\n\nclass PlayerAdmin(admin.ModelAdmin):\n list_display = ('__unicode__', 'rating')\n list_filter = ('rating',)\n\n def get_form(self, request, obj=None, **kwargs):\n '''\n This method override PlayerModelForm.base_fields['user'].queryset to all\n no-players users from 'Players' group\n '''\n qs = EmptyQuerySet\n try:\n players = Player.objects.all()\n if obj:\n players = players.exclude(pk=obj.pk)\n qs = 
Group.objects.get(name=settings.PLAYERS_GROUP_NAME).user_set.exclude(player__in=players)\n except Group.DoesNotExist:\n pass\n mfc = super(PlayerAdmin, self).get_form(request, obj, **kwargs)\n mfc.base_fields['user'].queryset = qs\n return mfc\n\n def get_changelist_formset(self, request, **kwargs):\n return super(PlayerAdmin, self).get_changelist_formset(request, **kwargs)","sub_path":"main/admin/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"210468666","text":"import datetime\n\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\ndef load_data(fileName : str):\n #read .xyz datafile into pandas data frame\n dataFrame = pd.read_csv(fileName, sep='\\s+',header=[18])\n assert dataFrame.columns[1] == 'TIMESTAMP', \"Header is wrong. Location 1 yield: \" + str(dataFrame.columns[1])\n #correct header names\n #first column is a \\ due to data file. Thus labels are shifted one back. Column 179 is empty and thus deleted\n header = dataFrame.columns.drop('/')\n dataFrame.drop(\"DBDT_INUSE_Ch2GT33\", axis=1, inplace=True)\n dataFrame.columns = header\n #assert that there are no NAN\n assert not dataFrame.isnull().values.any(), \"NAN value in data: \" + str(dataFrame.isnull().sum().sum())\n return dataFrame\n\ndef load_data2(fileName : str, gF : int, gT : int):\n dataFrame = load_data(fileName)\n gtimes = pd.read_csv(fileName, sep='\\s+', skiprows=17, nrows=1, header=None)\n gtimes = gtimes.iloc[:, gF:gT+1].values\n gFrom = 'DBDT_Ch2GT' + str(gF)\n gTo = 'DBDT_Ch2GT' + str(gT)\n lblFrom = 'DBDT_INUSE_Ch2GT' + str(gF)\n lblTo = 'DBDT_INUSE_Ch2GT' + str(gT)\n dbdt = dataFrame.loc[:,gFrom:gTo].values\n lbl = dataFrame.loc[:,lblFrom:lblTo].values\n dumdbdt = dbdt == 99999\n dumlbl = lbl == 99999\n assert not dumdbdt.any().any(), 'Dummy values in DBDT present: ' + str(dumdbdt.any().sum().sum())\n assert not dumlbl.any().any(), 'Dummy values in label present: ' + str(dumlbl.any().sum().sum())\n assert (lbl.T == lbl[:, 0]).any(), 'Labels are not equal'\n lbl = lbl[:, 0]\n return dataFrame, dbdt, lbl, dataFrame.loc[:, 'TIMESTAMP'].values, gtimes\n\n#remove soundings around edges\n#notice if first hole is before cutoff edge we have a problem.. 
and last\ndef remove_edge(timestamp, dbdt, lbl, nremove):\n timestampOG = timestamp\n from sklearn.preprocessing import MinMaxScaler\n mm = MinMaxScaler()\n timestamp = np.insert(timestamp, 0, timestamp[0])\n timestamp = np.reshape(timestamp, (timestamp.shape[0], 1))\n t_diff = np.diff(timestamp, axis=0)\n t_diff = mm.fit_transform(t_diff)\n # sns.distplot(t_diff, 1000)\n t_diff = np.where(t_diff > 0.011) #0.011)\n for i, idx in enumerate(t_diff[0]):\n b = nremove\n dbdt[idx-b : idx + b, :] = np.nan\n lbl[idx-b : idx + b] = np.nan\n timestampOG[idx-b : idx + b] = np.nan\n #super weird, I dunno why i cant just do as with the array...\n lbl = lbl[lbl >= 0]\n timestampOG = timestampOG[timestampOG >= 0]\n return dbdt[~np.isnan(dbdt).any(axis=1)], lbl, timestampOG\n\n\ndef timestampToTime(timestamp):\n b = timestamp - timestamp[0]\n customdate = datetime.datetime(2016, 1, 1, 13, 30)\n return [customdate + datetime.timedelta(days=t) for t in b]\n","sub_path":"utilities/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"311529079","text":"import os\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport cv2\nimport colorsys\nimport random\n\nfrom keras.layers import Input, Conv2D, BatchNormalization, LeakyReLU, MaxPooling2D, Lambda\nfrom keras.regularizers import l2\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom PIL import Image, ImageDraw, ImageFont\n\n\nYOLO_ANCHORS = np.array(\n ((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),\n (7.88282, 3.52778), (9.77052, 9.16828)))\n\nDATA_PATH = os.path.expanduser('~/Downloads/object-detection-crowdai')\n\n\ndef yolo_head(feats, anchors, num_classes):\n num_anchors = len(anchors)\n\n # Reshape to batch, height, width, num_anchors, box_params.\n anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])\n\n # Dynamic implementation of conv dims for fully convolutional model.\n conv_dims = K.shape(feats)[1:3] # assuming channels last\n # In YOLO the height index is the inner most iteration.\n conv_height_index = K.arange(0, stop=conv_dims[0])\n conv_width_index = K.arange(0, stop=conv_dims[1])\n conv_height_index = K.tile(conv_height_index, [conv_dims[1]])\n\n # TODO: Repeat_elements and tf.split doesn't support dynamic splits.\n # conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)\n conv_width_index = K.tile(\n K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])\n conv_width_index = K.flatten(K.transpose(conv_width_index))\n conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))\n conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])\n conv_index = K.cast(conv_index, K.dtype(feats))\n\n feats = K.reshape(\n feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])\n conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))\n\n box_xy = K.sigmoid(feats[..., :2])\n box_wh = K.exp(feats[..., 2:4])\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.softmax(feats[..., 5:])\n\n box_xy = (box_xy + conv_index) / conv_dims\n box_wh = box_wh * anchors_tensor / conv_dims\n\n return box_xy, box_wh, box_confidence, box_class_probs\n\n\ndef yolo_loss(args, anchors, num_classes, rescore_confidence=False, print_loss=False):\n 
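# Unpack the four tensors supplied through the Keras Lambda layer: raw detector output, ground-truth boxes, and the two precomputed masks.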
(yolo_output, true_boxes, detectors_mask, matching_true_boxes) = args\n\n num_anchors = len(anchors)\n object_scale = 5\n no_object_scale = 1\n class_scale = 1\n coordinates_scale = 1\n pred_xy, pred_wh, pred_confidence, pred_class_prob = yolo_head(\n yolo_output, anchors, num_classes)\n\n # Unadjusted box predictions for loss.\n # TODO: Remove extra computation shared with yolo_head.\n yolo_output_shape = K.shape(yolo_output)\n feats = K.reshape(yolo_output, [\n -1, yolo_output_shape[1], yolo_output_shape[2], num_anchors,\n num_classes + 5\n ])\n pred_boxes = K.concatenate(\n (K.sigmoid(feats[..., 0:2]), feats[..., 2:4]), axis=-1)\n\n # TODO: Adjust predictions by image width/height for non-square images?\n # IOUs may be off due to different aspect ratio.\n\n # Expand pred x,y,w,h to allow comparison with ground truth.\n # batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params\n pred_xy = K.expand_dims(pred_xy, 4)\n pred_wh = K.expand_dims(pred_wh, 4)\n\n pred_wh_half = pred_wh / 2.\n pred_mins = pred_xy - pred_wh_half\n pred_maxes = pred_xy + pred_wh_half\n\n true_boxes_shape = K.shape(true_boxes)\n\n # batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params\n true_boxes = K.reshape(true_boxes, [\n true_boxes_shape[0], 1, 1, 1, true_boxes_shape[1], true_boxes_shape[2]\n ])\n true_xy = true_boxes[..., 0:2]\n true_wh = true_boxes[..., 2:4]\n\n # Find IOU of each predicted box with each ground truth box.\n true_wh_half = true_wh / 2.\n true_mins = true_xy - true_wh_half\n true_maxes = true_xy + true_wh_half\n\n intersect_mins = K.maximum(pred_mins, true_mins)\n intersect_maxes = K.minimum(pred_maxes, true_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]\n\n pred_areas = pred_wh[..., 0] * pred_wh[..., 1]\n true_areas = true_wh[..., 0] * true_wh[..., 1]\n\n union_areas = pred_areas + true_areas - intersect_areas\n iou_scores = intersect_areas / union_areas\n\n # Best IOUs for each location.\n best_ious = K.max(iou_scores, axis=4) # Best IOU scores.\n best_ious = K.expand_dims(best_ious)\n\n # A detector has found an object if IOU > thresh for some true box.\n object_detections = K.cast(best_ious > 0.6, K.dtype(best_ious))\n\n # TODO: Darknet region training includes extra coordinate loss for early\n # training steps to encourage predictions to match anchor priors.\n\n # Determine confidence weights from object and no_object weights.\n # NOTE: YOLO does not use binary cross-entropy here.\n no_object_weights = (no_object_scale * (1 - object_detections) *\n (1 - detectors_mask))\n no_objects_loss = no_object_weights * K.square(-pred_confidence)\n\n if rescore_confidence:\n objects_loss = (object_scale * detectors_mask *\n K.square(best_ious - pred_confidence))\n else:\n objects_loss = (object_scale * detectors_mask *\n K.square(1 - pred_confidence))\n confidence_loss = objects_loss + no_objects_loss\n\n # Classification loss for matching detections.\n # NOTE: YOLO does not use categorical cross-entropy loss here.\n matching_classes = K.cast(matching_true_boxes[..., 4], 'int32')\n matching_classes = K.one_hot(matching_classes, num_classes)\n classification_loss = (class_scale * detectors_mask *\n K.square(matching_classes - pred_class_prob))\n\n # Coordinate loss for matching detection boxes.\n matching_boxes = matching_true_boxes[..., 0:4]\n coordinates_loss = (coordinates_scale * detectors_mask *\n K.square(matching_boxes - pred_boxes))\n\n confidence_loss_sum = 
K.sum(confidence_loss)\n classification_loss_sum = K.sum(classification_loss)\n coordinates_loss_sum = K.sum(coordinates_loss)\n total_loss = 0.5 * (\n confidence_loss_sum + classification_loss_sum + coordinates_loss_sum)\n if print_loss:\n total_loss = tf.Print(\n total_loss, [\n total_loss, confidence_loss_sum, classification_loss_sum,\n coordinates_loss_sum\n ],\n message='yolo_loss, conf_loss, class_loss, box_coord_loss:')\n\n return total_loss\n\n\ndef create_model(class_names, prediction=False):\n n_classes = len(class_names)\n n_anchors = 5\n\n detectors_mask_shape = (13, 13, 5, 1)\n matching_boxes_shape = (13, 13, 5, 5)\n\n # Create model input layers.\n image_input = Input(shape=(416, 416, 3))\n boxes_input = Input(shape=(None, 5))\n detectors_mask_input = Input(shape=detectors_mask_shape)\n matching_boxes_input = Input(shape=matching_boxes_shape)\n\n # Conv1\n x = Conv2D(16, (3, 3), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(image_input)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n x = MaxPooling2D()(x)\n\n # Conv2\n x = Conv2D(32, (3, 3), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n x = MaxPooling2D()(x)\n\n # Conv3\n x = Conv2D(64, (3, 3), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n x = MaxPooling2D()(x)\n\n # Conv4\n x = Conv2D(128, (3, 3), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n x = MaxPooling2D()(x)\n\n # Conv5\n x = Conv2D(256, (3, 3), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n x = MaxPooling2D()(x)\n\n # Conv6\n x = Conv2D(512, (3, 3), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n x = MaxPooling2D(padding='same', pool_size=2, strides=(1, 1))(x)\n\n # Conv7\n x = Conv2D(1024, (3, 3), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n # Conv8\n x = Conv2D(1024, (3, 3), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n # Conv9\n x = Conv2D(n_anchors * (n_classes + 5), (1, 1), padding='same', kernel_regularizer=l2(5e-4), use_bias=False)(x)\n\n model_body = Model(image_input, x)\n\n if not prediction:\n model_loss = Lambda(\n yolo_loss,\n output_shape=(1, ),\n name='yolo_loss',\n arguments={'anchors': YOLO_ANCHORS,\n 'num_classes': len(class_names)})([\n model_body.output, boxes_input,\n detectors_mask_input, matching_boxes_input\n ])\n\n model = Model(\n [model_body.input, boxes_input, detectors_mask_input,\n matching_boxes_input], model_loss)\n else:\n model = model_body\n\n print(model.summary())\n\n return model_body, model\n\n\ndef get_detector_mask(boxes, anchors):\n '''\n Precompute detectors_mask and matching_true_boxes for training.\n Detectors mask is 1 for each spatial position in the final conv layer and\n anchor that should be active for the given boxes and 0 otherwise.\n Matching true boxes gives the regression targets for the ground truth box\n that caused a detector to be active or 0 otherwise.\n '''\n detectors_mask = [0 for i in range(len(boxes))]\n matching_true_boxes = [0 for i in range(len(boxes))]\n for i, box in enumerate(boxes):\n detectors_mask[i], 
matching_true_boxes[i] = preprocess_true_boxes(box[None,...][0], anchors, [416, 416])\n\n return np.array(detectors_mask), np.array(matching_true_boxes)\n\n\ndef preprocess_true_boxes(true_boxes, anchors, image_size):\n \"\"\"Find detector in YOLO where ground truth box should appear.\n\n Parameters\n ----------\n true_boxes : array\n List of ground truth boxes in form of relative x, y, w, h, class.\n Relative coordinates are in the range [0, 1] indicating a percentage\n of the original image dimensions.\n anchors : array\n List of anchors in form of w, h.\n Anchors are assumed to be in the range [0, conv_size] where conv_size\n is the spatial dimension of the final convolutional features.\n image_size : array-like\n List of image dimensions in form of h, w in pixels.\n\n Returns\n -------\n detectors_mask : array\n 0/1 mask for detectors in [conv_height, conv_width, num_anchors, 1]\n that should be compared with a matching ground truth box.\n matching_true_boxes: array\n Same shape as detectors_mask with the corresponding ground truth box\n adjusted for comparison with predicted parameters at training time.\n \"\"\"\n height, width = image_size\n num_anchors = len(anchors)\n # Downsampling factor of 5x 2-stride max_pools == 32.\n # TODO: Remove hardcoding of downscaling calculations.\n assert height % 32 == 0, 'Image sizes in YOLO_v2 must be multiples of 32.'\n assert width % 32 == 0, 'Image sizes in YOLO_v2 must be multiples of 32.'\n conv_height = height // 32\n conv_width = width // 32\n num_box_params = true_boxes.shape[1]\n detectors_mask = np.zeros(\n (conv_height, conv_width, num_anchors, 1), dtype=np.float32)\n matching_true_boxes = np.zeros(\n (conv_height, conv_width, num_anchors, num_box_params),\n dtype=np.float32)\n\n for box in true_boxes:\n # scale box to convolutional feature spatial dimensions\n box_class = box[4:5]\n box = box[0:4] * np.array([conv_width, conv_height, conv_width, conv_height])\n i = np.floor(box[1]).astype('int')\n j = np.floor(box[0]).astype('int')\n best_iou = 0\n best_anchor = 0\n for k, anchor in enumerate(anchors):\n # Find IOU between box shifted to origin and anchor box.\n box_maxes = box[2:4] / 2.\n box_mins = -box_maxes\n anchor_maxes = (anchor / 2.)\n anchor_mins = -anchor_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[0] * intersect_wh[1]\n box_area = box[2] * box[3]\n anchor_area = anchor[0] * anchor[1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n if iou > best_iou:\n best_iou = iou\n best_anchor = k\n\n if best_iou > 0:\n detectors_mask[i, j, best_anchor] = 1\n adjusted_box = np.array(\n [\n box[0] - j, box[1] - i,\n np.log(box[2] / anchors[best_anchor][0]),\n np.log(box[3] / anchors[best_anchor][1]), box_class\n ],\n dtype=np.float32)\n matching_true_boxes[i, j, best_anchor] = adjusted_box\n return detectors_mask, matching_true_boxes\n\n\nclass DataGenerator(object):\n def __init__(self, anchors, dim_x=416, dim_y=416, batch_size=32, shuffle=True):\n 'Initialization'\n self.anchors = anchors\n self.dim_x = dim_x\n self.dim_y = dim_y\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n def generate(self, data):\n 'Generates batches of samples'\n # Infinite loop\n files = data.Frame.unique()\n while 1:\n # Generate batches\n imax = int(len(files) / self.batch_size)\n for i in range(imax):\n # Find list of IDs\n batch_files = list(files[i * 
self.batch_size:(i + 1) * self.batch_size])\n batch = data[data.Frame.isin(batch_files)]\n\n # Generate data\n images, boxes, detectors_mask, matching_true_boxes = self.__data_generation(batch)\n\n yield [images, boxes, detectors_mask, matching_true_boxes], np.zeros(self.batch_size)\n\n def __read_image(self, filename):\n img = cv2.imread(filename)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (416, 416))\n return img.astype(np.float32) / 255.\n\n def __data_generation(self, batch):\n files = batch.Frame.unique()\n image_files = [os.path.join(DATA_PATH, f) for f in files]\n images = [self.__read_image(f) for f in image_files]\n\n boxes = []\n max_boxz = 0\n for f in files:\n rows = batch[batch.Frame == f]\n frame_boxes = []\n for _, row in rows.iterrows():\n frame_boxes.append(row[['x', 'y', 'w', 'h', 'c']].as_matrix())\n max_boxz = max(max_boxz, len(frame_boxes))\n boxes.append(np.array(frame_boxes))\n\n for i, box in enumerate(boxes):\n if len(box) < max_boxz:\n zero_padding = np.zeros((max_boxz - len(box), 5))\n boxes[i] = np.vstack((box, zero_padding))\n\n boxes = np.array(boxes)\n detectors_mask, matching_true_boxes = get_detector_mask(boxes, self.anchors)\n\n # print((boxes.shape, detectors_mask.shape, matching_true_boxes.shape))\n\n return np.array(images), boxes, detectors_mask, matching_true_boxes\n\n\ndef yolo_boxes_to_corners(box_xy, box_wh):\n \"\"\"Convert YOLO box predictions to bounding box corners.\"\"\"\n box_mins = box_xy - (box_wh / 2.)\n box_maxes = box_xy + (box_wh / 2.)\n\n return K.concatenate([\n box_mins[..., 1:2], # y_min\n box_mins[..., 0:1], # x_min\n box_maxes[..., 1:2], # y_max\n box_maxes[..., 0:1] # x_max\n ])\n\ndef yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):\n \"\"\"Filter YOLO boxes based on object and class confidence.\"\"\"\n box_scores = box_confidence * box_class_probs\n box_classes = K.argmax(box_scores, axis=-1)\n box_class_scores = K.max(box_scores, axis=-1)\n prediction_mask = box_class_scores >= threshold\n\n # TODO: Expose tf.boolean_mask to Keras backend?\n boxes = tf.boolean_mask(boxes, prediction_mask)\n scores = tf.boolean_mask(box_class_scores, prediction_mask)\n classes = tf.boolean_mask(box_classes, prediction_mask)\n return boxes, scores, classes\n\n\ndef yolo_eval(yolo_outputs,\n image_shape,\n max_boxes=10,\n score_threshold=.6,\n iou_threshold=.5):\n \"\"\"Evaluate YOLO model on given input batch and return filtered boxes.\"\"\"\n box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs\n boxes = yolo_boxes_to_corners(box_xy, box_wh)\n boxes, scores, classes = yolo_filter_boxes(\n boxes, box_confidence, box_class_probs, threshold=score_threshold)\n\n # Scale boxes back to original image shape.\n height = image_shape[0]\n width = image_shape[1]\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n\n # TODO: Something must be done about this ugly hack!\n max_boxes_tensor = K.variable(max_boxes, dtype='int32')\n K.get_session().run(tf.variables_initializer([max_boxes_tensor]))\n nms_index = tf.image.non_max_suppression(\n boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)\n boxes = K.gather(boxes, nms_index)\n scores = K.gather(scores, nms_index)\n classes = K.gather(classes, nms_index)\n return boxes, scores, classes\n\n\ndef _eval():\n model_path = 'trained_stage_3_best.h5'\n anchor_path = 'tiny-yolo_anchors.txt'\n # Load anchors\n with open(anchor_path) as f:\n anchors = f.readline()\n 
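# Parse the comma-separated anchor values into an (N, 2) array of width/height pairs.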
anchors = [float(x) for x in anchors.split(',')]\n anchors = np.array(anchors).reshape(-1, 2)\n\n data = pd.read_csv(os.path.join(DATA_PATH, 'labels2.csv'))\n class_names = data.Label.unique()\n\n lenc = LabelEncoder()\n data['c'] = lenc.fit_transform(data.Label)\n\n hsv_tuples = [(x / len(class_names), 1., 1.)\n for x in range(len(class_names))]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n colors))\n random.seed(10101) # Fixed seed for consistent colors across runs.\n random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.\n random.seed(None) # Reset seed to default.\n\n print('Creating model...')\n\n model_body, yolo_model = create_model(lenc.classes_, prediction=True)\n yolo_model.load_weights(model_path)\n\n # image_file = '/Users/meshams/Documents/Learn/temp/YAD2K/images/test1.jpg'\n image_file = '/Users/meshams/Downloads/object-detection-crowdai/1479498392966162658.jpg'\n\n image = cv2.imread(image_file)\n\n height, width = image.shape[0:2]\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (416, 416))\n image = image.astype(np.float32) / 255.\n image = image[np.newaxis,:]\n\n sess = K.get_session()\n\n model_image_size = yolo_model.layers[0].input_shape[1:3]\n\n yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))\n\n input_image_shape = K.placeholder(shape=(2,))\n\n boxes, scores, classes = yolo_eval(\n yolo_outputs,\n input_image_shape,\n score_threshold=0.7,\n iou_threshold=0.5)\n\n out_boxes, out_scores, out_classes = sess.run(\n [boxes, scores, classes],\n feed_dict={\n yolo_model.input: image,\n input_image_shape: [height, width],\n K.learning_phase(): 0\n })\n print('Found {} boxes for {}'.format(len(out_boxes), image_file))\n\n font = ImageFont.truetype(\n font='font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * height + 0.5).astype('int32'))\n thickness = (width + height) // 300\n\n image_orig = Image.open(image_file)\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n\n draw = ImageDraw.Draw(image_orig)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(height, np.floor(bottom + 0.5).astype('int32'))\n right = min(width, np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n image_orig.save(os.path.split(image_file)[-1], quality=90)\n\n sess.close()\n\n\n\ndef _main():\n # model_path = '/Users/meshams/Documents/Learn/temp/YAD2K/model_data/tiny-yolo.h5'\n anchor_path = 'tiny-yolo_anchors.txt'\n\n data = pd.read_csv(os.path.join(DATA_PATH, 'labels2.csv'))\n class_names = data.Label.unique()\n\n lenc = LabelEncoder()\n data['c'] = lenc.fit_transform(data.Label)\n\n data = data.sample(frac=0.001, random_state=1000)\n\n 
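# Report how many labelled rows survive the 0.1% subsample taken above.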
print(len(data))\n\n mask = np.random.rand(len(data)) < 0.9\n\n train_data = data[mask]\n valid_data = data[~mask]\n\n # Load anchors\n with open(anchor_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n anchors = np.array(anchors).reshape(-1, 2)\n\n print('Creating model...')\n\n model_body, model = create_model(lenc.classes_)\n\n model.compile(optimizer='adam', loss={\n 'yolo_loss': lambda y_true, y_pred: y_pred\n })\n\n batch_size = 8\n\n train_generator = DataGenerator(anchors, batch_size=batch_size).generate(train_data)\n valid_generator = DataGenerator(anchors, batch_size=batch_size).generate(valid_data)\n\n logging = TensorBoard()\n checkpoint = ModelCheckpoint(\"train.{epoch:02d}-{val_loss:.2f}.h5\", monitor='val_loss',\n save_weights_only=False, save_best_only=True)\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')\n\n model.fit_generator(generator=train_generator, steps_per_epoch=len(train_data) // batch_size,\n validation_data=valid_generator, validation_steps=len(valid_data) // batch_size,\n callbacks=[logging], epochs=10)\n\n model.save_weights('trained_stage_1.h5')\n\n model_body, model = create_model(lenc.classes_)\n\n model.load_weights('trained_stage_1.h5')\n\n model.compile(optimizer='adam', loss={\n 'yolo_loss': lambda y_true, y_pred: y_pred\n })\n\n model.fit_generator(generator=train_generator, steps_per_epoch=len(train_data) // batch_size,\n validation_data=valid_generator, validation_steps=len(valid_data) // batch_size,\n callbacks=[logging, checkpoint, early_stopping], epochs=30)\n\n\nif __name__ == \"__main__\":\n # _main()\n _eval()\n","sub_path":"train_yolo.py","file_name":"train_yolo.py","file_ext":"py","file_size_in_byte":24034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"95897472","text":"#!/usr/bin/env python\n\n# Copyright 2017 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tensorflow as tf\nimport featurizer\nimport metadata\n\n\n# ******************************************************************************\n# YOU MAY MODIFY THESE FUNCTIONS TO CONFIGURE THE CANNED ESTIMATOR\n# ******************************************************************************\n\n\ndef create(args, config):\n    \"\"\" Create a DNNLinearCombinedEstimator based on metadata.TASK_TYPE\n\n    Args:\n        args: experiment parameters.\n        config: tf.RunConfig object.\n    Returns:\n        DNNLinearCombinedClassifier or DNNLinearCombinedRegressor\n    \"\"\"\n\n    wide_columns, deep_columns = featurizer.create_wide_and_deep_columns(args)\n    logging.info('Wide columns: {}'.format(wide_columns))\n    logging.info('Deep columns: {}'.format(deep_columns))\n\n    # Change the optimizers for the wide and deep parts of the model if you wish\n    linear_optimizer = tf.train.FtrlOptimizer(learning_rate=args.learning_rate)\n    # Use _update_optimizer to implement an adaptive learning rate\n    dnn_optimizer = lambda: _update_optimizer(args)\n\n    if metadata.TASK_TYPE == 'classification':\n        estimator = tf.estimator.DNNLinearCombinedClassifier(\n            n_classes=len(metadata.TARGET_LABELS),\n            label_vocabulary=metadata.TARGET_LABELS,\n            linear_optimizer=linear_optimizer,\n            linear_feature_columns=wide_columns,\n            dnn_feature_columns=deep_columns,\n            dnn_optimizer=dnn_optimizer,\n            weight_column=metadata.WEIGHT_COLUMN_NAME,\n            dnn_hidden_units=_construct_hidden_units(args),\n            dnn_activation_fn=tf.nn.relu,\n            dnn_dropout=args.dropout_prob,\n            batch_norm=True,\n            config=config,\n        )\n    else:\n        estimator = tf.estimator.DNNLinearCombinedRegressor(\n            linear_optimizer=linear_optimizer,\n            linear_feature_columns=wide_columns,\n            dnn_feature_columns=deep_columns,\n            dnn_optimizer=dnn_optimizer,\n            weight_column=metadata.WEIGHT_COLUMN_NAME,\n            dnn_hidden_units=_construct_hidden_units(args),\n            dnn_activation_fn=tf.nn.relu,\n            dnn_dropout=args.dropout_prob,\n            batch_norm=True,\n            config=config,\n        )\n\n    return estimator\n\n# ******************************************************************************\n# YOU NEED NOT CHANGE THESE HELPER FUNCTIONS\n# ******************************************************************************\n\n\ndef _construct_hidden_units(args):\n    \"\"\" Create the number of hidden units in each layer\n\n    If the args.layer_sizes_scale_factor > 0 then it will use a \"decay\" mechanism\n    to define the number of units in each layer. 
Otherwise, arg.hidden_units\n    will be used as-is.\n\n    Returns:\n        list of int\n    \"\"\"\n    hidden_units = [int(units) for units in args.hidden_units.split(',')]\n\n    if args.layer_sizes_scale_factor > 0:\n        first_layer_size = hidden_units[0]\n        scale_factor = args.layer_sizes_scale_factor\n        num_layers = args.num_layers\n\n        hidden_units = [\n            max(2, int(first_layer_size * scale_factor ** i))\n            for i in range(num_layers)\n        ]\n\n    logging.info(\"Hidden units structure: {}\".format(hidden_units))\n\n    return hidden_units\n\n\ndef _update_optimizer(args):\n    \"\"\"Create an optimizer with a decaying learning rate.\n    Args:\n        args: experiment parameters\n    Returns:\n        Optimizer\n    \"\"\"\n\n    # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)\n    # See: https://www.tensorflow.org/api_docs/python/tf/train/exponential_decay\n    learning_rate = tf.train.exponential_decay(\n        learning_rate=args.learning_rate,\n        global_step=tf.train.get_global_step(),\n        decay_steps=args.train_steps,\n        decay_rate=args.learning_rate_decay_factor\n    )\n\n    tf.summary.scalar('learning_rate', learning_rate)\n\n    # By default, AdamOptimizer is used. You can change the type of the optimizer.\n    return tf.train.AdamOptimizer(learning_rate=learning_rate)\n\n","sub_path":"cloudml-template/examples/classification/census/trainer/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"249726153","text":"import numpy as np\nimport pandas as pd\n\nimport basicDeltaOperations as op\nimport calcIsotopologues as ci\n\n'''\nThis code extracts the concentrations of isotopologues of interest from the dictionary of all isotopologues \nin order to predict the outcomes of measurements. It also allows one to fragment the isotopologues to compute the \noutcome of fragment measurements. \n\nIt assumes one has access to a dictionary with information about the isotopologues. See calcIsotopologues.py. \n'''\n\n#Gives an easy way to recover an isotope from an element and its cardinal mass representation. \nsubDict = {'C':{'0':'','1':'13C'},\n           'N':{'0':'','1':'15N'},\n           'H':{'0':'','1':'D'},\n           'O':{'0':'','1':'17O','2':'18O'},\n           'S':{'0':'','1':'33S','2':'34S','4':'36S'}}\n\n#An easy way to recover the mass of an isotope from an element and its cardinal mass representation\nmassDict = {'C':{'0':12,'1':13.00335484},\n            'N':{'0':14.003074,'1':15.00010889},\n            'H':{'0':1.007825032,'1':2.014101778},\n            'O':{'0':15.99491462,'1':16.99913175,'2':17.9991596},\n            'S':{'0':31.97207117,'1':32.9714589,'2':33.96786701,'4':35.9670807}}\n\ndef UValueMeasurement(bySub, allMeasurementInfo, massThreshold = 3, subList = []):\n    '''\n    Simulates measurements with no fragmentation. Extracts the concentration of all isotopologues with mass below some threshold for easy reference. \n    \n    Inputs:\n        bySub: A dictionary with information about all isotopologues of a molecule, sorted by substitution. \n        allMeasurementInfo: A dictionary containing information from many types of measurements. \n        massThreshold: A mass cutoff; isotopologues with cardinal mass change above this will not be included unless indicated in subList. \n        subList: A list giving specific substitutions to calculate U values for ('13C', '15N', etc.). If substitutions are given, calculates U values only for these substitutions. Otherwise, calculates all U values below the mass threshold. 
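\n        Example (illustrative): with massThreshold = 1 and subList = [], this stores info['Conc'] / bySub['']['Conc'] for every substitution whose cardinal mass change is at most 1 (e.g. '13C', '15N', 'D').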
\n    \n    Outputs:\n        allMeasurementInfo: A dictionary, updated to include information from direct measurements.\n    '''\n    unsubConc = bySub['']['Conc']\n    \n    if 'Full Molecule' not in allMeasurementInfo:\n        allMeasurementInfo['Full Molecule'] = {}\n    \n    for sub, info in bySub.items():\n        if info['Mass'][0] <= massThreshold and subList == []:\n            allMeasurementInfo['Full Molecule'][sub] = info['Conc'] / unsubConc\n        elif sub in subList:\n            allMeasurementInfo['Full Molecule'][sub] = info['Conc'] / unsubConc\n    \n    return allMeasurementInfo\n    \n\ndef fragMult(z, y):\n    '''\n    Fragments an individual site of an isotopologue. z should be 1 or 'x'. \n    \n    Inputs:\n        z: specifies whether a site is retained (1) or lost ('x')\n        y: The mass of a substitution at that site\n    \n    Outputs:\n        'x', specifying that the site is lost, or y, specifying that the site remains. \n    '''\n    if z not in [1,'x']:\n        raise Exception(\"Cannot fragment successfully, each site must be lost ('x') or retained (1)\")\n    if z == 'x' or y == 'x':\n        return 'x'\n    else:\n        return y\n    \ndef expandFrag(siteDepict, numberAtSite):\n    '''\n    Creates an ATOM depiction of a fragment from a SITE depiction of a fragment. Expands the fragmentation vector according to the number of atoms at each site. For example, if I fragment [0,(0,1)] with fragmentation vector [0,1], I do so by applying the fragmentation vector [011] to the isotopologue [001], expanding the tuple. This function expands the fragmentation vector. \n    \n    Inputs:\n        siteDepict: SITE depiction of the fragmentation vector.\n        numberAtSite: A list giving the number of atoms at each site, used to expand the vector.\n    \n    Outputs:\n        atomDepict: expanded (ATOM) depiction of the fragmentation vector\n    '''\n    atomDepict = []\n    for i, v in enumerate(siteDepict):\n        atomDepict += [v] * numberAtSite[i]\n    \n    return atomDepict\n\ndef fragmentOneIsotopologue(atomFrag, isotopologue):\n    '''\n    Applies the ATOM fragmentation vector to the ATOM depiction of an isotopologue. Raises an exception if they are not the same length. Returns the ATOM depiction of the isotopologue with \"x\" in positions that are lost.\n    \n    Inputs:\n        atomFrag: The ATOM depiction of the fragmentation vector\n        isotopologue: The ATOM depiction of the isotopologue, a string\n    \n    Outputs:\n        A string giving the ATOM depiction of a fragmented isotopologue. \n    '''\n    #important to raise this--otherwise one may inadvertently fragment incorrectly. \n    if len(atomFrag) != len(isotopologue):\n        raise Exception(\"Cannot fragment successfully, as the fragment and the isotopologue you want to fragment have different lengths\")\n    \n    a = [fragMult(x,y) for x, y in zip(atomFrag, isotopologue)]\n    \n    if len(a) != len(isotopologue):\n        raise Exception(\"Cannot fragment successfully, the resulting fragment has a different length than the input isotopologue.\")\n    \n    return ''.join(a)\n\ndef fragmentIsotopologueDict(atomIsotopologueDict, atomFrag, relContribution = 1):\n    '''\n    Applies the same fragmentation vector to all isotopologues of an input isotopologue dict and stores the results. This operation corresponds to the \"fragmentation\" operation from the M+N paper. Combines isotopologues which fragment to yield the same product. For the version which does track individual parent isotopologues, see \"fragmentAndTrackIsotopologues\".\n    \n    Inputs:\n        atomIsotopologueDict: A dictionary containing some set of isotopologues, often an M1, M2, ... set, keyed by their ATOM depiction. 
\n        atomFrag: An ATOM depiction of a fragment\n        relContribution: A float between 0 and 1, giving the relative contribution of this fragmentation geometry to the observed ion beam at that mass\n    \n    Outputs: \n        fragmentedDict: A dictionary where the keys are the ATOM isotopologues after fragmentation (i.e. \"0000x\") and the values are the concentrations of those isotopologues. Note that this may combine isotopologues from the input dictionary which fragment in the same way; i.e. 001 and 002 both fragment to yield \"00x\". \n    '''\n    fragmentedDict = {}\n    for isotopologue, value in atomIsotopologueDict.items():\n        newIsotopologue = fragmentOneIsotopologue(atomFrag, isotopologue)\n        if newIsotopologue not in fragmentedDict:\n            fragmentedDict[newIsotopologue] = 0\n        fragmentedDict[newIsotopologue] += (value['Conc'] * relContribution)\n    \n    return fragmentedDict\n    \ndef computeSubs(isotopologue, IDs):\n    '''\n    Given an ATOM depiction of an isotopologue, computes which substitutions are present. \n    \n    Inputs:\n        isotopologue: The ATOM string depiction of an isotopologue\n        IDs: The string of site elements, i.e. the output of strSiteElements\n    \n    Outputs:\n        A string giving substitutions present in that isotopologue, separated by \"-\". I.e. \"17O-17O\"\n    '''\n    subs = []\n    for i in range(len(isotopologue)):\n        if isotopologue[i] != 'x':\n            element = IDs[i]\n            if subDict[element][str(isotopologue[i])] != '':\n                subs.append(subDict[element][str(isotopologue[i])])\n    \n    if subs == []:\n        return \"Unsub\"\n    \n    return '-'.join(subs)\n\ndef computeMass(isotopologue, IDs):\n    '''\n    Used to predict and generate spectra with exact masses. \n    \n    Inputs:\n        isotopologue: A string, the ATOM depiction of an isotopologue.\n        IDs: A string, the ATOM depiction of element IDs.\n    \n    Outputs:\n        mass: A float, giving the exact mass of the isotopologue. \n    '''\n    mass = 0\n    for i in range(len(isotopologue)):\n        if isotopologue[i] != 'x':\n            element = IDs[i]\n            mass += massDict[element][str(isotopologue[i])]\n    \n    return mass\n\ndef predictMNFragmentExpt(allMeasurementInfo, MNDict, atomFragList, fragSubgeometryKeys, molecularDataFrame, fragmentationDictionary, abundanceThreshold = 0, omitMeasurements = {}, fractionationFactors = {}, calcFF = False, ffstd = 0.05, randomseed = 25, unresolvedDict = {}, outputFull = False):\n    '''\n    Predicts the results of several M+N experiments across a range of mass-selected populations and fragments. It incorporates the preceding functions into a whole, so you can just call this and get results.\n    \n    Inputs:\n        allMeasurementInfo: A dictionary containing information from many types of measurements. \n        MNDict: A dictionary where the keys are \"M0\", \"M1\", etc. and the values are dictionaries containing all isotopologues from the ATOM dictionary with a specified cardinal mass difference. See the massSelections function in calcIsotopologues.py\n        atomFragList: A list of expanded fragments, one for each subgeometry, e.g. [[1, 1, 1, 1, 'x'], ['x', 1, 1, 1, 'x']]. See the expandFrag function.\n        fragSubgeometryKeys: A list of strings, indicating the identity of each fragment subgeometry. I.e. ['54_01','42_01']\n        molecularDataFrame: A dataFrame containing information about the molecule.\n        fragmentationDictionary: A dictionary, e.g. 
{'full': {'01': {'subgeometry': [1, 1, 1, 1, 1, 1], 'relCont': 1}},\n         '44': {'01': {'subgeometry': [1, 'x', 'x', 1, 1, 'x'], 'relCont': 1}}}\n        which gives information about the fragments, their subgeometries and relative contributions.\n        abundanceThreshold: Does not include measurements below a certain relative abundance, i.e. assuming they will not be measured due to low abundance. \n        omitMeasurements: A dictionary, {}, specifying measurements which will not be observed. For example, omitMeasurements = {'M1':{'61':'D'}} would mean I do not observe the D ion beam of the 61 fragment of the M+1 experiment, regardless of its abundance. \n        fractionationFactors: A dictionary, specifying a fractionation factor to apply to each ion beam. This is used to apply fractionation factors calculated previously to this predicted measurement (e.g. for a sample/standard comparison with the same experimental fractionation). \n        calcFF: A boolean, specifying whether new fractionation factors should be calculated via this function. If True, fractionationFactors should be left empty. \n        ffstd: A float. If new fractionation factors are calculated, they are generated from a normal distribution with mean 1 and standard deviation of ffstd. \n        randomseed: An integer. If new fractionation factors are calculated, we initialize this random seed; this allows us to generate the same factors if we run multiple times. \n        unresolvedDict: A dictionary, specifying which unresolved ion beams add to each other. \n        outputFull: A boolean. Typically False, in which case beams that are not observed are culled from the dictionary. If True, includes this information; this should only be used for debugging, and will likely break the solver routine. \n    \n    Outputs: \n        allMeasurementInfo: A dictionary, updated to include information from the M+N measurements. \n        calculatedFF: The calculated fractionation factors for this measurement (empty unless calcFF == True)\n    '''\n    calculatedFF = {}\n    siteElements = ci.strSiteElements(molecularDataFrame)\n    np.random.seed(randomseed)\n    #For each population (M1, M2, M3) that we mass select\n    for massSelection, MN in MNDict.items():\n        #add a key to output dictionary\n        if massSelection not in allMeasurementInfo:\n            allMeasurementInfo[massSelection] = {}\n        \n        if calcFF == True:\n            calculatedFF[massSelection] = {}\n\n        #For each fragment we will observe\n        for j, fragment in enumerate(atomFragList):\n\n            #add a key to output dictionary\n            if fragSubgeometryKeys[j] not in allMeasurementInfo[massSelection]:\n                allMeasurementInfo[massSelection][fragSubgeometryKeys[j]] = {}\n            \n            if calcFF == True:\n                calculatedFF[massSelection][fragSubgeometryKeys[j]] = {}\n            \n            #fragment the mass selection accordingly \n            fragKey, fragNum = fragSubgeometryKeys[j].split('_')\n            relContribution = fragmentationDictionary[fragKey][fragNum]['relCont']\n            fragmentedIsotopologues = fragmentIsotopologueDict(MN, fragment, relContribution = relContribution)\n\n            #compute the absolute abundance of each substitution\n            predictSpectrum = {}\n\n            for key, item in fragmentedIsotopologues.items():\n                sub = computeSubs(key, siteElements)\n                \n                if sub not in predictSpectrum:\n                    predictSpectrum[sub] = {'Abs. Abundance':0}\n                predictSpectrum[sub]['Abs. Abundance'] += item\n            \n            #Fractionate\n            if calcFF == True:\n                for sub in predictSpectrum.keys():\n                    ff = np.random.normal(1,ffstd)\n                    calculatedFF[massSelection][fragSubgeometryKeys[j]][sub] = ff\n                    predictSpectrum[sub]['Abs. Abundance'] *= ff\n            \n            elif fractionationFactors != {}:\n                for sub in predictSpectrum.keys():\n                    predictSpectrum[sub]['Abs. Abundance'] *= fractionationFactors[massSelection][fragSubgeometryKeys[j]][sub]\n            \n            allMeasurementInfo[massSelection][fragSubgeometryKeys[j]] = predictSpectrum\n    \n    allMeasurementInfo = combineFragmentSubgeometries(allMeasurementInfo, fragmentationDictionary)\n    \n    allMeasurementInfo = computeMNRelAbundances(allMeasurementInfo, omitMeasurements = omitMeasurements, abundanceThreshold = abundanceThreshold, unresolvedDict = unresolvedDict, outputFull = outputFull)\n    \n    return allMeasurementInfo, calculatedFF\n\ndef combineFragmentSubgeometries(allMeasurementInfo, fragmentationDictionary):\n    '''\n    Takes fragments with multiple subgeometries and combines their measurements. For example, if frag 82 is made via 82_01 (relCont = 0.4) and 82_02 (relCont = 0.6) this function adds the values of these subfragments to give the actual measurement. \n    \n    Inputs:\n        allMeasurementInfo: A dictionary containing information about the measurement including fragment subgeometries.\n        fragmentationDictionary: A dictionary giving information about the fragments and their subgeometries. \n    \n    Outputs:\n        combinedAllMeasurementInfo: A dictionary containing information about the measurement including only full fragments. \n    '''\n    combinedAllMeasurementInfo = {}\n    for massSelection, fragmentData in allMeasurementInfo.items():\n        #only take MN experiments\n        if massSelection[0] != 'M':\n            combinedAllMeasurementInfo[massSelection] = fragmentData\n        else:\n            combinedAllMeasurementInfo[massSelection] = {}\n            for fullFragKey, isotopicData in fragmentData.items():\n                fragKey, fragNum = fullFragKey.split('_')\n                \n                if fragKey not in combinedAllMeasurementInfo[massSelection]:\n                    combinedAllMeasurementInfo[massSelection][fragKey] = {}\n\n                for isotopicSub, subData in isotopicData.items():\n                    if isotopicSub not in combinedAllMeasurementInfo[massSelection][fragKey]:\n                        combinedAllMeasurementInfo[massSelection][fragKey][isotopicSub] = {'Abs. Abundance':0}\n\n                    combinedAllMeasurementInfo[massSelection][fragKey][isotopicSub]['Abs. Abundance'] += subData['Abs. Abundance']\n    \n    return combinedAllMeasurementInfo\n    \ndef computeMNRelAbundances(allMeasurementInfo, omitMeasurements = {}, abundanceThreshold = 0, unresolvedDict = {}, outputFull = False):\n    '''\n    Compute relative abundances from an MN experiment.\n    \n    Inputs:\n        allMeasurementInfo: A dictionary containing information about the absolute abundance of peaks observed in the measurement. \n        omitMeasurements: Allows a user to manually specify ion beams to not measure. For example, omitMeasurements = {'M1':{'61':'D'}} would mean I do not observe the D ion beam of the 61 fragment of the M+1 experiment, regardless of its abundance. \n        abundanceThreshold: gives a relative abundance threshold (e.g. 0.01) below which peaks will not be observed. If a simulated ion beam has relative abundance below this threshold, it is culled from the predicted measurement. \n        unresolvedDict: A dictionary, specifying which unresolved ion beams add to each other. \n        outputFull: False by default. Can be set True to include information about all ion beams, not only the observed ones. This is useful for debugging. \n    \n    Outputs:\n        allMeasurementInfo: A dictionary, containing information about the relative abundances of peaks observed in the measurement. 
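\n    \n    Example (illustrative sketch with made-up abundances, reusing the key conventions above):\n    >>> meas = {'M1': {'61': {'13C': {'Abs. Abundance': 0.9},\n    ...                       'D': {'Abs. Abundance': 0.1}}}}\n    >>> out = computeMNRelAbundances(meas)\n    >>> out['M1']['61']['13C']['Rel. Abundance']\n    0.9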
\n    '''\n    \n    for massSelection, fragmentData in allMeasurementInfo.items():\n        #only take MN experiments\n        if massSelection[0] == 'M':\n            #By fragment\n            for fragKey, isotopicData in fragmentData.items():\n                #compute relative abundance of each substitution\n                totalAbundance = 0\n                \n                #Get abundance of each sub\n                for isotopicSub, subData in isotopicData.items():\n                    totalAbundance += subData['Abs. Abundance']\n                \n                #compute relative abundances\n                for isotopicSub, subData in isotopicData.items():\n                    subData['Rel. Abundance'] = subData['Abs. Abundance'] / totalAbundance\n                \n                #Coalescing peaks--if we are moving abundance from one substitution to another\n                for isotopicSub, subData in isotopicData.items():\n                    #check to see if we have to coalesce this peak\n                    try:\n                        #if we do, set the coalesced relative abundance of the old sub to 0\n                        newSub = unresolvedDict[massSelection][fragKey][isotopicSub]\n                        subData['Combined Rel. Abundance'] = 0\n                    except KeyError:\n                        newSub = isotopicSub\n                    \n                    #Then find the new substitution\n                    newSubData = allMeasurementInfo[massSelection][fragKey][newSub]\n                    \n                    #Add the old subs relative abundance to the new sub\n                    if 'Combined Rel. Abundance' not in newSubData:\n                        newSubData['Combined Rel. Abundance'] = subData['Rel. Abundance']\n                    else:\n                        newSubData['Combined Rel. Abundance'] += subData['Rel. Abundance']\n                \n                #Calculate adjusted relative abundance, which does not include contributions from peaks below some\n                #threshold\n                shortSpectrum = {}\n                totalRelAbund = 0\n                try:\n                    forbiddenPeaks = omitMeasurements[massSelection][fragKey]\n                except KeyError:\n                    forbiddenPeaks = []\n\n                for isotopicSub, subData in isotopicData.items():\n                    #If the peak is observed, count it\n                    if subData['Combined Rel. Abundance'] > abundanceThreshold and isotopicSub not in forbiddenPeaks:\n                        shortSpectrum[isotopicSub] = subData\n                        totalRelAbund += subData['Combined Rel. Abundance']\n                    #Otherwise, either 1) set Adj. Rel. Abundance to 0, keeping it in the spectrum or\n                    # 2) cull it from the spectrum \n                    else:\n                        if outputFull:\n                            shortSpectrum[isotopicSub] = subData\n                            shortSpectrum[isotopicSub]['Adj. Rel. Abundance'] = 0\n\n                #calculate adj. rel. abundance for the qualifying peaks\n                for isotopicSub, subData in shortSpectrum.items():\n                    #If we added adj. rel. abundance = 0 in the previous step, we don't want to repeat that calculation\n                    if 'Adj. Rel. Abundance' not in subData:\n                        subData['Adj. Rel. Abundance'] = subData['Combined Rel. Abundance'] / totalRelAbund\n                \n                allMeasurementInfo[massSelection][fragKey] = shortSpectrum\n    \n    return allMeasurementInfo\n\ndef trackMNFragments(MN, expandedFrags, fragSubgeometryKeys, molecularDataFrame, unresolvedDict = {}):\n    '''\n    Fragments and tracks isotopologues across a range of mass selections and fragments. \n    \n    Inputs:\n        MN: A dictionary, where the key is an MNKey and the values give information about all isotopologues associated with that mass selection. \n        expandedFrags: A list of the expanded fragments\n        fragSubgeometryKeys: A list of the fragment subgeometry keys \n        molecularDataFrame: The initial dataframe with information about the molecule\n        unresolvedDict: A dictionary, specifying which unresolved ion beams add to each other. \n    \n    Outputs:\n        MN: The same dictionary, with information about fragmentation added. 
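\n    \n    Usage sketch (illustrative; assumes MN was built upstream, e.g. by the mass-selection routines in calcIsotopologues.py, and that expandedFrags came from expandFrag):\n    \n        MN = trackMNFragments(MN, expandedFrags, ['54_01'], molecularDataFrame)\n        MN['M1'][someAtomString]['54_01 Subs']   # e.g. '13C'; someAtomString is any ATOM key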
\n    '''\n    unsubString = list(MN['M0'].keys())[0]\n    UnsubConc = MN['M0'][unsubString]['Conc']\n\n    for key in list(MN.keys()):\n        massSelection = MN[key]\n        for i, fragment in enumerate(expandedFrags):\n            fragmentAndTrackIsotopologues(massSelection, fragment, fragSubgeometryKeys[i], UnsubConc, molecularDataFrame, unresolvedDict = unresolvedDict)\n        \n    return MN\n\ndef fragmentAndTrackIsotopologues(massSelection, atomFrag, fragmentKey, unsubConc, molecularDataFrame, unresolvedDict = {}):\n    '''\n    Fragments isotopologues and tracks which parent isotopologues end up in which product. For the version that combines isotopologues, for simulating measurements, see fragmentIsotopologueDict (that is, if 001 and 002 both form 00x on fragmentation, this function tracks 001 and 002 explicitly; fragmentIsotopologueDict only reports 00x). This function fills in a dictionary with the isotopologues introduced to be fragmented by identifying the product and substitutions of each. \n    \n    Inputs:\n        massSelection: A subset of isotopologues indexed using the ATOM depiction. \n        atomFrag: An ATOM depiction fragment\n        fragmentKey: A string giving the identity of the fragment. \n        unsubConc: The concentration of the unsubstituted isotopologue. \n        molecularDataFrame: A dataFrame containing information about the molecule.\n        unresolvedDict: A dictionary, e.g. {'133':{'17O':'13C'}}, specifying which unresolved ion beams add to each other.\n    \n    Outputs:\n        massSelection: The same dictionary, updated to include information about fragmentation. \n    '''\n    siteElements = ci.strSiteElements(molecularDataFrame)\n    \n    for isotopologue, value in massSelection.items():\n        value['Stochastic U'] = value['Conc'] / unsubConc\n        frag = [fragMult(x,y) for x, y in zip(atomFrag, isotopologue)]\n        newIsotopologue = ''.join(frag)\n        massSelection[isotopologue][fragmentKey + ' Identity'] = newIsotopologue\n        \n        sub = computeSubs(newIsotopologue, siteElements)\n        \n        #If unresolved peaks are a problem\n        if fragmentKey in unresolvedDict:\n            if sub in unresolvedDict[fragmentKey]:\n                sub = unresolvedDict[fragmentKey][sub]\n        \n        massSelection[isotopologue][fragmentKey + ' Subs'] = sub\n    \n    return massSelection\n\ndef isotopologueDataFrame(MNDictionary, molecularDataFrame):\n    '''\n    Given a dictionary containing different mass selections, iterates through each mass selection. Extracts the isotopologues from each and puts them into a dataframe, identifying their concentration, substitution, as well as a long string giving a \"precise identity\", i.e. including explicit labels. Returns these as a dictionary with keys \"M0\", \"M1\", etc. where the values are dataFrames of the isotopologues. \n    \n    Inputs:\n        MNDictionary: A dictionary containing different mass selections, i.e. the output of fragmentAndTrackIsotopologues\n        molecularDataFrame: A dataFrame containing information about the molecule.\n    \n    Outputs:\n        isotopologuesDict: A dictionary where the keys are \"M0\", \"M1\", etc. and the values are dataFrames giving the isotopologues with those substitutions. 
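\n    \n    Usage sketch (illustrative):\n    \n        isoDict = isotopologueDataFrame(MN, molecularDataFrame)\n        isoDict['M1'].columns   # includes 'Stochastic', 'Composition' and 'Precise Identity'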
\n '''\n \n isotopologuesDict = {}\n siteElements = ci.strSiteElements(molecularDataFrame)\n \n for key in list(MNDictionary.keys()):\n massSelection = MNDictionary[key]\n \n Isotopologues = pd.DataFrame.from_dict(massSelection).T\n Isotopologues.rename(columns={'Conc':'Stochastic',\"Subs\": \"Composition\"},inplace = True)\n \n preciseStrings = []\n \n expandedIndices = []\n for i, n in enumerate(molecularDataFrame.Number):\n expandedIndices += n * [molecularDataFrame.index[i]]\n \n for i, v in Isotopologues.iterrows():\n Subs = [ci.uEl(element, int(number)) for element, number in zip(siteElements, i)]\n \n Precise = [x + \" \" + y for x, y in zip(Subs, expandedIndices) if x != '']\n output = ' | '.join(Precise)\n preciseStrings.append(output)\n Isotopologues['Precise Identity'] = preciseStrings\n Isotopologues.sort_values('Composition',inplace = True)\n \n isotopologuesDict[key] = Isotopologues\n \n return isotopologuesDict","sub_path":"fragmentAndSimulate.py","file_name":"fragmentAndSimulate.py","file_ext":"py","file_size_in_byte":25840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"140382879","text":"#!/usr/bin/env python3\n\nimport networkx as nx\nimport numpy as np\nimport ExceptionsPgmPy as epp\n\n\nclass BayesianModel(nx.DiGraph):\n \"\"\" Public Methods\n --------------\n add_nodes('node1', 'node2', ...)\n add_edges(('node1', 'node2', ...), ('node3', 'node4', ...))\n set_states('node1', ('state1', 'state2', ...))\n get_states('node1')\n add_rule_for_states('node1', ('state2', 'state1', ...))\n add_rule_for_parents('node1', ('parent1', 'parent2', ...))\n get_parents('node1')\n set_cpd('node1', cpd1)\n get_cpd('node1')\n set_observed(observations, reset=False)\n reset_observed('node1', ...)\n reset_observed()\n is_observed('node1')\n \"\"\"\n #__init__ is inherited\n def add_nodes(self, *args):\n \"\"\"Adds nodes to graph with node-labels as provided in function.\n Currently, only string-labels are allowed.\n \"\"\"\n for item in args:\n if not isinstance(item, str):\n raise TypeError(\"Name of nodes must be strings.\")\n self.add_nodes_from(args)\n\n def _string_to_tuple(self, string):\n \"\"\"Converts a single string into a tuple with a string element.\"\"\"\n return (string,)\n\n def add_edges(self, tail, head):\n \"\"\"Takes two tuples of nodes as input. 
All nodes in 'tail' are\n        joined to all nodes in 'head', with the direction of each edge\n        from a node in 'tail' to a node in 'head'.\n        \"\"\"\n        if isinstance(tail, str):\n            tail = self._string_to_tuple(tail)\n        if isinstance(head, str):\n            head = self._string_to_tuple(head)\n\n        for end_node in head:\n            for start_node in tail:\n                self.add_edge(start_node, end_node)\n\n            self.node[end_node]['_parents'] = sorted(\n                self.predecessors(end_node))\n            # Store the rule as a tuple, not a one-shot generator, so it can be\n            # iterated more than once; its length must match the number of\n            # parents of end_node (not the number of head nodes).\n            self.node[end_node]['_rule_for_parents'] = tuple(\n                range(len(self.node[end_node]['_parents'])))\n\n    def set_states(self, node, states):\n        \"\"\"Adds state-name from 'state' tuple to 'node'.\"\"\"\n        self.node[node]['_states'] = [\n            [state, False] for state in sorted(states)]\n        # A tuple (rather than a generator) survives repeated iteration.\n        self.node[node]['_rule_for_states'] = tuple(range(len(states)))\n        self._calc_observed(node)\n        #internal storage = [['a',0],['b',0],['c',0],]\n        #user-given order = ('c','a','b')\n        #_rule_for_states = (2,0,1)\n        #Rule will contain indices with which internal order should be\n        #accessed\n\n    def _calc_observed(self, node):\n        \"\"\"\n        Set the node's '_observed' flag to True if any of its states are observed\n        @param node:\n        @return:\n        \"\"\"\n\n        for state in self.node[node]['_states']:\n            if state[1]:\n                self.node[node]['_observed'] = True\n                break\n        else:\n            self.node[node]['_observed'] = False\n\n    def add_rule_for_states(self, node, states):\n        \"\"\"Sets new rule for order of states\"\"\"\n        #TODO check whether all states are mentioned?\n        _order = list()\n        for user_given_state in states:\n            for state in self.node[node]['_states']:\n                if state[0] == user_given_state:\n                    _order.append(self.node[node]['_states'].index(state))\n                    break\n        self.node[node]['_rule_for_states'] = tuple(_order)\n\n    def get_states(self, node):\n        \"\"\"Returns tuple with states in user-defined order\"\"\"\n        for index in self.node[node]['_rule_for_states']:\n            yield self.node[node]['_states'][index][0]\n\n    def add_rule_for_parents(self, node, parents):\n        #check if all parents are mentioned and no extra parents are\n        ##present\n        #_extra = set(parents) - set(self.predecessors(node))\n        #_missing = set(self.predecessors(node)) - set(parents)\n        #if not len(_missing):\n            #raise epp.MissingParentsError(_missing)\n        #if not len(_extra):\n            #raise epp.ExtraParentsError(_extra)\n        _ord = list()\n        for user_given_parent in parents:\n            for parent in self.node[node]['_parents']:\n                if parent == user_given_parent:\n                    _ord.append(self.node[node]['_parents'].index(parent))\n                    break\n        self.node[node]['_rule_for_parents'] = tuple(_ord)\n\n    def get_parents(self, node):\n        \"\"\"Returns tuple with parents in order\"\"\"\n        for index in self.node[node]['_rule_for_parents']:\n            yield self.node[node]['_parents'][index]\n\n    def set_cpd(self, node, cpd):\n        \"\"\"Adds given CPD to node as numpy.array\n\n        It is expected that the given CPD will be a 2D array such that\n        the order of probabilities in the array is according\n        to the rules specified for parents and states.\n        An example is shown below.\n\n        EXAMPLE\n        -------\n        student.set_states('grades', ('A','C','B'))\n        student.add_rule_for_parents('grades', ('diff', 'intel'))\n        student.add_rule_for_states('grades', ('A', 'B', 'C'))\n        student.set_cpd('grades',\n            [[0.1,0.1,0.1,0.1,0.1,0.1],\n            [0.1,0.1,0.1,0.1,0.1,0.1],\n            [0.8,0.8,0.8,0.8,0.8,0.8]]\n            )\n\n        #diff:   easy                 hard\n        #intel:  dumb  avg  smart  dumb  avg  smart\n        #gradeA: 0.1   0.1  0.1    0.1   0.1  0.1\n        #gradeB: 0.1   0.1  0.1    0.1   0.1  0.1\n        #gradeC: 0.8   0.8  0.8    0.8   0.8  0.8\n        \"\"\"\n        self.node[node]['_cpd'] = np.array(cpd)\n\n    def get_cpd(self, node):\n        return self.node[node]['_cpd']\n\n    def _is_child_observed(self, node):\n        \"\"\"Returns True if the node or any descendant of the node\n        is observed\"\"\"\n        if self.is_observed(node):\n            return True\n        else:\n            child_dict = nx.bfs_successors(self, node)\n            for child in child_dict.keys():\n                for child_ in child_dict[child]:\n                    if self.is_observed(child_):\n                        return True\n            return False\n\n    def _direction(self, start, end):\n        \"\"\"Returns 'outgoing' if direction is from start to end\n        else returns 'incoming'\"\"\"\n        out_edges = self.out_edges(start)\n        if (start, end) in out_edges:\n            return 'outgoing'\n        else:\n            return 'incoming'\n\n    def active_trail(self, start, end):\n        \"\"\"Returns an active trail between start and end nodes if one\n        exists, else returns None\"\"\"\n        G = self.to_undirected()\n        for path in nx.all_simple_paths(G, start, end):\n            for i in range(1, len(path)-1):\n                #direction_1 is the direction of edge from previous node to\n                #current node\n                direction_1 = self._direction(path[i], path[i-1])\n                #direction_2 is the direction of edge from current node to\n                #next node\n                direction_2 = self._direction(path[i], path[i+1])\n                if direction_1 == 'incoming' and direction_2 == 'incoming':\n                    #collider: blocks the trail unless it or one of its\n                    #descendants is observed\n                    if not self._is_child_observed(path[i]):\n                        break\n                elif self.is_observed(path[i]):\n                    #chain or fork: blocks the trail when the middle node\n                    #itself is observed\n                    break\n            else:\n                return path\n        return None\n\n    def set_observed(self, observations, reset=False):\n        \"\"\"\n        Sets states of nodes as observed.\n\n        @param observations: dictionary with key as node and value as a tuple of states that are observed\n        @return:\n        \"\"\"\n        #TODO check if multiple states of same node can be observed\n        #TODO if not above then, put validation\n        for _node in observations:\n            for user_given_state in observations[_node]:\n                for state in self.node[_node]['_states']:\n                    if state[0] == user_given_state:\n                        state[1] = not reset\n                        break\n            self._calc_observed(_node)\n\n    def reset_observed(self, nodes=False):\n        \"\"\"Resets observed-status of given nodes.\n\n        Will not change a particular state. For that use, set_observed with reset=True.\n\n        If no arguments are given, all states of all nodes are reset.\n        @param nodes:\n        @return:\n        \"\"\"\n        if nodes is False:\n            _to_reset = self.nodes()\n        elif isinstance(nodes, str):\n            _to_reset = self._string_to_tuple(nodes)\n        else:\n            _to_reset = nodes\n        for node in _to_reset:\n            for state in self.node[node]['_states']:\n                state[1] = False\n            self._calc_observed(node)\n\n    def is_observed(self, node):\n        return self.node[node]['_observed']\n\n\nif __name__ == '__main__':\n    student = BayesianModel()\n    student.add_nodes('diff', 'intel', 'grades')\n    student.add_edges(('diff', 'intel'), ('grades',))\n    print(sorted(student.edges()))\n    student.set_states('diff', ('hard', 'easy'))\n    print([m for m in student.get_states('diff')])\n    student.set_states('intel', ('smart', 'avg', 'dumb'))\n    print([m for m in student.get_states('intel')])\n    student.set_states('grades', ('A', 'B', 'C'))\n    print([m for m in student.get_states('grades')])\n    student.add_rule_for_parents('grades', ('intel', 'diff'))\n    print([m for m in student.get_parents('grades')])\n    student.add_rule_for_states('grades', ('C', 'A', 'B'))\n    print([m for m in student.get_states('grades')])\n","sub_path":"src/BayesianModel.py","file_name":"BayesianModel.py","file_ext":"py","file_size_in_byte":9631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"401731279","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url, include\n\nurlpatterns = patterns('blogs.views',\n    url(r'^index', 'index', name='index'),\n    url(r'^do_login$', 'do_login', name='do_login'),\n    url(r'^do_search_blog$', 'do_search_blog', name='do_search_blog'),\n    url(r'^do_regist_blog$', 'do_regist_blog', name='do_regist_blog'),\n    url(r'^login_error$', 'login_error', name='login_error'),\n    url(r'^do_logout$', 'do_logout', name='do_logout'),\n    url(r'^ref_blog_top$', 'ref_blog_top', name='ref_blog_top'),\n\n    # Entry\n    url(r'^do_post_entry/(?P\\d+)$', 'do_post_entry', name='do_post_entry'),\n    url(r'^do_post_entry$', 'do_post_entry', name='do_post_entry'),\n\n    # anonymous\n    url(r'^ref_blog/(?P\\w+)/(?P\\d+)$', 'ref_blog', name='ref_blog'),\n\n    url(r'blog_new_user$', 'blog_new_user', name='blog_new_user'),\n)","sub_path":"blog_service/blogs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"57595015","text":"from django.contrib import admin\nfrom ordered_model.admin import OrderedModelAdmin\n\nfrom .models import Strength\n\n\n@admin.register(Strength)\nclass StrengthAdmin(OrderedModelAdmin):\n    list_display = [\n        \"name\",\n        \"owner\",\n        \"order\",\n        \"move_up_down_links\",\n    ]\n    list_filter = [\"owner\"]\n    readonly_fields = [\"created_at\"]\n","sub_path":"backend/coapp/palace/strengths/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"392434719","text":"from .utils import json_value\n\n\ndef extract_media_v1(data):\n    \"\"\"Extract media from Private API\n    \"\"\"\n    user = data[\"user\"]\n    location = data.get(\"location\")\n    if location:\n        location = {\"pk\": int(location.get(\"pk\")), \"name\": location.get(\"name\")}\n    video_url = \"\"\n    if \"video_versions\" in data:\n        # Select best quality by resolution\n        video_url = sorted(\n            data[\"video_versions\"], key=lambda o: o[\"height\"] * o[\"width\"]\n        ).pop()[\"url\"]\n    product_type = 
data.get(\"product_type\", \"\")\n if data[\"media_type\"] == 2 and not product_type:\n product_type = \"feed\"\n thumbnail_url = ''\n if 'image_versions2' in data:\n thumbnail_url = sorted(\n data[\"image_versions2\"][\"candidates\"],\n key=lambda o: o[\"height\"] * o[\"width\"],\n ).pop()[\"url\"]\n return {\n \"pk\": int(data[\"pk\"]),\n \"taken_at\": int(data[\"taken_at\"]),\n \"id\": data[\"id\"],\n \"media_type\": data[\"media_type\"],\n \"product_type\": product_type,\n \"code\": data[\"code\"],\n \"thumbnail_url\": thumbnail_url,\n \"location\": location,\n \"user\": extract_user_short(user),\n \"comment_count\": int(data.get(\"comment_count\") or 0),\n \"like_count\": int(data.get(\"like_count\") or 0), # the media just published has no like_count\n \"caption_text\": json_value(data, \"caption\", \"text\", default=\"\"),\n \"usertags\": [\n extract_usertag(usertag)\n for usertag in data.get(\"usertags\", {}).get(\"in\", [])\n ],\n \"video_url\": video_url,\n \"view_count\": int(data.get('view_count') or 0),\n \"video_duration\": data.get('video_duration'),\n \"title\": data.get(\"title\") or None,\n \"resources\": [\n extract_resource_v1(edge)\n for edge in data.get('carousel_media', [])\n ]\n }\n\n\ndef extract_media_gql(data):\n \"\"\"Extract media from GraphQL\n \"\"\"\n user = data[\"owner\"]\n media_id = \"%s_%s\" % (data[\"id\"], user[\"id\"])\n if \"full_name\" in user:\n # for hashtag user contain {'id': '2041641294'}\n user = extract_user_short(user)\n else:\n user[\"pk\"] = user.pop(\"id\")\n location = data.get(\"location\")\n if location:\n location = {\"pk\": int(location.get(\"id\")), \"name\": location.get(\"name\")}\n media_type = {\"GraphImage\": 1, \"GraphVideo\": 2, \"GraphSidecar\": 8}[data[\"__typename\"]]\n product_type = data.get(\"product_type\", \"\")\n video_url = \"\"\n if media_type == 2:\n video_url = data[\"video_url\"]\n if not product_type:\n product_type = \"feed\"\n shortcode = ''\n if 'shortcode' in data:\n shortcode = data[\"shortcode\"]\n return {\n \"pk\": int(data[\"id\"]),\n \"taken_at\": int(data[\"taken_at_timestamp\"]),\n \"id\": media_id,\n \"media_type\": media_type,\n \"product_type\": product_type,\n \"code\": shortcode,\n \"thumbnail_url\": sorted(\n data.get(\"display_resources\", data.get('thumbnail_resources')), # display_resources - user feed, thumbnail_resources - hashtag feed\n key=lambda o: o[\"config_width\"] * o[\"config_height\"],\n ).pop()[\"src\"],\n \"location\": location,\n \"user\": user,\n \"comment_count\": json_value(data, \"edge_media_to_comment\", \"count\"),\n \"like_count\": json_value(data, \"edge_media_preview_like\", \"count\"),\n \"caption_text\": json_value(\n data, \"edge_media_to_caption\", \"edges\", 0, \"node\", \"text\", default=\"\"\n ),\n \"usertags\": [\n extract_usertag(usertag['node'])\n for usertag in data.get(\"edge_media_to_tagged_user\", {}).get(\"edges\", [])\n ],\n \"video_url\": video_url,\n \"view_count\": int(data.get('video_view_count') or 0),\n \"video_duration\": data.get('video_duration'),\n \"title\": data.get(\"title\") or None,\n \"resources\": [\n extract_resource_gql(edge['node'])\n for edge in data.get('edge_sidecar_to_children', {}).get('edges', [])\n ]\n }\n\n\ndef extract_resource_v1(data):\n video_url = \"\"\n if 'video_versions' in data:\n video_url = sorted(\n data[\"video_versions\"], key=lambda o: o[\"height\"] * o[\"width\"]\n ).pop()[\"url\"]\n thumbnail_url = sorted(\n data[\"image_versions2\"][\"candidates\"],\n key=lambda o: o[\"height\"] * o[\"width\"],\n 
).pop()[\"url\"]\n return {\n \"video_url\": video_url,\n \"thumbnail_url\": thumbnail_url,\n \"media_type\": data['media_type'],\n \"pk\": int(data[\"pk\"]),\n # \"video_duration\": data.get('video_duration'),\n }\n\n\ndef extract_resource_gql(data):\n media_type = {\"GraphImage\": 1, \"GraphVideo\": 2, \"GraphSidecar\": 8}[data[\"__typename\"]]\n return {\n \"video_url\": data.get(\"video_url\", \"\"),\n \"thumbnail_url\": data[\"display_url\"],\n \"media_type\": media_type,\n \"pk\": int(data[\"id\"]),\n # \"view_count\": int(data.get(\"video_view_count\") or 0),\n # \"shortcode\": data[\"shortcode\"],\n # \"accessibility_caption\": data.get(\"accessibility_caption\")\n }\n\n\ndef extract_usertag(data):\n \"\"\"Extract user tag\n \"\"\"\n user = data['user']\n position = data.get('position')\n if not position:\n position = [data['x'], data['y']]\n return {\n \"user\": {\n \"pk\": int(user.get(\"id\", user.get(\"pk\"))),\n \"username\": user[\"username\"],\n \"full_name\": user.get(\"full_name\"),\n \"profile_pic_url\": user.get(\"profile_pic_url\"),\n \"is_verified\": user.get(\"is_verified\"),\n },\n \"position\": position\n }\n\n\ndef extract_user_short(data):\n \"\"\"For Public GraphQL API\n \"\"\"\n user_pk = data.get(\"id\", data.get(\"pk\"))\n assert user_pk, 'User without pk \"%s\"' % data\n return {\n \"pk\": int(user_pk),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data.get(\"is_private\"),\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n # \"is_unpublished\": data.get(\"is_unpublished\"),\n }\n\n\ndef extract_user_gql(data):\n \"\"\"For Public GraphQL API\n \"\"\"\n return {\n \"pk\": int(data[\"id\"]),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data[\"is_private\"],\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n \"media_count\": data[\"edge_owner_to_timeline_media\"][\"count\"],\n \"follower_count\": data[\"edge_followed_by\"][\"count\"],\n \"following_count\": data[\"edge_follow\"][\"count\"],\n \"biography\": data[\"biography\"],\n \"external_url\": data[\"external_url\"],\n \"is_business\": data[\"is_business_account\"],\n }\n\n\ndef extract_user_v1(data):\n \"\"\"For Private API\n \"\"\"\n return {\n \"pk\": int(data[\"pk\"]),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data[\"is_private\"],\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n \"media_count\": data[\"media_count\"],\n \"follower_count\": data[\"follower_count\"],\n \"following_count\": data[\"following_count\"],\n \"biography\": data[\"biography\"],\n \"external_url\": data[\"external_url\"],\n \"is_business\": data[\"is_business\"],\n }\n","sub_path":"instagrapi/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"109961046","text":"import datetime\nfrom datetime import date\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nimport gspread\nfrom gspread_dataframe import set_with_dataframe \n\nimport requests\nimport json\nimport os\nimport jobs\nimport pandas as pd\nfrom dotmap import DotMap\n\n\n# If modifying these 
scopes, delete the file token.json.\nSCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/spreadsheets']\n\ndef upload(filename, job):\n    \"\"\"Uploads a CSV output file to the configured Google Drive folder\n    using the Drive v3 resumable-upload endpoint.\n    \"\"\"\n\n    print(\"Uploading file to Google Drive...\")\n\n    creds = None\n    \n    # The file token.json stores the user's access and refresh tokens, and is\n    # created automatically when the authorization flow completes for the first\n    # time.\n    if os.path.exists('token.json'):\n        creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n    # If there are no (valid) credentials available, let the user log in.\n    if not creds or not creds.valid:\n        if creds and creds.expired and creds.refresh_token:\n            creds.refresh(Request())\n        else:\n            flow = InstalledAppFlow.from_client_secrets_file(\n                'credentials.json', SCOPES)\n            creds = flow.run_local_server(port=0)\n        # Save the credentials for the next run\n        with open('token.json', 'w') as token:\n            token.write(creds.to_json())\n    \n    \n    access_token = creds.token\n    folder_id = job.drive_folder\n    \n    filepath = f'./{job.output}/{filename}'\n\n    filesize = os.path.getsize(filepath)\n\n    # 1. Retrieve session for resumable upload.\n    headers = {\"Authorization\": \"Bearer \"+access_token, \"Content-Type\": \"application/json\"}\n    params = {\n        \"name\": f\"{filename}\",\n        \"parents\": [folder_id],\n        \"mimeType\": \"text/csv\"\n    }\n    r = requests.post(\n        \"https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable\",\n        headers=headers,\n        data=json.dumps(params)\n    )\n    location = r.headers['Location']\n\n    # 2. Upload the file.\n    headers = {\"Content-Range\": \"bytes 0-\" + str(filesize - 1) + \"/\" + str(filesize)}\n    r = requests.put(\n        location,\n        headers=headers,\n        data=open(filepath, 'rb')\n    )\n    # status_code is an int; comparing against the string '200' was always False\n    if r.status_code == 200:\n        print('Upload succeeded.')\n\ndef publish(df, job):\n    \n    print(\"Publishing spreadsheet...\")\n    \n    # ACCESS GOOGLE SHEET\n    # Credential files stored in ~/.config/gspread as credentials.json and authorized_user.json\n    # Need to set up in Docker file\n    \n    date_str = df['date'].max().strftime('%m-%d-%Y')\n    todate = datetime.datetime.utcnow().date().strftime('%m-%d-%Y')\n    \n    gc = gspread.oauth()\n    sh = gc.open_by_key(job.sheet_id)\n    \n    worksheet = sh.get_worksheet(0)  #-> 0 - first sheet, 1 - second sheet etc. 
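\n    # Clearing the sheet before writing (below) presumably guards against\n    # stale rows when the new frame is shorter than the previous upload.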
\n \n \n # CLEAR SHEET CONTENT\n worksheet.clear()\n worksheet.update_title(f'Last updated: {todate}')\n \n # APPEND DATA TO SHEET\n if job.type == 'subgraphs':\n \n cols = jobs.columns_sheet[job.type]\n df_sheet = (df.loc[(df['date'] == df['date'].max()) & (df['active'] == True), cols]\n .sort_values(['curator_apr_30d_estimate', 'signal'], ascending=False))\n \n set_with_dataframe(worksheet, df_sheet) #-> THIS EXPORTS YOUR DATAFRAME TO THE GOOGLE SHEET\n \n worksheet.format('A1:U1', {'textFormat': {'bold': True}})\n worksheet.freeze(rows=1)\n worksheet.format('D:S', {\"numberFormat\": {\"type\": \"NUMBER\", \"pattern\": \"#,##0.00\"}})\n worksheet.format('D:E', {\"numberFormat\": {\"type\": \"PERCENT\", \"pattern\": \"0.00%\"}})\n worksheet.format('J:J', {\"numberFormat\": {\"type\": \"PERCENT\", \"pattern\": \"0.00%\"}})\n worksheet.format('S:U', {\"numberFormat\": {\"type\": \"PERCENT\", \"pattern\": \"0.00%\"}})\n worksheet.format('C:C', {\"numberFormat\": {\"type\": \"DATE\", \"pattern\": \"yyyy-mm-dd\"}})\n worksheet.format('H:I', {\"numberFormat\": {\"type\": \"NUMBER\", \"pattern\": \"#,##0\"}})\n worksheet.format('K:M', {\"numberFormat\": {\"type\": \"NUMBER\", \"pattern\": \"#,##0\"}})\n worksheet.format('O:R', {\"numberFormat\": {\"type\": \"NUMBER\", \"pattern\": \"#,##0\"}})\n\n elif job.type == 'global':\n \n cols = jobs.columns_sheet[job.type]\n df_sheet = df[cols].sort_values('date', ascending=False)\n \n set_with_dataframe(worksheet, df_sheet) #-> THIS EXPORTS YOUR DATAFRAME TO THE GOOGLE SHEET\n \n worksheet.format('A1:AL1', {'textFormat': {'bold': True}})\n worksheet.freeze(rows=1)\n worksheet.format('B:AG', {\"numberFormat\": {\"type\": \"NUMBER\", \"pattern\": \"#,##0\"}})\n \n for colrange in ['F:F', 'V:X', 'AE:AG']:\n worksheet.format(colrange, {\"numberFormat\": {\"type\": \"NUMBER\", \"pattern\": \"#,##0.00\"}})\n \n worksheet.format('AH:AH', {\"numberFormat\": {\"type\": \"PERCENT\", \"pattern\": \"0.0000%\"}}) \n worksheet.format('AI:AL', {\"numberFormat\": {\"type\": \"PERCENT\", \"pattern\": \"0.00%\"}})\n worksheet.format('A:A', {\"numberFormat\": {\"type\": \"DATE\", \"pattern\": \"yyyy-mm-dd\"}})\n \ndef main():\n df = pd.read_csv('outputs/global_07_20_2021.csv', parse_dates=['date'])\n \n job = DotMap(jobs.jobs[1])\n \n publish(df, job)\n\nif __name__ == \"__main__\":\n main()","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"652633541","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"propose_join\"\nurlpatterns = [\n    path('button', views.model_form_upload, name='button'),\n    path('list', views.list, name='list'),\n    path('existing', views.existingclub, name='existing'),\n    path('existing/add', views.add_to_join, name='add_to_join'),\n    path('myclub', views.joined, name='myclub')\n\n]\n","sub_path":"propose_join/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"308783540","text":"import yaml\nfrom lxml import etree\n\n\nclass HPCCConfig:\n    def __init__(self, config):\n        self.config = config\n\n    @staticmethod\n    def load_config(config_path):\n        with open(config_path, 'r') as f:\n            # safe_load avoids executing arbitrary tags, unlike the bare yaml.load\n            return HPCCConfig(yaml.safe_load(f))\n\n    @staticmethod\n    def parse_config(config_path):\n        hpcc_xml = None\n        with open(config_path, 'r') as f:\n            hpcc_xml = etree.parse(f)\n\n        node_mapping = {}\n        for node in hpcc_xml.xpath(\"//Environment/Hardware/Computer[@name]\"):\n            node_mapping[node.get('name')] = node.get('netAddress')\n\n        roxie_cluster = []\n        for node in hpcc_xml.xpath(\"//Environment/Software/RoxieCluster/RoxieServerProcess[@netAddress]\"):\n            roxie_cluster.append(node.get('netAddress'))\n\n        thor_cluster = []\n        for node in hpcc_xml.xpath(\"//Environment/Software/ThorCluster/ThorSlaveProcess[@computer]\"):\n            thor_cluster.append(node_mapping[node.get('computer')])\n\n        hpcc_config = {\n            \"hosts\": node_mapping,\n            \"roxie\": roxie_cluster,\n            \"thor\": thor_cluster\n        }\n\n        print(hpcc_config)\n        return hpcc_config\n","sub_path":"elastic/hpcc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"238882618","text":"import argparse\nimport json\nimport plotly\nimport re\nimport itertools\n\nfrom plotly.graph_objs import Scatter, Layout\n\nlast_40 = False\nflat_sets = False\n\nclass Measurement:\n    def __init__(self, input_size, time):\n        self.input_size = input_size\n        self.time = time\n\nclass Variation:\n    def __init__(self, name, measurements):\n        self.name = name\n        self.measurements = measurements\n        self.name = self.name.replace('_', ' ')\n\ndef parseMeasurement(json_dict):\n    parsed_name = re.match(r'(.*?)(_solution)?/(.*)/(.*)', json_dict['name'])\n    name = parsed_name.group(1)\n\n    input_size = int(parsed_name.group(4))\n    time = json_dict['real_time']\n    return (name, Measurement(input_size, time))\n\ndef parseJson(loaded_benchmarks_json, baseline_name):\n    measurements = {}\n    for json_input in loaded_benchmarks_json['benchmarks']:\n        name, m = parseMeasurement(json_input)\n        measurements.setdefault(name, []).append(m)\n\n    if baseline_name not in measurements:\n        # raising a bare string is invalid in Python 3\n        raise ValueError(\"No baseline benchmark found\")\n\n    variations = []\n    for name, ms in measurements.items():\n        if name == baseline_name:\n            continue\n        for m, baseline in zip(ms, measurements[baseline_name]):\n            assert(m.input_size == baseline.input_size)\n            # subtract the baseline time; the original bare expression had no effect\n            m.time -= baseline.time\n        variations.append(Variation(name, ms))\n\n    del measurements[baseline_name]\n\n    return variations\n\ndef drawPlot(variations):\n    traces = []\n    for variation in variations:\n        input_sizes = list(map(lambda m : m.input_size, variation.measurements))\n        times = list(map(lambda m: m.time, variation.measurements))\n        print(input_sizes)\n        line = dict( width = 3)\n\n        trace = Scatter(x = input_sizes,\n                        y = times,\n                        mode = 'lines',\n                        name = variation.name,\n                        line = line\n                        )\n        traces.append(trace)\n\n    data = 
plotly.graph_objs.Data(traces)\n layout = {}\n\n dtick_x = 40\n dtick_y = 250\n x_title = 'rhs size'\n if flat_sets == True:\n dtick_y = 10000\n x_title = 'inserting elements size'\n if last_40 == True:\n dtick_x = 1\n dtick_y = 50\n if flat_sets == True:\n dtick_y = 1000\n\n\n layout['xaxis'] = dict(title=x_title,\n #autotick=False,\n ticks='outside',\n tick0=0,\n dtick=dtick_x,\n ticklen=8,\n tickwidth=4,\n tickcolor='#000')\n layout['yaxis'] = dict(title='ns',\n ticks='outside',\n tick0=0,\n dtick=dtick_y,\n tickcolor='#000')\n\n return plotly.offline.plot(\n dict(data=data, layout=layout),\n auto_open = True,\n )\n\nif __name__ == \"__main__\":\n options_parser = argparse.ArgumentParser( \\\n description='Comparing performance of different implementations.')\n\n options_parser.add_argument('-b', '--benchmarks_result',\n dest='benchmarks_result',\n required=True)\n options_parser.add_argument('--last_40', action='store_true', dest='last_40')\n options_parser.add_argument('--flat_sets',\n action='store_true',\n dest='flat_sets')\n options_parser.add_argument('--baseline_name',\n dest='baseline_name',\n default='baseline')\n options = options_parser.parse_args()\n last_40 = options.last_40\n flat_sets = options.flat_sets\n baseline_name = options.baseline_name\n loaded_benchmarks = json.load(open(options.benchmarks_result))\n\n variations = parseJson(loaded_benchmarks, baseline_name)\n drawPlot(variations)\n","sub_path":"draw_results.py","file_name":"draw_results.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"618041571","text":"#!/usr/bin/env python3\n\n# Simple network socket demo - CLIENT\n#\n# Set script as executable via: chmod +x client.py\n# Run via: ./client.py \n#\n# To connect to a server on the same computer, could\n# either be 127.0.0.1 or localhost (they have the same meaning)\n\nimport socket\nimport sys\nimport argparse\n\ndef main():\n\n # Use argparse method\n parser = argparse.ArgumentParser()\n parser.add_argument('--version', action='version', version='%(prog)s 1.0')\n parser.add_argument('server_ip', nargs='?', default='localhost')\n parser.add_argument('server_port', nargs='?', default=8765)\n parser.add_argument('username')\n args = parser.parse_args()\n \n # Create TCP socket\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error as msg:\n print(\"Error: could not create socket\")\n print(\"Description: \" + str(msg))\n sys.exit()\n\n print(\"Connecting to server at \" + str(args.server_ip) + \" on port \" + str(args.server_port))\n \n # Connect to server\n try:\n s.connect((args.server_ip , args.server_port))\n except socket.error as msg:\n print(\"Error: Could not open connection\")\n print(\"Description: \" + str(msg))\n sys.exit()\n \n print(\"Connection established\")\n \n # Send message to server\n # string_unicode = \"Tiger Roar!\"\n string_unicode = \"Car Meow!\"\n raw_bytes = bytes(string_unicode,'ascii')\n \n try:\n # Send the string\n # Note: send() might not send all the bytes!\n # You should loop, or use sendall()\n bytes_sent = s.send(raw_bytes)\n except socket.error as msg:\n print(\"Error: send() failed\")\n print(\"Description: \" + str(msg))\n sys.exit()\n \n print(\"Sent %d bytes to server\" % bytes_sent)\n\n # Close socket\n try:\n s.close()\n except socket.error as msg:\n print(\"Error: unable to close() socket\")\n print(\"Description: \" + str(msg))\n sys.exit()\n\n print(\"Sockets closed, now exiting\")\n\nif 
__name__ == \"__main__\":\n sys.exit(main())","sub_path":"ChatRoom/Resources/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"177633952","text":"\ndef binary_search(array, key, first, last):\n if first == last:\n if key == array[first]:\n return first\n else:\n return None\n mid = first + int((last - first) / 2)\n if array[mid] == key:\n return mid\n elif key < array[mid]:\n return binary_search(array, key, first, mid)\n else:\n return binary_search(array, key, mid + 1, last)\n\ndef binary_search_mod(array, key, first, last):\n if first == last:\n return first\n mid = first + int((last - first) / 2)\n if array[mid] == key:\n return mid\n elif key < array[mid]:\n return binary_search_mod(array, key, first, mid)\n else:\n return binary_search_mod(array, key, mid + 1, last)\n\n\n#a = [2,4,5,8,12,34,56]\na = [[1,3,6,9,11], [2,5,8,34,67], [4,9,11,45,77], [9,13,19,66,99]]\n#print(\"%s\" %binary_search_mod(a, 20, 0, 6))\ntarget = 68\ncolumn_limit = binary_search_mod(a[0], target, 0, len(a[0]))\ncolumn_limit = min(len(a[0]) - 1, column_limit)\n\n#print(\"%s\" %column_limit)\nif a[0][column_limit] == target:\n print(\"Found target at %s\" %column_limit)\nelse:\n for i in range(1, len(a)):\n ans = binary_search_mod(a[i], target, 0, column_limit)\n if a[i][ans] == target:\n print(\"Found target at %s, %s\" %(i, ans))\n","sub_path":"IK/Array, Adhoc/2DArraySearch.py","file_name":"2DArraySearch.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"518263342","text":"import gym\nimport itertools\n# import os\n# os.environ['CUDA_VISIBLE_DEVICES'] = \"-1\"\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\nimport sys\nsys.path.append('../')\nimport baselines.common.tf_util as U\n\nfrom baselines import logger\nfrom baselines import deepq\nfrom baselines.deepq.replay_buffer import ReplayBuffer\nfrom baselines.deepq.utils import ObservationInput\nfrom baselines.common.schedules import LinearSchedule\n\nimport gym_snake\n\nBATCH_SIZE = 32\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--env', help='environment ID', default='snake-single-v0')\n parser.add_argument('--seed', help='Random seed', type=int, default=0)\n parser.add_argument('--prioritized', type=int, default=1)\n parser.add_argument('--prioritized-replay-alpha', type=float, default=0.6)\n parser.add_argument('--dueling', type=int, default=1)\n parser.add_argument('--num-timesteps', type=int, default=int(10e6))\n parser.add_argument('--checkpoint-freq', type=int, default=10000)\n parser.add_argument('--checkpoint-path', type=str, default='./single-dqn/')\n\n args = parser.parse_args()\n\n # make_session first argument : num of cpus\n with U.make_session(8):\n env = gym.make(args.env)\n print(\"observation space is \", env.observation_space)\n print(\"action space is \", env.action_space)\n model = deepq.models.cnn_to_mlp(\n convs=[(32, 5, 1), (64, 5, 1)],\n hiddens=[256],\n dueling=bool(args.dueling)\n )\n\n act = deepq.learn(env,\n q_func=model,\n lr=1e-4,\n max_timesteps=2000000,\n buffer_size=50000,\n train_freq=10,\n exploration_fraction=0.1,\n exploration_final_eps=0.02,\n gamma=0.99,\n print_freq=10,\n checkpoint_freq=args.checkpoint_freq,\n checkpoint_path=args.checkpoint_path,\n 
param_noise=True)\n act.save(\"../models/single-dqn/single_dqn_model_final.pkl\")\n env.close()\n\nif __name__ == '__main__':\n main()\n\n# import gym\n# import sys\n# import gym_snake\n# sys.path.append('../model/')\n# import dqn2015\n# from time import sleep\n# import argparse\n#\n# parser = argparse.ArgumentParser()\n# parser.add_argument('--env', type=str, default='snake-single-v0',\n# help=\"\"\"\\\n# Select environment ID.\n# \"\"\")\n# FLAGS, unparsed = parser.parse_known_args()\n#\n# env = gym.make(FLAGS.env)\n# env.reset()\n# qNet = dqn2015.DQN2015(env)\n# qNet.run()\n# #env = gym.wrappers.Monitor(env, 'tmp_video')\n#\n# # for e in range(500):\n# # obs = env.reset()\n# # done = False\n# # r = 0\n# # while not done:\n# # action = env.action_space.sample()\n# # obs, reward, done, info = env.step(action)\n# # r += reward\n# # env.render(mode='human')\n# # sleep(0.01)\n#\n# env.close()\n","sub_path":"src/test/single_test.py","file_name":"single_test.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"256123441","text":"import glob, os\nimport math\nimport nibabel as nib\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport cv2\nfrom scipy.ndimage import gaussian_filter\nfrom skimage import feature\nimport scipy.ndimage as ndi\nimport scipy\n\ndef AES(img):\n\t\"\"\" \n\tAES(k)=sqrt{sum[E(Iij)(Gx^2(Iij)+Gy^2(Iij))]}/sum[E(Iij)]\n\t\t\n\t\tE(Iij)--> binary mask of the edges extracted using Canny edge detector \n\t\tGx -----> [-1 -1 -1; 0 0 0; 1 1 1]\n\t\tGy -----> [-1 0 1; -1 0 1; -1 0 1]\n\t\tG x , and G y represent the centered gradient kernels along x and y, respectively\n\t\tThe mean value across all the slices was considered for the analysis. When blurring increases,\n\t\tAES values decrease.\n\t\"\"\"\n\tGx = np.array([[-1,-1,-1],[0,0,0],[1,1,1]])\n\tGy = np.array([[-1,0,1],[-1,0,1],[-1,0,1]])\n\t## \n\tx = np.zeros((img.shape[2]))\n\t## \n\tfor kk in range(0, img.shape[2]):\n\t\ts = img[:,:,kk]\n\t\tE = feature.canny(s, sigma=0.6)\n\t\tE = E.astype(np.float64)\n\t\tfiltx = cv2.filter2D(s, -1, Gx)\n\t\tfilty = cv2.filter2D(s, -1, Gy)\n\t\taes_ = np.sqrt((E*((filtx**2)+(filty**2))).sum()) / E.sum()\n\t\tx[kk]=aes_\n\t\t\n\tx = x[~np.isnan(x)]\n\n\treturn x\n# -------------------------------\n# -------------------------------\n\nsessions = ['OFF', 'ON']\ncontrasts = ['PD', 'T1', 'T2', 'T2star-05', 'T2star-035', 'T2star-025']\n\n\nfor sess in sessions:\n\tfor cont_ in contrasts:\n\t\tfiletxt = str(cont_)+\"_\"+str(sess)+\".txt\"\n\t\tmyfile = open(filetxt, 'w')\n\t\tfor file in sorted(os.listdir(\"./\"+str(sess)+\"/\"+str(cont_)+\"/\")):\n\t\t\tif file.endswith(\".nii.gz\"):\n\t\t\t\tfilepath_ = os.path.join(\"./\"+str(sess)+\"/\"+str(cont_)+\"/\", file)\n\t\t\t\timg = nib.load(str(filepath_)).get_fdata()\n\t\t\t\tprint('... processing ... 
', file, img.shape)\n\t\t\t\t## normalize:\n\t\t\t\timg = img/img.max()\n\t\t\t\t## call AES function\n\t\t\t\teval_ = AES(img)\n\t\t\t\ttmp_line_ = str(eval_)\n\t\t\t\ttmp_line_ = tmp_line_.replace('[','')\n\t\t\t\ttmp_line_ = tmp_line_.replace(']','')\n\t\t\t\ttmp_lineb_ = (str(file), tmp_line_)\n\t\t\t\tmyfile.write(\"%s\\n\" % str(tmp_lineb_))\n\t\t\t\t# --------------------------------------\n\t\tmyfile.close()\n##########################################################\n##########################################################\n##########################################################\n\n\"\"\"\nsessions = ['OFF']#['OFF', 'ON']\ncontrasts = ['T2star-025']#['PD', 'T1', 'T2', 'T2star-05', 'T2star-035', 'T2star-025']\n\nfiletxt = str(contrasts[0])+\"_\"+str(sessions[0])+\".txt\"\nmyfile = open(filetxt, 'w')\nfor cont_ in contrasts:\n\tfor sess in sessions:\n\t\tfor file in sorted(os.listdir(\"./\"+str(sess)+\"/\"+str(cont_)+\"/\")):\n\t\t\tif file.endswith(\".nii.gz\"):\n\t\t\t\tfilepath_ = os.path.join(\"./\"+str(sess)+\"/\"+str(cont_)+\"/\", file)\n\t\t\t\timg = nib.load(str(filepath_)).get_fdata()\n\t\t\t\tprint('... processing ... ', file, img.shape)\n\t\t\t\t## normalize:\n\t\t\t\timg = img/img.max()\n\t\t\t\t## call AES function\n\t\t\t\teval_ = AES(img)\n\t\t\t\ttmp_line_ = str(eval_)\n\t\t\t\ttmp_line_ = tmp_line_.replace('[','')\n\t\t\t\ttmp_line_ = tmp_line_.replace(']','')\n\t\t\t\ttmp_lineb_ = (str(file), tmp_line_)\n\t\t\t\tmyfile.write(\"%s\\n\" % str(tmp_lineb_))\n\t\t\t\t# myfile.write(\"%s\\n\" % tmp_line_)\n\t\t\t\t\n# myfile.close()\n\"\"\"\n","sub_path":"AES_for_PMC-data.py","file_name":"AES_for_PMC-data.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"143256577","text":"'''\nThe proper divisors of a number are all of its divisors except one and the number itself.\nWrite a function modi(ls,k) that, given a list ls of integers and a non-negative\ninteger k:\n    1) removes from the list ls the integers that do not have exactly k proper divisors\n    2) returns a second list that contains only the prime numbers of ls.\nNOTE: a number greater than 1 is prime if it has 0 proper divisors.\n\nfor example, for ls = [121, 4, 37, 441, 7, 16] \nmodi(ls,3) returns the list with the prime numbers [37,7], while at the end of the function the list will be ls=[16]\n\nFor more examples see the file grade.txt\n\nWARNING: DO NOT USE ACCENTED LETTERS.\nWARNING: If the grader does not finish within 30 seconds, the exercise score is zero.\n'''\n\nimport math\n\n\ndef modi(ls,k):\n    \"insert your code here\"\n    listaNumeriPrimi=[]\n    ls2=ls[:]\n    \n\n\n    for indice in ls2:\n        numeroDivisori=0\n        contatore=2\n        limite=int(math.sqrt(indice))+1\n        while contatore= 10)\n\n    def test_buildings_rows(self):\n        \"\"\"\n        This tests whether there are greater than 10 rows in the dataframe\n\n        Args:\n\n        Returns:\n            True (bool) if the condition is true\n        \"\"\"\n        processed_buildings_output = buildings_clean(\n            \"seattlecollision/data/raw_data/raw_buildings_input.csv\")\n        self.assertTrue(processed_buildings_output.shape[0] >= 10)\n\n    def test_collidium_rows(self):\n        \"\"\"\n        This tests whether there are greater than 10 rows in the dataframe\n\n        Note: This test will fail if the first building row does not have 10 collisions\n        within 1500 feet and 1 year of construction.\n\n        Only the first row of building data is used to limit computation time.\n\n        Args:\n\n        Returns:\n            True (bool) if the 
condition is true\n \"\"\"\n good_colls = collisions_clean(\"seattlecollision/data/raw_data/raw_collisions_input.csv\")\n good_builds_1 = buildings_clean(\n \"seattlecollision/data/raw_data/raw_buildings_input.csv\").head(1)\n processed_collidium_output = create_collidium_table(good_colls, good_builds_1)\n self.assertTrue(processed_collidium_output.shape[0] >= 10)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"seattlecollision/tests/test_process_data.py","file_name":"test_process_data.py","file_ext":"py","file_size_in_byte":7343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"274660173","text":"import socket\nimport sys\nimport os\n\nlongorshort = str(sys.argv[1])\nstarttime = float(sys.argv[2])\nendtime = float(sys.argv[3])\nif len(sys.argv) == 5:\n\tfiletype = str(sys.argv[4])\n\ndef get_historyB(filename,path):\n\ts = socket.socket()\n\thost = socket.gethostname()\n\tport = 60001\n\ts.connect((host, port))\n\ts.send(filename.encode('utf-8'))\n\tfull_path = os.path.join(path, filename)\n\twith open(full_path, 'wb') as f:\n\t # print('file opened')\n\t while True:\n\t # print('receiving data...')\n\t data = s.recv(1024)\n\t # print('data=%s', (data))\n\t if not data:\n\t break\n\t # write data to a file\n\t f.write(data)\n\tf.close()\n\t# print('Successfully get the file')\n\ts.close()\n\t# print('connection closed')\n\ndef logupdate():\n\tif len(sys.argv) == 5:\n\t\tf = open(os.path.join('./components', \"historyA\"), \"a\")\n\t\tf.write(str(sys.argv[0])+str(sys.argv[1])+str(sys.argv[2])+str(sys.argv[3])+str(sys.argv[4])+'\\n')\n\t\tf.close()\n\telse:\n\t\tf = open(os.path.join('./components', \"historyA\"), \"a\")\n\t\tf.write(str(sys.argv[0])+str(sys.argv[1])+str(sys.argv[2])+str(sys.argv[3])+'\\n')\n\t\tf.close()\n\ncommand = 'send_hashtable'\npath = './components'\n\ns = socket.socket()\nhost = socket.gethostname()\nport = 60001\n\ns.connect((host, port))\ns.send(command.encode('utf-8'))\n\nfull_path = os.path.join(path, command)\n\nwith open(full_path, 'wb') as f:\n while True:\n data = s.recv(1024)\n if not data:\n break\n f.write(data)\nf.close()\ns.close()\nf = open(full_path, \"r\")\nlines = f.readlines()\nfor x in lines:\n\ttable = x.split(\" \")\n\tif longorshort == \"shortlist\":\n\t\tif len(sys.argv) == 5:\n\t\t\tif starttime <= float(table[2]) <= endtime and table[0].split('.')[1].strip() == filetype.split('.')[1].strip():\n\t\t\t\tprint(table[0])\n\t\tif len(sys.argv) == 4:\n\t\t\tif starttime <= float(table[2]) <= endtime:\n\t\t\t\tprint(table[0])\t\n\telse:\n\t\tif len(sys.argv) == 5:\n\t\t\tif starttime <= float(table[2]) <= endtime and table[0].split('.')[1].strip() == filetype.split('.')[1].strip():\n\t\t\t\tprint(table[0],table[2],table[3])\n\t\tif len(sys.argv) == 4:\n\t\t\tif starttime <= float(table[2]) <= endtime:\n\t\t\t\tprint(table[0],table[2],table[3])\nlogupdate()\nget_historyB('historyB','./components')\n","sub_path":"CN1/IndexGet.py","file_name":"IndexGet.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"576485084","text":"# coding=utf-8\n# Copyright 2021 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed 
under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python3\n\"\"\"A module for Shoptimizer API that fixes invalid gtin values.\n\nReference: https://support.google.com/merchants/answer/6324461\n\nThis optimizer does several validations on the gtin value:\n\n1. The gtin must consist of integers, and fall within a range of valid lengths.\n\n2. The gtin must not start with the bulk indicator digit (9):\nhttps://support.google.com/merchants/answer/6286298?hl=en\n\n3. The gtin must not use the reserved range for its prefix:\nhttps://support.google.com/merchants/answer/7000684?hl=en\n\n4. The gtin must not use the coupon range for its prefix:\nhttps://support.google.com/merchants/answer/6286302?hl=en\n\n5. The last digit of the gtin must match the formula defined here:\nhttps://www.gs1.org/services/how-calculate-check-digit-manually\n\nIf it fails these checks, this optimizer will remove the gtin field from the\nproduct to prevent the product from being disapproved in Merchant Center.\n\"\"\"\nimport logging\nimport math\nfrom typing import Any, Dict\n\nfrom optimizers_abstract import base_optimizer\n\n_VALID_GTIN_LENGTHS = [8, 12, 13, 14]\n_COUPON_PREFIXES = ['981', '982', '983', '984', '99', '05']\n_RESTRICTED_PREFIXES = ['020-029', '040-049', '200-299']\n_INVALID_BULK_INDICATOR = '9'\n\n\nclass GTINOptimizer(base_optimizer.BaseOptimizer):\n  \"\"\"An optimizer that fixes invalid gtin values.\"\"\"\n\n  _OPTIMIZER_PARAMETER = 'gtin-optimizer'\n\n  def _optimize(self, product_batch: Dict[str, Any], language: str,\n                country: str, currency: str) -> int:\n    \"\"\"Runs the optimization.\n\n    Fixes invalid gtin fields.\n    See above for the definition of an invalid gtin field.\n\n    Args:\n      product_batch: A batch of product data.\n      language: The language to use for this optimizer.\n      country: The country to use for this optimizer.\n      currency: The currency to use for this optimizer.\n\n    Returns:\n      The number of products affected by this optimization: int\n    \"\"\"\n    num_of_products_optimized = 0\n    for entry in product_batch['entries']:\n      product = entry['product']\n      if 'gtin' in product:\n        gtin = product.get('gtin', '')\n\n        violates_any_gtin_check = (_gtin_fails_format_check(gtin) or\n                                   _gtin_uses_bulk_indicator(gtin) or\n                                   _gtin_uses_reserved_range(gtin) or\n                                   _gtin_uses_coupon_range(gtin) or\n                                   _gtin_fails_checksum(gtin))\n        if violates_any_gtin_check:\n          _remove_gtin(product)\n          num_of_products_optimized += 1\n\n    return num_of_products_optimized\n\n\ndef _remove_gtin(product: Dict[str, Any]) -> None:\n  \"\"\"Clears the gtin value from the product.\n\n  Args:\n    product: A dictionary representing a single shopping product.\n  \"\"\"\n  violating_gtin = product.get('gtin', '')\n  del product['gtin']\n  logging.info(\n      'Modified item %s: Cleared invalid gtin: %s to '\n      'prevent disapproval', product.get('offerId', ''), violating_gtin)\n  base_optimizer.set_optimization_tracking(product, base_optimizer.SANITIZED)\n\n\ndef _gtin_uses_bulk_indicator(gtin: str) -> bool:\n  \"\"\"Determines if the provided gtin violates the bulk indicator digit check.\n\n  Args:\n    gtin: a string representing the product's GTIN.\n\n  Returns:\n    True if the indicator digit is 9, otherwise False.\n  \"\"\"\n  return len(gtin) == 14 and gtin[0] == _INVALID_BULK_INDICATOR\n\n\ndef _gtin_uses_reserved_range(gtin: str) -> bool:\n  \"\"\"Determines if the provided gtin 
violates the reserved prefix check.\n\n Args:\n gtin: a string representing the product's GTIN.\n\n Returns:\n True if the prefix is in a reserved prefix range, otherwise False.\n \"\"\"\n company_prefix = int(gtin[1:4])\n for restricted_prefix in _RESTRICTED_PREFIXES:\n if company_prefix >= int(\n restricted_prefix.split('-')[0]) and company_prefix <= int(\n restricted_prefix.split('-')[1]):\n return True\n return False\n\n\ndef _gtin_uses_coupon_range(gtin: str) -> bool:\n \"\"\"Determines if the provided gtin violates the coupon prefix check.\n\n Args:\n gtin: a string representing the product's GTIN.\n\n Returns:\n True if the prefix is in a coupon prefix range, otherwise False.\n \"\"\"\n return gtin[1:].startswith(tuple(_COUPON_PREFIXES))\n\n\ndef _gtin_fails_format_check(gtin: str) -> bool:\n \"\"\"Determines if the provided gtin violates basic sanity checks.\n\n Args:\n gtin: a string representing the product's GTIN\n\n Returns:\n True if the gtin fails the validations, otherwise False.\n \"\"\"\n if not gtin.isdigit() or len(\n gtin) not in _VALID_GTIN_LENGTHS or _contains_repeating_digits(\n gtin[:-1]) or _contains_sequential_digits(gtin):\n return True\n return False\n\n\ndef _gtin_fails_checksum(gtin: str) -> bool:\n \"\"\"Determines if the provided gtin violates the check digit calculation.\n\n Args:\n gtin: a string representing the product's GTIN\n\n Returns:\n True if the gtin fails check digit validation, otherwise False.\n \"\"\"\n padded_gtin = gtin.zfill(14)\n existing_check_digit = int(padded_gtin[-1])\n target_check_digit = _calculate_check_digit(padded_gtin[:-1])\n return target_check_digit != existing_check_digit\n\n\ndef _calculate_check_digit(partial_gtin: str) -> int:\n \"\"\"Calculates the expected check digit of a GTIN (without the last digit).\n\n Args:\n partial_gtin: a string representing a product GTIN without the check digit.\n\n Returns:\n the calculated expected check digit of the input GTIN.\n \"\"\"\n odds = list(partial_gtin[::2])\n evens = [int(x) for x in list(partial_gtin[1::2])]\n odds_times_three = [int(x) * 3 for x in odds]\n sum_mults = sum(evens) + sum(odds_times_three)\n check_digit = _round_up(sum_mults) - sum_mults\n return check_digit\n\n\ndef _round_up(x) -> int:\n return int(math.ceil(x / 10.0)) * 10\n\n\ndef _contains_repeating_digits(gtin: str) -> bool:\n return gtin.count(gtin[0]) == len(gtin)\n\n\ndef _contains_sequential_digits(gtin: str) -> bool:\n return gtin.startswith('123456789')\n","sub_path":"shoptimizer_api/optimizers_builtin/gtin_optimizer.py","file_name":"gtin_optimizer.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"511435899","text":"import numpy as np\n\nFACE_POINT = (\n 107, 55, 105, 52, 70, 46, 336, 285, 334, 282, 300, 276, 159, 144, 33, 133, 386, 373, 362, 263, 17, 61, 291, 0, 78,\n 308, 14, 13,\n 10, 297, 389, 356, 288, 378, 152, 149, 58, 127, 162, 67,\n 50, 280) # 10부터 시계방향 50,280(왼쪽 광대, 오른쪽 광대)\nPOSE_POINT = {\n 'NOSE': 0,\n 'LEFT_SHOULDER': 11, 'RIGHT_SHOULDER': 12,\n 'LEFT_ELBOW' : 13, 'RIGHT_ELBOW' : 14,\n 'LEFT_WRIST' : 15, 'RIGHT_WRIST' : 16\n}\nFACE_RATE_DICT={\"왼쪽 광대뼈 수직 비율\": [39, 40, 39, 35], \"오른쪽 광대뼈 수직 비율\": [29, 41, 29, 33],\n \"왼쪽 눈썹 수직 비율\": [3, 13, 39, 35], \"오른쪽 눈썹 수직 비율\": [9, 13, 29, 33],\n \"왼쪽눈 수직 비율\": [12, 13, 39, 35], \"오른쪽눈 수직 비율\": [16, 17, 29, 33],\n \"왼눈 수평 비율\": [14, 15, 31, 37], \"오른눈 수평 비율\": [18, 19, 31, 37],\n \"입 수직 비율\": [20, 23, 28, 34], \"입 수평 비율\": [21, 22, 32, 36],\n \"왼눈 오른쪽 
각도\": [12, 15, 13], \"왼눈 왼쪽 각도\": [12, 14, 13],\n \"오른눈 오른쪽 각도\": [16, 19, 17],\"오른눈 왼쪽 각도\": [16, 18, 17],\n \"입술 왼쪽 각도\": [27, 24, 26],\"입술 오른쪽 각도\": [27, 25, 26]\n }\n\ndef point_to_point_distance(p1:list, p2:list)->float:\n x = p2[0]- p1[0]\n y = p2[1]- p1[1]\n # z = p2[2]- p1[2]\n\n # return np.sqrt(np.power(x,2) + np.power(y,2) + np.power(z,2))\n if y < 0:\n return np.sqrt(np.power(x, 2) + np.power(y, 2)) * -1\n\n return np.sqrt(np.power(x, 2) + np.power(y, 2))\n\ndef angle_between(p1, p2)->float: # 두점 사이의 각도:(getAngle3P 계산용) 시계 방향으로 계산한다. P1-(0,0)-P2의 각도를 시계방향으로\n ang1 = np.arctan2(*p1[::-1])\n ang2 = np.arctan2(*p2[::-1])\n res = np.rad2deg((ang1 - ang2) % (2 * np.pi))\n return res\n\ndef get_angle(p1:list, p2:list, p3:list)->float:\n pt1 = (p1[0] - p2[0], p1[1] - p2[1])\n pt2 = (p3[0] - p2[0], p3[1] - p2[1])\n angle = angle_between(pt1, pt2)\n angle = (angle + 360) % 360\n # if direction == \"CCW\": # 시계방향\n if angle > 180: # 시계방향\n angle = (360 - angle) % 360\n\n return angle\n\ndef cvt_Landmark_to_list(landmark)-> list:\n x = landmark.x\n y = landmark.y\n # z = landmark.z\n\n # return [x, y, z]\n return [x, y]\n\ndef preprocess(features):\n\n pose_data = []\n face_data = []\n\n try:\n\n for idx in POSE_POINT.values():\n pose_data.append(cvt_Landmark_to_list(features.pose_landmarks.landmark[idx])) # list[list] 형태를 가짐\n\n center_of_shoulder = [(pose_data[1][0] + pose_data[2][0])/2, (pose_data[1][1] + pose_data[2][1])/2]\n pose_data.append(center_of_shoulder)\n\n except Exception as e:\n print('Cannot find some pose features')\n # print(e)\n pose_data.clear()\n\n try:\n for idx in FACE_POINT:\n face_data.append(cvt_Landmark_to_list(features.face_landmarks.landmark[idx])) # list[list] 형태를 가짐\n\n except Exception as e:\n print('Cannot find some face features')\n # print(e)\n face_data.clear()\n\n\n return face_data, pose_data\n\ndef classify(features)->float:\n\n face_landmarks, pose_landmarks = preprocess(features)\n if not face_landmarks or not pose_landmarks :\n return None\n\n score = 1.0\n\n head_down_score = detect_head_down(pose_landmarks)\n\n eye_closed_score = detect_eye_closed(process_data_rates(face_landmarks))\n\n score = head_down_score + eye_closed_score + score\n\n print('Log from classify of classification_module')\n return score\n\ndef detect_head_down(pose_landmarks:list)->float:\n nose_to_sholuder_l = point_to_point_distance(pose_landmarks[0],pose_landmarks[1])\n nose_to_sholuder_c = point_to_point_distance(pose_landmarks[0],pose_landmarks[7])\n nose_to_sholuder_r = point_to_point_distance(pose_landmarks[0],pose_landmarks[2])\n\n l_elbow_angle = get_angle(pose_landmarks[1],pose_landmarks[3],pose_landmarks[5])\n r_elbow_angle = get_angle(pose_landmarks[2],pose_landmarks[4],pose_landmarks[6])\n\n # print(f'코, 왼어깨:{nose_to_sholuder_l} | 코, 어깨가운데:{nose_to_sholuder_c} | 코, 오른어깨:{nose_to_sholuder_r} | 왼팔꿈치 각도:{l_elbow_angle} | 오른팔꿈치 각도:{r_elbow_angle}\\n')\n\n score = 1.0\n\n if nose_to_sholuder_l < 0 or nose_to_sholuder_c < 0 or nose_to_sholuder_r < 0: # 고개를 푹 숙인경우\n score -= 1\n\n ######## 어깨 가운데 #########\n if nose_to_sholuder_c < 0.15 and nose_to_sholuder_c > 0.13:\n # print('살짝 숙임')\n score -= 0.5\n elif nose_to_sholuder_c <= 0.13:\n # print('많이 숙임')\n score -= 10.0\n else:\n # print('고개 안숙임')\n score += 1.0\n\n ######## 왼쪽 어깨 #########\n if nose_to_sholuder_l < 0.20 and nose_to_sholuder_l > 0.17:\n # print('살짝 기울어짐')\n score -= 0.25\n elif nose_to_sholuder_l <= 0.17:\n # print('많이 기울어짐')\n score -= 10\n else:\n # print('안기울어짐')\n score += 0.5\n\n ######## 오른쪽 어깨 
#########\n    if nose_to_sholuder_r < 0.20 and nose_to_sholuder_r > 0.17:\n        # print('slightly tilted')\n        score -= 0.25\n    elif nose_to_sholuder_r <= 0.17:\n        # print('strongly tilted')\n        score -= 10\n    else:\n        # print('not tilted')\n        score += 0.5\n\n    # print(f'head: {score}')\n\n    print('Log from detect_head_down of classification_module')\n    return score\n\ndef detect_eye_closed(rate_angle_data:dict)->float: # rate_angle_data ---> ratios and angles computed from the face coordinates\n    score = 0\n    keys_list = list(rate_angle_data.keys())\n    # print(abs(values_gap[\"왼쪽눈 수직 비율\"]))\n\n    # probably need to measure the lengths too for an accurate reading\n    eye_angle_sum = rate_angle_data[keys_list[-4]] + rate_angle_data[keys_list[-5]]\n    if eye_angle_sum > 80: # focused\n        score += 10\n    elif eye_angle_sum > 70: # bored\n        score -= 0.5\n    else: # sleeping\n        score -= 10\n    # print(rate_angle_data[keys_list[-4]])\n    # print(rate_angle_data[keys_list[-5]])\n\n    # print(f'eye score: {score}')\n    print('Log from detect_eye_closed of classification_module')\n\n    return score\n\ndef process_data_rates(face_landmarks:list) -> dict:\n\n    current_img_processing_data = {} # stores the values for the current image\n\n    for name in FACE_RATE_DICT.keys():\n        if \"비율\" in name:\n            current_img_processing_data[name] = rate_processing(FACE_RATE_DICT[name], face_landmarks)\n        elif \"각도\" in name:\n            current_img_processing_data[name] = get_angle(face_landmarks[FACE_RATE_DICT[name][0]],face_landmarks[FACE_RATE_DICT[name][1]],face_landmarks[FACE_RATE_DICT[name][2]]) # could overloading fix the mismatched arguments? ---> no\n            # print(f'angle {name}:{current_img_processing_data[name]}')\n\n        else:\n            print(\"오류?\")\n\n    return current_img_processing_data\n\ndef rate_processing(idx:list, face_landmarks:list) -> float:\n\n    first_rate = point_to_point_distance(face_landmarks[idx[0]],face_landmarks[idx[1]])\n    second_rate = point_to_point_distance(face_landmarks[idx[2]],face_landmarks[idx[3]])\n    result_rate = first_rate / second_rate\n\n    return result_rate\n\n\n\n\n\n\n\n","sub_path":"개별테스트/PoseDetection/classification_module.py","file_name":"classification_module.py","file_ext":"py","file_size_in_byte":7059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"584613407","text":"import json\nfrom printing import print_file\n\n\ndef print_json(filename, charset):\n    s = list()\n    with open(filename, encoding=charset) as file:\n        try:\n            sr = json.load(file)\n            s.append(list(sr[0].keys()))\n            for i in sr:\n                if list(i.keys()) != s[0]:\n                    raise KeyError\n                s.append(list(i.values()))\n        except (KeyError, json.JSONDecodeError):\n            raise ValueError(\"Формат не валиден\")\n        except IndexError:\n            raise RuntimeError(\"Формат не валиден\")\n    print_file(s)\n","sub_path":"homeworks/homework_02/For_json.py","file_name":"For_json.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"474268496","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom ScrapyCrawler.items import WxappItem\n\nclass WxappSpider(CrawlSpider):\n    name = 'wxapp'\n    allowed_domains = ['wxapp-union.com']\n    start_urls = ['http://www.wxapp-union.com/portal.php?mod=list&catid=2&page=1']\n\n    # give this spider its own pipeline (the data will not be stored by the other pipelines)\n    custom_settings = {\n        \"ITEM_PIPELINES\": {\n            'ScrapyCrawler.pipelines.WxappPipeline': 300,\n        }\n    }\n\n    rules = (\n        # allow: extract the links that match this rule\n        # callback: which function parses a link once it matches the allow rule\n        # follow: defaults to True; whether to keep following matching links found on crawled pages\n        # Rule(LinkExtractor(allow=r'.+mod=list&catid=2&page=\\\\d'), follow=True),\n        
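# note: allow takes a regular expression, so literal '.' and '?' must be escaped\n        # as in portal\\.php\\?mod=list... -- a bare 'php?' means 'ph' plus an optional\n        # 'p' and never matches the listing URLs, hence the escaped pattern below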
Rule(LinkExtractor(allow=r'http://www.wxapp-union.com/portal\\.php\\?mod=list&catid=2&page=\\d'), follow=True),\n        # Rule(LinkExtractor(allow=r'.+article-.+\\.html'),\n        #      callback='parse_datail', follow=False),\n        Rule(LinkExtractor(allow=r'http://www.wxapp-union.com/article-.+\\.html'),\n             callback='parse_datail', follow=False),\n    )\n\n    def parse_datail(self, response):\n        # scrape the title from the mini-program detail page\n        # print(type(response.xpath('//h1[@class=\"ph\"]/text()')))\n        title = response.xpath('//h1[@class=\"ph\"]/text()').extract_first()\n        author = response.xpath('//p[@class=\"authors\"]/a/text()').extract_first()\n        pubdate = response.xpath('//span[@class=\"time\"]/text()').extract_first()\n        content = response.xpath('//td[@id=\"article_content\"]//text()').extract() # this returns a list\n        content = \"\".join(content).strip() # convert the list to a string and strip whitespace from both ends\n        print(title,author,pubdate)\n        # print(content)\n        wxappItem = WxappItem(title=title,author=author,pubdate=pubdate,content=content)\n        yield wxappItem\n\n","sub_path":"04.ScrapyDoc/ScrapyCrawler/ScrapyCrawler/spiders/wxapp.py","file_name":"wxapp.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"67371890","text":"# shows a user's saved tracks (need to be authenticated via oauth)\n\nimport sys\nimport spotipy\nimport spotipy.util as util\nseen = {}\n\ndef show_tracks(tracks):\n    for i, item in enumerate(tracks['items']):\n        track = item['track']\n        tup = (track['artists'][0]['name'], track['name'])\n        print('%s|%s|%d' % (track['artists'][0]['name'], track['name'], i), end=\"\")\n        if tup in seen:\n            print('|DUP:%s' % seen[tup], end=\"\")\n        else:\n            seen[tup] = True\n        print()\n\n\nif __name__ == '__main__':\n    if len(sys.argv) > 1:\n        username = sys.argv[1]\n    else:\n        print(\"Whoops, need your username!\")\n        print(\"Usage: %s username\" % (sys.argv[0],))\n        sys.exit()\n\n\n    scope = 'user-library-read'\n\n    token = util.prompt_for_user_token(username, scope)\n\n    if token:\n        sp = spotipy.Spotify(auth=token)\n        tracks = sp.current_user_saved_tracks(limit=50)\n        show_tracks(tracks)\n        while tracks['next']:\n            tracks = sp.next(tracks)\n            show_tracks(tracks)\n    else:\n        print(\"Can't get token for\", username)\n\n","sub_path":"examples/show_my_saved_tracks.py","file_name":"show_my_saved_tracks.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"409898030","text":"import tensorflow as tf\n\n\nNN_MODEL = \"/home/team/iMG/submit/results/nn_model_ep_18200.ckpt\"    # model path settings\n\nclass Algorithm:\n    def __init__(self):\n        # fill your init vars\n        self.buffer_size = 0\n    \n    # Initial \n    def Initial(self):\n\n        IntialVars = []\n    \n        return IntialVars\n\n    #Define your algorithm\n    def run(self, time, S_time_interval, S_send_data_size, S_chunk_len, S_rebuf, S_buffer_size, S_play_time_len,S_end_delay, S_decision_flag, S_buffer_flag,S_cdn_flag, end_of_video, cdn_newest_id,download_id,cdn_has_frame, IntialVars):\n\n        th = 0\n        fpsx2 = 50\n        segment_count = 0\n        flag = 0 \n        for i in range(7000):\n            if S_decision_flag[-2-i]:\n                segment_count = i+1\n                flag += 1\n            if flag == 1:\n                break\n        for j in range(segment_count):\n            if S_time_interval[-1-j] != 0:\n                th += S_send_data_size[-1-j]/S_time_interval[-1-j]/1000\n        ave_throughput = th/fpsx2\n        buffer_size = S_buffer_size[-1]\n\n        if buffer_size <= 0.5:\n            bit_rate = 0 \n        elif buffer_size >= 2.5:\n            bit_rate = 3\n        elif buffer_size >= 1:\n            if buffer_size >=2 and ave_throughput >= 1250:\n                
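# this is a buffer-occupancy ladder gated by the averaged throughput computed\n                # above; the 1250/1500 cutoffs are hand-tuned constants, not derived values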
bit_rate = 3\n elif buffer_size >=1.5 and ave_throughput >= 1500:\n bit_rate = 3\n else:\n bit_rate = 2\n else:\n bit_rate = 1\n\n\n\n target_buffer = 0\n \n return bit_rate, target_buffer\n\n\n # If you choose other\n #......\n","sub_path":"ABR_-1.py","file_name":"ABR_-1.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"342199607","text":"# Copyright 2020 DataStax, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"test the read_ahead module\"\"\"\nfrom pysper.core.diag import read_ahead\n\ndef test_block_dev():\n \"\"\"test parsing the block dev report\"\"\"\n rows = [\n {\"ssz\": 512, \"ra\": 8, \"device\": \"/dev/sda\"},\n {\"ssz\": 512, \"ra\": 8192, \"device\": \"/dev/sdb\"},\n ]\n devices = read_ahead.extract_block_dev(rows)\n assert len(devices) == 2\n assert devices.get(\"/dev/sda\") == 4096\n assert devices.get(\"/dev/sdb\") == 4194304\n","sub_path":"tests/core/diag/test_read_ahead.py","file_name":"test_read_ahead.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"222734469","text":"\nfrom pwn import *\nfrom pwnlib.util import misc\nimport tempfile\nfrom time import sleep\n\ndef pwndbg(script=b\"\") :\n tmp = tempfile.NamedTemporaryFile(prefix = 'pwn', \n suffix = '.gdb', delete = False)\n tmp.write(b\"\"\"\n set architecture mips\n set endian little\n target remote localhost:9999\n \"\"\" + script + b\"\\n\")\n if args.GDB : \n misc.run_in_new_terminal('gdb-multiarch -q -x \"%s\"' % tmp.name)\n\ncontext.log_level = 'debug'\ncontext.terminal = ['tmux', 'splitw', '-h']\n\np = remote('localhost', 9998)\npause()\ncmd = b\"\"\"\n\"\"\"\npwndbg(cmd)\n\np.interactive()","sub_path":"Pwnable/mips/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629215401","text":"# 1\n\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nfor i in a:\n if i < 5:\n print(i)\n\n# 2\nnew_a = []\nfor i in a:\n if i < 5:\n new_a.append(i)\n\nprint(new_a)\n\n# 3\nprint([aa for aa in a if aa < 5])\n\n# 4\nus_input = int(input(\"Enter value: \"))\nfor i in a:\n if i < us_input:\n print(i)\n","sub_path":"Practice Python/list-less-then-ten.py","file_name":"list-less-then-ten.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"652305218","text":"## utils.py\nimport hashlib\nimport pandas as pd\nimport os\nimport rdkit\nfrom rdkit.Chem import MolFromSmiles\n__all__ = [\"file_validator\",\n \"Data_dict\",\n \"Data_frame\",\n \"File_hash_calculator\",\n \"smiles_validator\"\n ]\n\ndef file_validator(path):\n if (path == None) or (path.strip() == \"\"):\n raise ValueError(\"Enter path of csv file\")\n if os.path.isfile(path):\n return True\n else:\n raise ValueError(f\"{path} the specified path doesnot 
exist....\")\n\ndef Data_dict(data,):\n Data = {}\n for k in data:\n if data.get(k) == None:\n continue\n Data.update(**data[k])\n return Data\n\ndef Data_frame(data,target):\n descriptor_data = pd.DataFrame(data)\n if descriptor_data.shape[0] == len(target):\n descriptor_data.insert(descriptor_data.shape[1],column=\"Target\",value=target)\n return descriptor_data\n\ndef Dir_validator(path):\n pass\n\ndef File_hash_calculator(filename):\n if file_validator(filename):\n md5_hash = hashlib.md5()\n with open(filename,\"rb\") as f:\n # Read and update hash in chunks of 4K\n for byte_block in iter(lambda: f.read(4096),b\"\"):\n md5_hash.update(byte_block)\n return md5_hash.hexdigest()\n\ndef smiles_validator(smiles,target):\n print(smiles)\n new_target = []\n valid_smiles = []\n for _,i in enumerate(smiles):\n try:\n if isinstance(MolFromSmiles(i),rdkit.Chem.rdchem.Mol):\n valid_smiles.insert(_,i)\n new_target.insert(_,target[_])\n except:\n print(smiles)\n \n if len(valid_smiles) == len(new_target):\n return [valid_smiles,new_target]\n \n\n\n\n\n","sub_path":"Proxy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"548807711","text":"# coding=utf-8\n__author__ = 'roman'\n\nimport time\nimport telegram\nfrom telegram.error import TelegramError\nimport vk_api\nfrom token import BOT_TOKEN\n\n\nAPI_URL = \"https://api.telegram.org/bot{token}/\".format(token=BOT_TOKEN)\n\nbot = telegram.Bot(token=BOT_TOKEN)\n# Словарь, содержащий user_id как ключ и (friend_id, vk_auth) как значение.\nvk_users = {}\n\n\n# Декоратор для запуска корутинов.\ndef coroutine(func):\n def wrap(*args, **kwargs):\n gen = func(*args, **kwargs)\n gen.send(None)\n return gen\n\n return wrap\n\n\n# Прослушиваем два сервера.\ndef listen_servers():\n last_message = 0\n while True:\n # Прослушивание сервера Telegram.\n try:\n information = bot.getUpdates(offset=last_message + 1)\n except (TelegramError, ValueError):\n information = []\n for info in information:\n last_message = info['update_id']\n sender_id = info['message']['chat']['id']\n sender_message = info['message']['text']\n if not sender_message:\n continue\n yield (sender_id, sender_message)\n\n # Прослушивание сервера vk.\n for vk_user in vk_users:\n vk_friend, vk_auth = vk_users[vk_user]\n try:\n unread = vk_auth.method('messages.getHistory',\n {'user_id': vk_friend})['unread']\n mes_from_friend = vk_auth.method('messages.getHistory',\n {'user_id': vk_friend,\n 'count': unread})['items'][::-1]\n for mes in mes_from_friend:\n vk_auth.method('messages.markAsRead',\n {'message_ids': mes['id']})\n yield (vk_user, mes)\n # Обход ошибки при отсутствии непрочитанных сообщений.\n except (KeyError, vk_api.vk_api.ApiError):\n pass\n except vk_api.vk_api.ApiHttpError:\n yield (vk_user, 'From bot:\\n'\n 'Проблемы с сервером VK, нужно немного подождать')\n time.sleep(10)\n\n time.sleep(1)\n\n\n# Слушает входящие сообщения.\n# При появлении нового юзера создает ему воркера.\ndef main():\n # Словарь, содержащий user_id как ключ и worker-а как значение\n workers = {}\n server = listen_servers()\n for info in server:\n if info[0] not in workers:\n workers[info[0]] = worker(info[0])\n workers[info[0]].send(info[1])\n\n\n# Worker, создающийся для каждого нового юзера.\n# Здесь вся логика.\n@coroutine\ndef worker(user):\n auth = None\n friend = None\n # Маркер для перебрасывания потока на капчу.\n captcha = None\n # Объект VkApi().\n vk = None\n service_commands = 
['/start', '/help', '/ping', '/connect', '/get_friends', '/auth']\n\n # Авторизация во Вконтакте.\n def authentication(mes):\n nonlocal captcha, vk, auth\n try:\n login = mes.split()[1]\n password = mes.split()[2]\n if auth:\n reauth = True\n else:\n reauth = False\n vk = vk_api.VkApi(login, password, app_id=5041371)\n try:\n vk.authorization(reauth=reauth)\n bot.sendMessage(chat_id=user, text='Вы успешно авторизованы. '\n 'Дальнейшие действия смотрите в /help')\n auth = vk\n except vk_api.vk_api.BadPassword:\n bot.sendMessage(chat_id=user, text='Неверный логин или пароль. Повторите авторизацию')\n except vk_api.vk_api.Captcha:\n bot.sendMessage(text='Captcha: {0} '.format(vk.captcha.get_url()),\n chat_id=user)\n captcha = True\n except vk_api.vk_api.SecurityCheck:\n print('Phone error')\n bot.sendMessage(chat_id=user, text='Он говорит, что вы заходите с необычного места. '\n 'Для входа используйте номер телефона вместо электронной почты.')\n except IndexError:\n bot.sendMessage(chat_id=user, text='Команда должна принимать два аргумента: login password. '\n 'И иметь вид /auth login password')\n\n def captcha_handler(key):\n nonlocal captcha, vk, auth\n try:\n vk.captcha.try_again(key)\n captcha = None\n auth = vk\n bot.sendMessage(chat_id=user, text='Вы успешно авторизованы. '\n 'Дальнейшие действия смотрите в /help')\n except vk_api.vk_api.Captcha:\n bot.sendMessage(chat_id=user, text='Неверно введена эта дурацкая капча.')\n bot.sendMessage(text='Captcha: {0} '.format(vk.captcha.get_url()),\n chat_id=user)\n except vk_api.vk_api.BadPassword:\n captcha = None\n bot.sendMessage(chat_id=user, text='Неверный логин или пароль. Повторите авторизацию.')\n return None\n except vk_api.vk_api.SecurityCheck:\n bot.sendMessage(chat_id=user, text='Он говорит, что вы заходите с необычного места. '\n 'Для входа используйте номер телефона вместо электронной почты.')\n return None\n\n def service_manager(mes):\n nonlocal auth\n if mes == '/start':\n bot.sendMessage(chat_id=user, text='Я предназначен для общения с определенным пользователем '\n 'из vk через Telegram.\\n'\n 'Для начала работы необходимо авторизоваться в vk и выбрать '\n 'собеседника\\n\\n'\n 'Полный список команд:\\n'\n '/ping -- Если бот активен, должно вернуть \"pong\"\\n'\n '/auth -- Авторизация в vk. Команда принимает два аргумента: '\n 'login password и имеет вид /auth login password\\n'\n '/connect -- Выбор пользователя из vk, сообщения которого '\n 'будут редиректится в Telegram. Команда принимает '\n 'id пользователя как аргумент и имеет вид /connect user_id\\n'\n '/get_friends -- Вывод списка первых num друзей. Команда принимает '\n 'количество друзей как аргумент и имеет вид /get_friends num '\n 'Без указания аргумента выводит список первых 7 друзей')\n elif mes == '/help':\n bot.sendMessage(chat_id=user, text='Я предназначен для пересылки сообщений из Вконтакте '\n 'в Telegram. Для начала работы необходимо авторизоваться в vk\\n\\n'\n 'Полный список команд:\\n'\n '/ping -- Если бот активен, должно вернуть \"pong\"\\n'\n '/auth -- Авторизация в vk. Команда принимает два аргумента: '\n 'login password и имеет вид /auth login password\\n'\n '/connect -- Выбор пользователя из vk, сообщения которого '\n 'будут редиректится в Telegram. Команда принимает '\n 'id пользователя как аргумент и имеет вид /connect user_id\\n'\n '/get_friends -- Вывод списка первых num друзей. 
Команда принимает '\n 'количество друзей как аргумент и имеет вид /get_friends num '\n 'Без указания аргумента выводит список первых 7 друзей')\n elif mes == '/ping':\n bot.sendMessage(chat_id=user, text='pong')\n elif mes.split()[0] == '/auth':\n authentication(mes)\n elif mes.split()[0] == '/connect':\n if auth:\n try:\n connect(mes.split()[1])\n except IndexError:\n bot.sendMessage(chat_id=user, text='Команда должна принимать id пользователя как аргумент '\n 'и иметь вид /connect user_id\\n'\n 'Если id пользователя вам неизвестно, воспользуйтесь '\n 'командой /get_friends')\n else:\n bot.sendMessage(chat_id=user, text='Для выбора собеседника необходимо авторизоваться. /help')\n elif mes.split()[0] == '/get_friends':\n if auth:\n if len(mes.split()) == 1:\n num = 7\n else:\n num = mes.split()[1]\n friend_string = ''\n friend_dict = auth.method('friends.get',\n {'fields': ('first_name', 'last_name'),\n 'order': 'hints',\n 'count': num})\n for item in friend_dict['items']:\n string = \"{} {}\\n/connect {} \\n\".format(item['last_name'], item['first_name'],\n item['id'])\n friend_string += string + '\\n'\n bot.sendMessage(chat_id=user, text=friend_string)\n else:\n bot.sendMessage(chat_id=user, text='Для получения списка друзей необходимо авторизоваться. /help')\n\n def connect(user_id):\n global vk_users\n nonlocal auth, friend\n friend = int(user_id)\n # Передача в глобальную переменную данных для просшлушивания сервера vk.\n vk_users[user] = (friend, auth)\n print(vk_users)\n friend_name = auth.method('users.get', {'user_ids': [friend],\n 'name_case': 'ins'})[0]\n friend_name = friend_name['first_name'] + ' ' + friend_name['last_name']\n bot.sendMessage(chat_id=user, text='Есть контакт! Теперь вы можете общаться с {} через Telegram!\\n'\n 'Покинуть этот диалог можно с помощью команды '\n '/stop_conversation\\n'\n 'Во время диалога отсутствует '\n 'возможность вызова других команд'.format(friend_name))\n\n def send_to_vk():\n auth.method('messages.send',\n {'user_id': friend,\n 'message': message})\n\n def message_from_vk():\n try:\n if 'body' in message:\n bot.sendMessage(chat_id=user, text=message['body'])\n if 'attachments' in message:\n for attachment in message['attachments']:\n if attachment['type'] == 'photo':\n photo = None\n if 'photo_1280' in attachment['photo']:\n photo = attachment['photo']['photo_1280']\n elif 'photo_807' in attachment['photo']:\n photo = attachment['photo']['photo_807']\n elif 'photo_604' in attachment['photo']:\n photo = attachment['photo']['photo_604']\n if photo:\n bot.sendPhoto(chat_id=user, photo=photo)\n except telegram.TelegramError:\n bot.sendMessage(chat_id=user, text='From bot:\\n'\n 'Вам прислали сообщение, которое я пока что не умею '\n 'пересылать в Telegram :(')\n\n while True:\n try:\n message = yield\n if isinstance(message, str):\n if friend:\n if message == '/stop_conversation':\n friend = None\n del vk_users[user]\n bot.sendMessage(chat_id=user, text='Готово. Теперь вы можете выбрать другого собеседника.')\n elif message == '/ping':\n bot.sendMessage(chat_id=user, text='From bot:\\n'\n 'pong :)')\n else:\n send_to_vk()\n\n elif message.split()[0] in service_commands and not friend:\n service_manager(message)\n elif captcha:\n try:\n captcha_handler(message)\n except StopIteration:\n pass\n else:\n message_from_vk()\n except (vk_api.vk_api.ApiError, vk_api.vk_api.ApiHttpError, TelegramError):\n bot.sendMessage(chat_id=user, text='From bot:\\n'\n 'Здесь какие-то проблемы на стороне сервера. 
'\n 'Нужно немного подождать.')\n time.sleep(4)\n\n\nmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"14581482","text":"# coding=utf-8\n\nimport django\ndjango.setup()\n\nfrom optparse import OptionParser\nimport unicodecsv as csv\nfrom ein_parser import segment_column\nfrom sources.functions import post_link\n\n\ndef validity_and_cluster(dict_list):\n clean = True\n all_clusters = []\n for i, row in enumerate(dict_list):\n col_refs = []\n if not row['Segment']:\n continue\n seg_ref = Ref(row['Segment']) # two lines so to see where it leaves the try on the ref creation?\n col_refs.append(seg_ref)\n for col in ['Rambam', 'Semag', 'Tur Shulchan Arukh']:\n try:\n if not row[col]:\n continue\n com_list = eval(row[col])\n for cit in com_list:\n r = Ref(cit)\n if r.is_empty():\n print(u\"Line: {} No text at Ref: {}\".format(i, cit))\n clean = False\n col_refs.append(r)\n except InputError as e:\n print(u\"Line: {} Failed to parse Ref: {}\".format(i, cit))\n clean = False\n except AttributeError as e:\n print(u\"Line: {} Malformed Ref: {}\".format(i, cit))\n clean = False\n all_clusters.append(col_refs)\n return (clean, all_clusters)\n\n# c - a list of refs all to be interconnected\ndef create_cluster(c, massekhet):\n return create_link_cluster(c, 30044, \"Ein Mishpat / Ner Mitsvah\",\n attrs={\"generated_by\": \"Ein Mishpat Cluster {}\".format(massekhet)},\n exception_pairs=[(\"Tur\", \"Shulchan Arukh\")])\n#\ndef save_links_local(dict_list, massekhet):\n v_and_c = validity_and_cluster(dict_list)\n if not v_and_c[0]:\n return\n for cluster in v_and_c[1]:\n create_cluster(cluster, massekhet)\n\ndef post_ein_mishpat(massekhet):\n query = {\"generated_by\":\"Ein Mishpat Cluster {}\".format(massekhet)}\n # query_talmud = {''' \"generated_by\": \"Ein Mishpat Cluster {}\", $and: [ {{ \"refs.0\": /.*{}.*/i }} ] '''.format(massekhet,massekhet)}\n # query_tush = {''' \"generated_by\": \"Ein Mishpat Cluster {}\", $and: [ {{ \"refs.0\": /.*{}.*/i }} ] '''.format(massekhet)}\n # query_rambam = {''' \"generated_by\": \"Ein Mishpat Cluster {}\", $and: [ {{ \"refs.0\": /.*{}.*/i }} ] '''.format(massekhet)}\n # query_semag = {''' \"generated_by\": \"Ein Mishpat Cluster {}\", $and: [ {{ \"refs.0\": /.*{}.*/i }} ] '''.format(massekhet)}\n linkset = LinkSet(query)\n links = [l.contents() for l in linkset]\n # for l in links:\n # l[\"generated_by\"] = \"Ein Mishpat Cluster\"\n post_link(links)\n return links\n\n# usage = \"\\n%prog [options] inputfile\\n inputfile is a TSV, with references in columns Q-U\"\n# parser = OptionParser(usage=usage)\n# parser.add_option(\"-c\", \"--check\", action=\"store_true\", dest=\"check_only\",\n# help=\"Check references only, don't write links\")\n# parser.add_option(\"-s\", \"--statcheck\", action=\"store_true\", dest=\"check_stats\",\n# help=\"Check for statistical outliers\")\n# parser.add_option(\"-f\", \"--force\", action=\"store_true\", dest=\"force\",\n# help=\"Force post of data, even with known errors\")\n# (options, args) = parser.parse_args()\n#\n# if len(args) != 1:\n# print \"Please supply a data file name on the command line.\"\n# exit()\n\n# Delay import, so that --help case doesn't delay on library load\nfrom sefaria.system.exceptions import InputError\nfrom sefaria.model import *\nfrom sefaria.helper.link import create_link_cluster\n\n\nif __name__ == \"__main__\":\n massekhet = 'Rif'\n # final_list = 
segment_column(u'done/niddah_little_letters.csv', u'done/niddah_little_letters.csv', massekhet, wikitext=False)\n final_list = segment_column(u'EM_Rif.csv', u'EM_Rif.csv', massekhet, wikitext=False)\n print(final_list)\n validation = validity_and_cluster(final_list)\n save_links_local(final_list, massekhet)\n links = post_ein_mishpat(massekhet)\n print('done')","sub_path":"sources/EinMishpat/parse_ein_mishpat.py","file_name":"parse_ein_mishpat.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"278219679","text":"import numpy as np\n\nimport openmdao.api as om\n\nfrom ..grid_data import GridData\nfrom ...utils.misc import get_rate_units, CoerceDesvar\nfrom ...utils.constants import INF_BOUND\n\n\nclass ControlInterpComp(om.ExplicitComponent):\n \"\"\"\n Compute the approximated control values and rates given the values of a control at all nodes,\n given values at the control discretization nodes.\n\n Notes\n -----\n .. math::\n\n u = \\\\left[ L \\\\right] u_d\n\n \\\\dot{u} = \\\\frac{d\\\\tau_s}{dt} \\\\left[ D \\\\right] u_d\n\n \\\\ddot{u} = \\\\left( \\\\frac{d\\\\tau_s}{dt} \\\\right)^2 \\\\left[ D_2 \\\\right] u_d\n\n where\n :math:`u_d` are the values of the control at the control discretization nodes,\n :math:`u` are the values of the control at all nodes,\n :math:`\\\\dot{u}` are the time-derivatives of the control at all nodes,\n :math:`\\\\ddot{u}` are the second time-derivatives of the control at all nodes,\n :math:`L` is the Lagrange interpolation matrix,\n :math:`D` is the Lagrange differentiation matrix,\n and :math:`\\\\frac{d\\\\tau_s}{dt}` is the ratio of segment duration in segment tau space\n [-1 1] to segment duration in time.\n \"\"\"\n\n def initialize(self):\n self.options.declare(\n 'control_options', types=dict,\n desc='Dictionary of options for the dynamic controls')\n self.options.declare(\n 'time_units', default=None, allow_none=True, types=str,\n desc='Units of time')\n self.options.declare(\n 'grid_data', types=GridData,\n desc='Container object for grid info')\n\n # Save the names of the dynamic controls/parameters\n self._dynamic_names = []\n self._input_names = {}\n self._output_val_names = {}\n self._output_rate_names = {}\n self._output_rate2_names = {}\n\n def _setup_controls(self):\n control_options = self.options['control_options']\n num_nodes = self.num_nodes\n num_control_input_nodes = self.options['grid_data'].subset_num_nodes['control_input']\n time_units = self.options['time_units']\n\n for name, options in control_options.items():\n self._input_names[name] = 'controls:{0}'.format(name)\n self._output_val_names[name] = 'control_values:{0}'.format(name)\n self._output_rate_names[name] = 'control_rates:{0}_rate'.format(name)\n self._output_rate2_names[name] = 'control_rates:{0}_rate2'.format(name)\n shape = options['shape']\n input_shape = (num_control_input_nodes,) + shape\n output_shape = (num_nodes,) + shape\n\n units = options['units']\n rate_units = get_rate_units(units, time_units)\n rate2_units = get_rate_units(units, time_units, deriv=2)\n\n self._dynamic_names.append(name)\n\n self.add_input(self._input_names[name], val=np.ones(input_shape), units=units)\n\n self.add_output(self._output_val_names[name], shape=output_shape, units=units)\n\n self.add_output(self._output_rate_names[name], shape=output_shape, units=rate_units)\n\n self.add_output(self._output_rate2_names[name], shape=output_shape,\n units=rate2_units)\n\n size = np.prod(shape)\n 
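# Illustrative aside (toy numbers, not dymos API): the docstring relations\n            # u = L @ u_d and du/dt = (D @ u_d) / dt_dstau can be sanity-checked with\n            #     L = np.array([[1., 0.], [.5, .5], [0., 1.]])   # 3 nodes from 2 inputs\n            #     D = np.array([[-1., 1.], [-1., 1.], [-1., 1.]])\n            #     u_d = np.array([2., 4.])\n            #     u, u_rate = L @ u_d, (D @ u_d) / 2.0           # dt_dstau taken as 2.0\n            # The dense Jacobians assembled below are block copies of L, D and D2, one\n            # block per element of a (possibly matrix-shaped) control.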
self.val_jacs[name] = np.zeros((num_nodes, size, num_control_input_nodes, size))\n self.rate_jacs[name] = np.zeros((num_nodes, size, num_control_input_nodes, size))\n self.rate2_jacs[name] = np.zeros((num_nodes, size, num_control_input_nodes, size))\n\n for i in range(size):\n self.val_jacs[name][:, i, :, i] = self.L\n self.rate_jacs[name][:, i, :, i] = self.D\n self.rate2_jacs[name][:, i, :, i] = self.D2\n self.val_jacs[name] = self.val_jacs[name].reshape((num_nodes * size,\n num_control_input_nodes * size),\n order='C')\n self.rate_jacs[name] = self.rate_jacs[name].reshape((num_nodes * size,\n num_control_input_nodes * size),\n order='C')\n self.rate2_jacs[name] = self.rate2_jacs[name].reshape((num_nodes * size,\n num_control_input_nodes * size),\n order='C')\n self.val_jac_rows[name], self.val_jac_cols[name] = \\\n np.where(self.val_jacs[name] != 0)\n self.rate_jac_rows[name], self.rate_jac_cols[name] = \\\n np.where(self.rate_jacs[name] != 0)\n self.rate2_jac_rows[name], self.rate2_jac_cols[name] = \\\n np.where(self.rate2_jacs[name] != 0)\n\n self.sizes[name] = size\n\n rs, cs = self.val_jac_rows[name], self.val_jac_cols[name]\n self.declare_partials(of=self._output_val_names[name],\n wrt=self._input_names[name],\n rows=rs, cols=cs, val=self.val_jacs[name][rs, cs])\n\n cs = np.tile(np.arange(num_nodes, dtype=int), reps=size)\n rs = np.concatenate([np.arange(0, num_nodes * size, size, dtype=int) + i\n for i in range(size)])\n\n self.declare_partials(of=self._output_rate_names[name],\n wrt='dt_dstau',\n rows=rs, cols=cs)\n\n self.declare_partials(of=self._output_rate_names[name],\n wrt=self._input_names[name],\n rows=self.rate_jac_rows[name], cols=self.rate_jac_cols[name])\n\n self.declare_partials(of=self._output_rate2_names[name],\n wrt='dt_dstau',\n rows=rs, cols=cs)\n\n self.declare_partials(of=self._output_rate2_names[name],\n wrt=self._input_names[name],\n rows=self.rate2_jac_rows[name], cols=self.rate2_jac_cols[name])\n\n def setup(self):\n num_nodes = self.options['grid_data'].num_nodes\n time_units = self.options['time_units']\n gd = self.options['grid_data']\n\n self.add_input('dt_dstau', shape=num_nodes, units=time_units)\n\n self.val_jacs = {}\n self.rate_jacs = {}\n self.rate2_jacs = {}\n self.val_jac_rows = {}\n self.val_jac_cols = {}\n self.rate_jac_rows = {}\n self.rate_jac_cols = {}\n self.rate2_jac_rows = {}\n self.rate2_jac_cols = {}\n self.sizes = {}\n self.num_nodes = num_nodes\n\n num_disc_nodes = gd.subset_num_nodes['control_disc']\n num_input_nodes = gd.subset_num_nodes['control_input']\n\n # Find the indexing matrix that, multiplied by the values at the input nodes,\n # gives the values at the discretization nodes\n L_id = np.zeros((num_disc_nodes, num_input_nodes), dtype=float)\n L_id[np.arange(num_disc_nodes, dtype=int),\n gd.input_maps['dynamic_control_input_to_disc']] = 1.0\n\n # Matrices L_da and D_da interpolate values and rates (respectively) at all nodes from\n # values specified at control discretization nodes.\n L_da, D_da = gd.phase_lagrange_matrices('control_disc', 'all')\n self.L = np.dot(L_da, L_id)\n self.D = np.dot(D_da, L_id)\n\n # Matrix D_dd interpolates rates at discretization nodes from values given at control\n # discretization nodes.\n _, D_dd = gd.phase_lagrange_matrices('control_disc', 'control_disc')\n\n # Matrix D2 provides second derivatives at all nodes given values at input nodes.\n self.D2 = np.dot(D_da, np.dot(D_dd, L_id))\n\n self._setup_controls()\n\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, 
outputs):\n control_options = self.options['control_options']\n\n for name, options in control_options.items():\n\n u = inputs[self._input_names[name]]\n\n a = np.tensordot(self.D, u, axes=(1, 0)).T\n b = np.tensordot(self.D2, u, axes=(1, 0)).T\n\n # divide each \"row\" by dt_dstau or dt_dstau**2\n outputs[self._output_val_names[name]] = np.tensordot(self.L, u, axes=(1, 0))\n outputs[self._output_rate_names[name]] = (a / inputs['dt_dstau']).T\n outputs[self._output_rate2_names[name]] = (b / inputs['dt_dstau'] ** 2).T\n\n def compute_partials(self, inputs, partials):\n control_options = self.options['control_options']\n num_input_nodes = self.options['grid_data'].subset_num_nodes['control_input']\n\n for name, options in control_options.items():\n control_name = self._input_names[name]\n\n size = self.sizes[name]\n rate_name = self._output_rate_names[name]\n rate2_name = self._output_rate2_names[name]\n\n # Unroll matrix-shaped controls into an array at each node\n u_d = np.reshape(inputs[control_name], (num_input_nodes, size))\n\n dt_dstau = inputs['dt_dstau']\n dt_dstau_tile = np.tile(dt_dstau, size)\n\n partials[rate_name, 'dt_dstau'] = \\\n (-np.dot(self.D, u_d).ravel(order='F') / dt_dstau_tile ** 2)\n\n partials[rate2_name, 'dt_dstau'] = \\\n -2.0 * (np.dot(self.D2, u_d).ravel(order='F') / dt_dstau_tile ** 3)\n\n dt_dstau_x_size = np.repeat(dt_dstau, size)[:, np.newaxis]\n\n r_nz, c_nz = self.rate_jac_rows[name], self.rate_jac_cols[name]\n partials[rate_name, control_name] = \\\n (self.rate_jacs[name] / dt_dstau_x_size)[r_nz, c_nz]\n\n r_nz, c_nz = self.rate2_jac_rows[name], self.rate2_jac_cols[name]\n partials[rate2_name, control_name] = \\\n (self.rate2_jacs[name] / dt_dstau_x_size ** 2)[r_nz, c_nz]\n\n\nclass ControlGroup(om.Group):\n\n def initialize(self):\n self.options.declare('control_options', types=dict,\n desc='Dictionary of options for the dynamic controls')\n self.options.declare('time_units', default=None, allow_none=True, types=str,\n desc='Units of time')\n self.options.declare('grid_data', types=GridData, desc='Container object for grid info')\n\n def setup(self):\n\n ivc = om.IndepVarComp()\n\n # opts = self.options\n gd = self.options['grid_data']\n control_options = self.options['control_options']\n time_units = self.options['time_units']\n\n if len(control_options) < 1:\n return\n\n opt_controls = [name for (name, opts) in control_options.items() if opts['opt']]\n\n if len(opt_controls) > 0:\n ivc = self.add_subsystem('indep_controls', subsys=om.IndepVarComp(),\n promotes_outputs=['*'])\n\n self.add_subsystem(\n 'control_interp_comp',\n subsys=ControlInterpComp(time_units=time_units, grid_data=gd,\n control_options=control_options),\n promotes_inputs=['*'],\n promotes_outputs=['*'])\n\n for name, options in control_options.items():\n if options['opt']:\n num_input_nodes = gd.subset_num_nodes['control_input']\n\n desvar_indices = list(range(gd.subset_num_nodes['control_input']))\n if options['fix_initial']:\n desvar_indices.pop(0)\n if options['fix_final']:\n desvar_indices.pop()\n\n if len(desvar_indices) > 0:\n coerce_desvar = CoerceDesvar(gd.subset_num_nodes['control_disc'],\n desvar_indices, options)\n\n lb = -INF_BOUND if coerce_desvar('lower') is None else coerce_desvar('lower')\n ub = INF_BOUND if coerce_desvar('upper') is None else coerce_desvar('upper')\n\n self.add_design_var(name='controls:{0}'.format(name),\n lower=lb,\n upper=ub,\n scaler=coerce_desvar('scaler'),\n adder=coerce_desvar('adder'),\n ref0=coerce_desvar('ref0'),\n ref=coerce_desvar('ref'),\n 
indices=desvar_indices)\n\n ivc.add_output(name='controls:{0}'.format(name),\n val=options['val'],\n shape=(num_input_nodes, np.prod(options['shape'])),\n units=options['units'])\n","sub_path":"dymos/transcriptions/common/control_group.py","file_name":"control_group.py","file_ext":"py","file_size_in_byte":12870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"236459887","text":"from numba import ir, types, typing, config, analysis\nfrom numba.typing.templates import signature\nimport numpy\nfrom numba.analysis import (compute_live_map, compute_use_defs,\n compute_cfg_from_blocks)\nimport copy\n\n_unique_var_count = 0\ndef mk_unique_var(prefix):\n global _unique_var_count\n var = prefix + \".\" + str(_unique_var_count)\n _unique_var_count = _unique_var_count + 1\n return var\n\n_max_label = 0\ndef next_label():\n global _max_label\n _max_label += 1\n return _max_label\n\ndef mk_alloc(typemap, calltypes, lhs, size_var, dtype, scope, loc):\n \"\"\"generate an array allocation with np.empty() and return list of nodes.\n size_var can be an int variable or tuple of int variables.\n \"\"\"\n out = []\n ndims = 1\n size_typ = types.intp\n if isinstance(size_var, tuple):\n if len(size_var) == 1:\n size_var = size_var[0]\n else:\n # tuple_var = build_tuple([size_var...])\n ndims = len(size_var)\n tuple_var = ir.Var(scope, mk_unique_var(\"$tuple_var\"), loc)\n typemap[tuple_var.name] = types.containers.UniTuple(types.intp, ndims)\n tuple_call = ir.Expr.build_tuple(list(size_var), loc)\n tuple_assign = ir.Assign(tuple_call, tuple_var, loc)\n out.append(tuple_assign)\n size_var = tuple_var\n size_typ = types.containers.UniTuple(types.intp, ndims)\n # g_np_var = Global(numpy)\n g_np_var = ir.Var(scope, mk_unique_var(\"$np_g_var\"), loc)\n typemap[g_np_var.name] = types.misc.Module(numpy)\n g_np = ir.Global('np', numpy, loc)\n g_np_assign = ir.Assign(g_np, g_np_var, loc)\n # attr call: empty_attr = getattr(g_np_var, empty)\n empty_attr_call = ir.Expr.getattr(g_np_var, \"empty\", loc)\n attr_var = ir.Var(scope, mk_unique_var(\"$empty_attr_attr\"), loc)\n typemap[attr_var.name] = get_np_ufunc_typ(numpy.empty)\n attr_assign = ir.Assign(empty_attr_call, attr_var, loc)\n # alloc call: lhs = empty_attr(size_var, typ_var)\n typ_var = ir.Var(scope, mk_unique_var(\"$np_typ_var\"), loc)\n typemap[typ_var.name] = types.functions.NumberClass(dtype)\n # assuming str(dtype) returns valid np dtype string\n np_typ_getattr = ir.Expr.getattr(g_np_var, str(dtype), loc)\n typ_var_assign = ir.Assign(np_typ_getattr, typ_var, loc)\n alloc_call = ir.Expr.call(attr_var, [size_var, typ_var], (), loc)\n calltypes[alloc_call] = typemap[attr_var.name].get_call_type(\n typing.Context(), [size_typ, types.functions.NumberClass(dtype)], {})\n #signature(\n # types.npytypes.Array(dtype, ndims, 'C'), size_typ,\n # types.functions.NumberClass(dtype))\n alloc_assign = ir.Assign(alloc_call, lhs, loc)\n\n out.extend([g_np_assign, attr_assign, typ_var_assign, alloc_assign])\n return out\n\ndef get_np_ufunc_typ(func):\n \"\"\"get type of the incoming function from builtin registry\"\"\"\n for (k,v) in typing.npydecl.registry.globals:\n if k==func:\n return v\n raise RuntimeError(\"type for func \", func, \" not found\")\n\ndef mk_range_block(typemap, size_var, calltypes, scope, loc):\n \"\"\"make a block that initializes loop range and iteration variables.\n target label in jump needs to be set.\n \"\"\"\n # g_range_var = Global(range)\n g_range_var = ir.Var(scope, 
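# What the IR built by mk_alloc above evaluates to, written as the
# equivalent plain Python (a sketch; the real code emits ir.Assign nodes):
import numpy as np

size_var = (3, 4)                       # the build_tuple result when ndims > 1
g_np_var = np                           # Global('np', numpy)
attr_var = getattr(g_np_var, "empty")   # Expr.getattr(g_np_var, 'empty')
typ_var = getattr(g_np_var, "float64")  # assumes str(dtype) names a np dtype
lhs = attr_var(size_var, typ_var)       # Expr.call(attr_var, [size_var, typ_var])
assert lhs.shape == (3, 4) and lhs.dtype == np.float64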
mk_unique_var(\"$range_g_var\"), loc)\n typemap[g_range_var.name] = _get_range_func_typ()\n g_range = ir.Global('range', range, loc)\n g_range_assign = ir.Assign(g_range, g_range_var, loc)\n # range_call_var = call g_range_var(size_var)\n range_call = ir.Expr.call(g_range_var, [size_var], (), loc)\n calltypes[range_call] = typemap[g_range_var.name].get_call_type(\n typing.Context(), [types.intp], {})\n #signature(types.range_state64_type, types.intp)\n range_call_var = ir.Var(scope, mk_unique_var(\"$range_c_var\"), loc)\n typemap[range_call_var.name] = types.iterators.RangeType(types.intp)\n range_call_assign = ir.Assign(range_call, range_call_var, loc)\n # iter_var = getiter(range_call_var)\n iter_call = ir.Expr.getiter(range_call_var ,loc)\n calltypes[iter_call] = signature(types.range_iter64_type,\n types.range_state64_type)\n iter_var = ir.Var(scope, mk_unique_var(\"$iter_var\"), loc)\n typemap[iter_var.name] = types.iterators.RangeIteratorType(types.intp)\n iter_call_assign = ir.Assign(iter_call, iter_var, loc)\n # $phi = iter_var\n phi_var = ir.Var(scope, mk_unique_var(\"$phi\"), loc)\n typemap[phi_var.name] = types.iterators.RangeIteratorType(types.intp)\n phi_assign = ir.Assign(iter_var, phi_var, loc)\n # jump to header\n jump_header = ir.Jump(-1, loc)\n range_block = ir.Block(scope, loc)\n range_block.body = [g_range_assign, range_call_assign, iter_call_assign,\n phi_assign, jump_header]\n return range_block\n\ndef _get_range_func_typ():\n \"\"\"get type variable for range() from builtin registry\"\"\"\n for (k,v) in typing.templates.builtin_registry.globals:\n if k==range:\n return v\n raise RuntimeError(\"range type not found\")\n\ndef mk_loop_header(typemap, phi_var, calltypes, scope, loc):\n \"\"\"make a block that is a loop header updating iteration variables.\n target labels in branch need to be set.\n \"\"\"\n # iternext_var = iternext(phi_var)\n iternext_var = ir.Var(scope, mk_unique_var(\"$iternext_var\"), loc)\n typemap[iternext_var.name] = types.containers.Pair(types.intp, types.boolean)\n iternext_call = ir.Expr.iternext(phi_var, loc)\n calltypes[iternext_call] = signature(\n types.containers.Pair(types.intp, types.boolean), types.range_iter64_type)\n iternext_assign = ir.Assign(iternext_call, iternext_var, loc)\n # pair_first_var = pair_first(iternext_var)\n pair_first_var = ir.Var(scope, mk_unique_var(\"$pair_first_var\"), loc)\n typemap[pair_first_var.name] = types.intp\n pair_first_call = ir.Expr.pair_first(iternext_var, loc)\n pair_first_assign = ir.Assign(pair_first_call, pair_first_var, loc)\n # pair_second_var = pair_second(iternext_var)\n pair_second_var = ir.Var(scope, mk_unique_var(\"$pair_second_var\"), loc)\n typemap[pair_second_var.name] = types.boolean\n pair_second_call = ir.Expr.pair_second(iternext_var, loc)\n pair_second_assign = ir.Assign(pair_second_call, pair_second_var, loc)\n # phi_b_var = pair_first_var\n phi_b_var = ir.Var(scope, mk_unique_var(\"$phi\"), loc)\n typemap[phi_b_var.name] = types.intp\n phi_b_assign = ir.Assign(pair_first_var, phi_b_var, loc)\n # branch pair_second_var body_block out_block\n branch = ir.Branch(pair_second_var, -1, -1, loc)\n header_block = ir.Block(scope, loc)\n header_block.body = [iternext_assign, pair_first_assign,\n pair_second_assign, phi_b_assign, branch]\n return header_block\n\ndef find_op_typ(op, arg_typs):\n for ft in typing.templates.builtin_registry.functions:\n if ft.key==op:\n func_typ = types.Function(ft).get_call_type(typing.Context(),\n arg_typs, {})\n if func_typ is not None:\n return func_typ\n 
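# The blocks assembled by mk_range_block/mk_loop_header above implement the
# iternext protocol; in plain Python the same control flow reads roughly:
def lowered_sum(n):
    acc = 0
    phi = iter(range(n))                  # getiter(range_call_var)
    while True:
        try:
            pair = (next(phi), True)      # iternext -> Pair(intp, boolean)
        except StopIteration:
            pair = (0, False)
        if not pair[1]:                   # Branch(pair_second_var, body, out)
            break
        acc += pair[0]                    # pair_first feeds the loop body
    return acc

assert lowered_sum(5) == 10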
raise RuntimeError(\"unknown array operation\")\n\ndef legalize_names(varnames):\n \"\"\"returns a dictionary for conversion of variable names to legal\n parameter names.\n \"\"\"\n var_map = {}\n for var in varnames:\n new_name = var.replace(\"_\",\"__\").replace(\"$\", \"_\").replace(\".\", \"_\")\n assert new_name not in var_map\n var_map[var] = new_name\n return var_map\n\ndef get_name_var_table(blocks):\n \"\"\"create a mapping from variable names to their ir.Var objects\"\"\"\n def get_name_var_visit(var, namevar):\n namevar[var.name] = var\n return var\n namevar = {}\n visit_vars(blocks, get_name_var_visit, namevar)\n return namevar\n\ndef replace_var_names(blocks, namedict):\n \"\"\"replace variables (ir.Var to ir.Var) from dictionary (name -> name)\"\"\"\n # remove identity values to avoid infinite loop\n new_namedict = {}\n for l,r in namedict.items():\n if l!=r:\n new_namedict[l] = r\n def replace_name(var, namedict):\n assert isinstance(var, ir.Var)\n while var.name in namedict:\n var = ir.Var(var.scope, namedict[var.name], var.loc)\n return var\n visit_vars(blocks, replace_name, new_namedict)\n\ndef replace_var_callback(var, vardict):\n assert isinstance(var, ir.Var)\n while var.name in vardict.keys():\n new_var = vardict[var.name]\n var = ir.Var(new_var.scope, new_var.name, new_var.loc)\n return var\n\ndef replace_vars(blocks, vardict):\n \"\"\"replace variables (ir.Var to ir.Var) from dictionary (name -> ir.Var)\"\"\"\n # remove identity values to avoid infinite loop\n new_vardict = {}\n for l,r in vardict.items():\n if l!=r.name:\n new_vardict[l] = r\n visit_vars(blocks, replace_var_callback, new_vardict)\n\ndef replace_vars_stmt(stmt, vardict):\n visit_vars_stmt(stmt, replace_var_callback, vardict)\n\ndef replace_vars_inner(node, vardict):\n return visit_vars_inner(node, replace_var_callback, vardict)\n\n# other packages that define new nodes add calls to visit variables in them\n# format: {type:function}\nvisit_vars_extensions = {}\n\ndef visit_vars(blocks, callback, cbdata):\n \"\"\"go over statements of block bodies and replace variable names with\n dictionary.\n \"\"\"\n for block in blocks.values():\n for stmt in block.body:\n visit_vars_stmt(stmt, callback, cbdata)\n return\n\ndef visit_vars_stmt(stmt, callback, cbdata):\n # let external calls handle stmt if type matches\n for t,f in visit_vars_extensions.items():\n if isinstance(stmt,t):\n f(stmt, callback, cbdata)\n return\n if isinstance(stmt, ir.Assign):\n stmt.target = visit_vars_inner(stmt.target, callback, cbdata)\n stmt.value = visit_vars_inner(stmt.value, callback, cbdata)\n elif isinstance(stmt, ir.Arg):\n stmt.name = visit_vars_inner(stmt.name, callback, cbdata)\n elif isinstance(stmt, ir.Return):\n stmt.value = visit_vars_inner(stmt.value, callback, cbdata)\n elif isinstance(stmt, ir.Branch):\n stmt.cond = visit_vars_inner(stmt.cond, callback, cbdata)\n elif isinstance(stmt, ir.Jump):\n stmt.target = visit_vars_inner(stmt.target, callback, cbdata)\n elif isinstance(stmt, ir.Del):\n # Because Del takes only a var name, we make up by\n # constructing a temporary variable.\n var = ir.Var(None, stmt.value, stmt.loc)\n var = visit_vars_inner(var, callback, cbdata)\n stmt.value = var.name\n elif isinstance(stmt, ir.DelAttr):\n stmt.target = visit_vars_inner(stmt.target, callback, cbdata)\n stmt.attr = visit_vars_inner(stmt.attr, callback, cbdata)\n elif isinstance(stmt, ir.SetAttr):\n stmt.target = visit_vars_inner(stmt.target, callback, cbdata)\n stmt.attr = visit_vars_inner(stmt.attr, callback, cbdata)\n 
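# legalize_names above relies on doubling '_' first so that the later
# '$' -> '_' and '.' -> '_' substitutions cannot collide with a pre-existing
# underscore; a quick self-contained check (re-stated locally so it runs):
def _legalize(varnames):
    return {v: v.replace("_", "__").replace("$", "_").replace(".", "_")
            for v in varnames}

m = _legalize(["$tuple_var.3", "a_b", "a.b"])
assert m["$tuple_var.3"] == "_tuple__var_3"
assert m["a_b"] == "a__b" and m["a.b"] == "a_b"
assert len(set(m.values())) == len(m)   # no collisions among legalized names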
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)\n elif isinstance(stmt, ir.DelItem):\n stmt.target = visit_vars_inner(stmt.target, callback, cbdata)\n stmt.index = visit_vars_inner(stmt.index, callback, cbdata)\n elif isinstance(stmt, ir.StaticSetItem):\n stmt.target = visit_vars_inner(stmt.target, callback, cbdata)\n stmt.index_var = visit_vars_inner(stmt.index_var, callback, cbdata)\n stmt.value = visit_vars_inner(stmt.value, callback, cbdata)\n elif isinstance(stmt, ir.SetItem):\n stmt.target = visit_vars_inner(stmt.target, callback, cbdata)\n stmt.index = visit_vars_inner(stmt.index, callback, cbdata)\n stmt.value = visit_vars_inner(stmt.value, callback, cbdata)\n else:\n pass # TODO: raise NotImplementedError(\"no replacement for IR node: \", stmt)\n return\n\ndef visit_vars_inner(node, callback, cbdata):\n if isinstance(node, ir.Var):\n return callback(node, cbdata)\n elif isinstance(node, list):\n return [visit_vars_inner(n, callback, cbdata) for n in node]\n elif isinstance(node, tuple):\n return tuple([visit_vars_inner(n, callback, cbdata) for n in node])\n elif isinstance(node, ir.Expr):\n # if node.op in ['binop', 'inplace_binop']:\n # lhs = node.lhs.name\n # rhs = node.rhs.name\n # node.lhs.name = callback, cbdata.get(lhs, lhs)\n # node.rhs.name = callback, cbdata.get(rhs, rhs)\n for arg in node._kws.keys():\n node._kws[arg] = visit_vars_inner(node._kws[arg], callback, cbdata)\n return node\n\ndef add_offset_to_labels(blocks, offset):\n \"\"\"add an offset to all block labels and jump/branch targets\n \"\"\"\n new_blocks = {}\n for l,b in blocks.items():\n term = b.body[-1]\n if isinstance(term, ir.Jump):\n term.target += offset\n if isinstance(term, ir.Branch):\n term.truebr += offset\n term.falsebr += offset\n new_blocks[l+offset] = b\n return new_blocks\n\ndef remove_dels(blocks):\n \"\"\"remove ir.Del nodes\"\"\"\n for block in blocks.values():\n new_body = []\n for stmt in block.body:\n if not isinstance(stmt, ir.Del):\n new_body.append(stmt)\n block.body = new_body\n return\n\ndef remove_dead(blocks, args):\n \"\"\"dead code elimination using liveness and CFG info\"\"\"\n cfg = compute_cfg_from_blocks(blocks)\n usedefs = compute_use_defs(blocks)\n live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)\n arg_aliases = find_potential_aliases(blocks, args)\n\n for label, block in blocks.items():\n # find live variables at each statement to delete dead assignment\n lives = { v.name for v in block.terminator.list_vars() }\n # find live variables at the end of block\n for out_blk, _data in cfg.successors(label):\n lives |= live_map[out_blk]\n if label in cfg.exit_points():\n lives |= arg_aliases\n remove_dead_block(block, lives, arg_aliases)\n return\n\n# other packages that define new nodes add calls to remove dead code in them\n# format: {type:function}\nremove_dead_extensions = {}\n\ndef remove_dead_block(block, lives, args):\n \"\"\"remove dead code using liveness info.\n Mutable arguments (e.g. 
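# remove_dead/remove_dead_block above in miniature: walk statements
# backwards, drop side-effect-free assignments whose target is not live,
# otherwise kill the target and add its uses (toy tuples, not numba IR):
def dce(stmts, live_out):
    live, kept = set(live_out), []
    for target, used in reversed(stmts):
        if target not in live:
            continue                       # dead, side-effect free: drop
        live.discard(target)               # defined here, so not live above
        live |= set(used)                  # its operands become live
        kept.append((target, used))
    kept.reverse()
    return kept

stmts = [("a", ["x"]), ("b", ["a"]), ("c", ["x"])]   # 'b' is never used
assert dce(stmts, {"a", "c"}) == [("a", ["x"]), ("c", ["x"])]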
arrays) that are not definitely assigned are live\n after return of function.\n \"\"\"\n # TODO: find mutable args that are not definitely assigned instead of\n # assuming all args are live after return\n\n # add statements in reverse order\n new_body = [block.terminator]\n # for each statement in reverse order, excluding terminator\n for stmt in reversed(block.body[:-1]):\n # let external calls handle stmt if type matches\n for t,f in remove_dead_extensions.items():\n if isinstance(stmt,t):\n f(stmt, lives, args)\n # ignore assignments that their lhs is not live or lhs==rhs\n if isinstance(stmt, ir.Assign):\n lhs = stmt.target\n rhs = stmt.value\n if lhs.name not in lives and has_no_side_effect(rhs, lives):\n continue\n if isinstance(rhs, ir.Var) and lhs.name==rhs.name:\n continue\n # TODO: remove other nodes like SetItem etc.\n\n lives |= { v.name for v in stmt.list_vars() }\n if isinstance(stmt, ir.Assign):\n lives.remove(lhs.name)\n for T, def_func in analysis.ir_extension_defs.items():\n if isinstance(stmt, T):\n lives -= def_func(stmt)\n new_body.append(stmt)\n new_body.reverse()\n block.body = new_body\n return\n\ndef has_no_side_effect(rhs, lives):\n # TODO: find side-effect free calls like Numpy calls\n if isinstance(rhs, ir.Expr) and rhs.op=='call':\n return False\n if isinstance(rhs, ir.Expr) and rhs.op=='inplace_binop':\n return rhs.lhs.name not in lives\n if isinstance(rhs, ir.Yield):\n return False\n return True\n\ndef find_potential_aliases(blocks, args):\n aliases = set(args)\n for bl in blocks.values():\n for instr in bl.body:\n if isinstance(instr, ir.Assign):\n expr = instr.value\n lhs = instr.target.name\n if isinstance(expr, ir.Var) and expr.name in aliases:\n aliases.add(lhs)\n return aliases\n\ndef copy_propagate(blocks, typemap):\n \"\"\"compute copy propagation information for each block using fixed-point\n iteration on data flow equations:\n in_b = intersect(predec(B))\n out_b = gen_b | (in_b - kill_b)\n \"\"\"\n cfg = compute_cfg_from_blocks(blocks)\n entry = cfg.entry_point()\n\n # format: dict of block labels to copies as tuples\n # label -> (l,r)\n c_data = init_copy_propagate_data(blocks, entry, typemap)\n (gen_copies, all_copies, kill_copies, in_copies, out_copies) = c_data\n\n old_point = None\n new_point = copy.deepcopy(out_copies)\n # comparison works since dictionary of built-in types\n while old_point!=new_point:\n for label in blocks.keys():\n if label==entry:\n continue\n predecs = [i for i,_d in cfg.predecessors(label)]\n # in_b = intersect(predec(B))\n in_copies[label] = out_copies[predecs[0]].copy()\n for p in predecs:\n in_copies[label] &= out_copies[p]\n\n # out_b = gen_b | (in_b - kill_b)\n out_copies[label] = (gen_copies[label]\n | (in_copies[label] - kill_copies[label]))\n old_point = new_point\n new_point = copy.deepcopy(out_copies)\n if config.DEBUG_ARRAY_OPT==1:\n print(\"copy propagate out_copies:\", out_copies)\n return in_copies, out_copies\n\ndef init_copy_propagate_data(blocks, entry, typemap):\n \"\"\"get initial condition of copy propagation data flow for each block.\n \"\"\"\n # gen is all definite copies, extra_kill is additional ones that may hit\n # for example, parfors can have control flow so they may hit extra copies\n gen_copies, extra_kill = get_block_copies(blocks, typemap)\n # set of all program copies\n all_copies = set()\n for l,s in gen_copies.items():\n all_copies |= gen_copies[l]\n kill_copies = {}\n for label, gen_set in gen_copies.items():\n kill_copies[label] = set()\n for lhs,rhs in all_copies:\n if lhs in 
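# The fixed point computed by copy_propagate above, run by hand on a toy
# diamond CFG (entry -> b1/b2 -> join); plain dicts stand in for the blocks:
gen = {"entry": {("a", "x")}, "b1": {("b", "a")}, "b2": set(), "join": set()}
kill = {"entry": set(), "b1": set(), "b2": {("b", "a")}, "join": set()}
preds = {"b1": ["entry"], "b2": ["entry"], "join": ["b1", "b2"]}
all_copies = {("a", "x"), ("b", "a")}

in_c = {l: set(all_copies) for l in gen}
in_c["entry"] = set()
out_c = {l: gen[l] | (in_c[l] - kill[l]) for l in gen}
changed = True
while changed:
    changed = False
    for label in ("b1", "b2", "join"):
        new_in = set.intersection(*(out_c[p] for p in preds[label]))
        new_out = gen[label] | (new_in - kill[label])
        if (new_in, new_out) != (in_c[label], out_c[label]):
            in_c[label], out_c[label], changed = new_in, new_out, True

assert in_c["join"] == {("a", "x")}   # ('b','a') dies on the b2 path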
extra_kill[label] or rhs in extra_kill[label]:\n kill_copies[label].add((lhs,rhs))\n # a copy is killed if it is not in this block and lhs or rhs are\n # assigned in this block\n assigned = { lhs for lhs,rhs in gen_set }\n if ((lhs,rhs) not in gen_set\n and (lhs in assigned or rhs in assigned)):\n kill_copies[label].add((lhs,rhs))\n # set initial values\n # all copies are in for all blocks except entry\n in_copies = { l:all_copies.copy() for l in blocks.keys() }\n in_copies[entry] = set()\n out_copies = {}\n for label in blocks.keys():\n # out_b = gen_b | (in_b - kill_b)\n out_copies[label] = (gen_copies[label]\n | (in_copies[label] - kill_copies[label]))\n out_copies[entry] = gen_copies[entry]\n return (gen_copies, all_copies, kill_copies, in_copies, out_copies)\n\n# other packages that define new nodes add calls to get copies in them\n# format: {type:function}\ncopy_propagate_extensions = {}\n\ndef get_block_copies(blocks, typemap):\n \"\"\"get copies generated and killed by each block\n \"\"\"\n block_copies = {}\n extra_kill = {}\n for label, block in blocks.items():\n assign_dict = {}\n extra_kill[label] = set()\n # assignments as dict to replace with latest value\n for stmt in block.body:\n for T,f in copy_propagate_extensions.items():\n if isinstance(stmt,T):\n gen_set, kill_set = f(stmt, typemap)\n for lhs,rhs in gen_set:\n assign_dict[lhs] = rhs\n extra_kill[label] |= kill_set\n if isinstance(stmt, ir.Assign):\n lhs = stmt.target.name\n if isinstance(stmt.value, ir.Var):\n rhs = stmt.value.name\n # copy is valid only if same type (see TestCFunc.test_locals)\n if typemap[lhs]==typemap[rhs]:\n assign_dict[lhs] = rhs\n continue\n extra_kill[label].add(lhs)\n block_copies[label] = set(assign_dict.items())\n return block_copies, extra_kill\n\n# other packages that define new nodes add calls to apply copy propagate in them\n# format: {type:function}\napply_copy_propagate_extensions = {}\n\ndef apply_copy_propagate(blocks, in_copies, name_var_table, ext_func, ext_data,\n typemap, calltypes):\n \"\"\"apply copy propagation to IR: replace variables when copies available\"\"\"\n for label, block in blocks.items():\n var_dict = {l:name_var_table[r] for l,r in in_copies[label]}\n # assignments as dict to replace with latest value\n for stmt in block.body:\n ext_func(stmt, var_dict, ext_data)\n for T,f in apply_copy_propagate_extensions.items():\n if isinstance(stmt,T):\n f(stmt, var_dict, name_var_table, ext_func, ext_data,\n typemap, calltypes)\n # only rhs of assignments should be replaced\n # e.g. 
if x=y is available, x in x=z shouldn't be replaced\n if isinstance(stmt, ir.Assign):\n stmt.value = replace_vars_inner(stmt.value, var_dict)\n else:\n replace_vars_stmt(stmt, var_dict)\n fix_setitem_type(stmt, typemap, calltypes)\n for T,f in copy_propagate_extensions.items():\n if isinstance(stmt,T):\n gen_set, kill_set = f(stmt, typemap)\n for lhs,rhs in gen_set:\n var_dict[lhs] = name_var_table[rhs]\n for l,r in var_dict.copy().items():\n if l in kill_set or r.name in kill_set:\n var_dict.pop(l)\n if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Var):\n lhs = stmt.target.name\n rhs = stmt.value.name\n # rhs could be replaced with lhs from previous copies\n if lhs!=rhs:\n # copy is valid only if same type (see TestCFunc.test_locals)\n if typemap[lhs]==typemap[rhs]:\n var_dict[lhs] = name_var_table[rhs]\n else:\n var_dict.pop(lhs, None)\n # a=b kills previous t=a\n lhs_kill = []\n for k,v in var_dict.items():\n if v.name==lhs:\n lhs_kill.append(k)\n for k in lhs_kill:\n var_dict.pop(k, None)\n return\n\ndef fix_setitem_type(stmt, typemap, calltypes):\n \"\"\"Copy propagation can replace setitem target variable, which can be array\n with 'A' layout. The replaced variable can be 'C' or 'F', so we update\n setitem call type reflect this (from matrix power test)\n \"\"\"\n if not isinstance(stmt, (ir.SetItem, ir.StaticSetItem)):\n return\n t_typ = typemap[stmt.target.name]\n s_typ = calltypes[stmt].args[0]\n # test_optional t_typ can be Optional with array\n if not isinstance(s_typ, types.npytypes.Array) or not isinstance(t_typ, types.npytypes.Array):\n return\n if s_typ.layout=='A' and t_typ.layout!='A':\n new_s_typ = s_typ.copy(layout=t_typ.layout)\n calltypes[stmt].args = (new_s_typ, calltypes[stmt].args[1], calltypes[stmt].args[2])\n return\n\n\ndef dprint_func_ir(func_ir, title):\n if config.DEBUG_ARRAY_OPT==1:\n name = func_ir.func_id.func_qualname\n print((\"IR %s: %s\" % (title, name)).center(80, \"-\"))\n func_ir.dump()\n print(\"-\"*40)\n\ndef find_topo_order(blocks):\n \"\"\"find topological order of blocks such that true branches are visited\n first (e.g. 
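# The rhs-only rule in apply_copy_propagate above, in one line of logic:
# with copy x = y available, rewriting the x in a later "x = z" would
# redirect the definition itself, so only operands are substituted.
copies = {"x": "y"}
def substitute(lhs, rhs):
    return (lhs, copies.get(rhs, rhs))    # never touch the lhs

assert substitute("x", "z") == ("x", "z")
assert substitute("t", "x") == ("t", "y")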
for_break test in test_dataflow).\n \"\"\"\n cfg = compute_cfg_from_blocks(blocks)\n post_order = []\n seen = set()\n\n def _dfs_rec(node):\n if node not in seen:\n seen.add(node)\n succs = cfg._succs[node]\n last_inst = blocks[node].body[-1]\n if isinstance(last_inst, ir.Branch):\n succs = [last_inst.falsebr, last_inst.truebr]\n for dest in succs:\n if (node, dest) not in cfg._back_edges:\n _dfs_rec(dest)\n post_order.append(node)\n\n _dfs_rec(cfg.entry_point())\n post_order.reverse()\n return post_order\n\ndef get_stmt_writes(stmt):\n writes = set()\n if isinstance(stmt, (ir.Assign, ir.SetItem, ir.StaticSetItem)):\n writes.add(stmt.target.name)\n return writes\n","sub_path":"numba/ir_utils.py","file_name":"ir_utils.py","file_ext":"py","file_size_in_byte":24737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"65161936","text":"#pip install to-ascii\n#pip install pyautogui\n#pip install subprocess\n\nfrom toascii import VideoConverter\nimport time\nimport subprocess as sp\nimport pyautogui\n\nvideoFileName = \"../badapple.mp4\"\ntxtFileName = \"BadTxt.txt\"\n\nv = VideoConverter(videoFileName, 0.1, 2.0, \"high\")\nprint(\"Converting video...\")\nv.convert()\nprint(\"Video Conversion Complete\")\n\nprint(\"Notepad write Start\")\nf = open(txtFileName, \"w\")\na = 0\nb = len(v.ascii_frames)\nspf = 1 / 30\nwhile a < b:\n c = str(v.ascii_frames[a])\n f.write('[-]\\n'+c+'\\n')\n a += 1\nf.close()\nprint(\"Notepad write Complete\")\n\nprint(\"start the badapple\")\nsp.Popen([\"notepad.exe\", txtFileName], stdout=sp.PIPE, shell=True)\ntime.sleep(1)\npyautogui.hotkey(\"ctrl\", \"f\")\npyautogui.write(\"[-]\")\ntime.sleep(3)\n\na = 0\ndelta = 0\n# print(\"a : \" + str(a) + \"b : \" + str(b) + \"delta : \" + str(delta))\nwhile a < b:\n start = time.time()\n pyautogui.press('enter', presses=int(30))\n delta = start - time.time()\n #time.sleep(1 - delta)\n print(\"Now frame : \" + str(int(a)) + \"delta : \" + str(delta))\n #pyautogui.keyDown(\"enter\")\n a += 30\n","sub_path":"BadApple_But_Its_NotePad/backup/NotFlickeringbadappleNotepad(A0).py","file_name":"NotFlickeringbadappleNotepad(A0).py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"564965909","text":"import random\nimport sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams[\"axes.labelweight\"] = \"bold\"\nplt.rcParams[\"font.weight\"] = \"bold\"\nplt.rcParams[\"font.size\"] = 16\nfrom matplotlib.figure import figaspect\nimport seaborn as sns\n\nimport scipy.stats\nfrom sklearn.linear_model import LinearRegression\n\nfrom scipy.stats import shapiro\nfrom statistics import mean\nfrom random import sample\n\nfrom scipy.stats import ttest_ind\nfrom scipy.stats import wilcoxon\nfrom scipy.stats import iqr\nimport scikit_posthocs as sp\n\n\nif len(sys.argv) < 3:\n print('Error: Arguments required')\n print('Run: python transform-data-to-sequence.py ')\n print('For example (Madrid Central and NO2 and spring): python transform-data-to-sequence.py 035 08 spring')\n\n sys.exit(0)\nelse:\n station = sys.argv[1]\n metric = sys.argv[2]\n season = sys.argv[3]\n data_path = '../data/csv/' + station + '-' + metric + '-sequence_air_all.csv'\n\n\n\ndataset = pd.read_csv(data_path, header=0, index_col=0)\n\ndf_pre_mc = dataset[(dataset.index >= \"2011-12-01\") & (dataset.index < \"2018-09-31\") & (dataset.season == season)]\ndf_post_mc = dataset[(dataset.index >= 
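# find_topo_order from ir_utils.py above, reduced to a reverse post-order
# DFS over a plain dict CFG (no back edges in this toy example):
def topo(succs, entry):
    post, seen = [], set()
    def dfs(n):
        if n in seen:
            return
        seen.add(n)
        for d in succs[n]:
            dfs(d)
        post.append(n)
    dfs(entry)
    return post[::-1]

# diamond: 0 -> {1, 2} -> 3; any valid order lists 0 first and 3 last
assert topo({0: [1, 2], 1: [3], 2: [3], 3: []}, 0) == [0, 2, 1, 3]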
\"2018-12-01\") & (dataset.index < \"2019-09-31\") & (dataset.season == season)]\n\n\nday_name = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n\ndef get_df_week_week(data):\n min_len = 99999999\n week_data = {}\n for i, day in enumerate(day_name):\n week_data[day] = list(data[data['weekday'] == i]['measure'])\n print(len(week_data[day]))\n if min_len > len(week_data[day]):\n min_len = len(week_data[day])\n\n # Igualamos los arrays para crear el DF\n for i, day in enumerate(day_name):\n week_data[day] = sample(week_data[day], min_len)\n\n return pd.DataFrame(week_data)\n\n\n#df_pre_mc = get_df_week_week(df_pre_mc)\ndf_pre_mc['period'] = 'Without pedestrianization'\n#week_post_mc = get_df_week_week(df_post_mc)\ndf_post_mc['period'] = 'With pedestrianization'\n\ndata = pd.concat([df_pre_mc, df_post_mc])\ndata.to_csv('../data/csv/data_for_boxplot.csv')\nprint(data)\n\n\n\n\nw, h = figaspect(3 / 8)\nf, ax = plt.subplots(figsize=(w, h))\nsns.set(style=\"whitegrid\")\nax.set_ylabel('NO$_2$ concentration', fontweight='bold')\n#ax = sns.boxplot(data=week_pre_mc[day_name], showfliers=False)\n#ax = sns.boxplot(data=week_post_mc[day_name], showfliers=False)\nsns.boxplot(y='measure', x='weekday', hue='period', data=data, showfliers=False).set(\n xlabel='',\n ylabel='NO$_2$ concentration'\n)\n\nax.set_xticklabels(day_name)\nplt.margins(x=0)\nplt.savefig(station + '-' + metric + '-' + season + '-comparison_no2_concentration_week.png')\n#plt.show()\n\n\ndef normality_test(algorithm, data, metric_title, alpha):\n print(data)\n stat, p = shapiro(data)\n stats_string = '{}\\t {}\\t {}\\t {}\\t {}'.format(algorithm, metric_title, p, stat, p maximum:\n offset_value = maximum\n\n l.append(offset_value)\n\n return tuple(l)\n\n\ndef random_position(problem):\n width = problem.width\n height = problem.height\n return (random.randint(0, width - 1), random.randint(0, height - 1))\n\n\ndef random_color():\n return (\n random.randint(0, 255),\n random.randint(0, 255),\n random.randint(0, 255),\n )\n\n\ndef offset_color(color, low=-10, high=10):\n return offset_iterable(color, low, high, 0, 255)\n\n\nclass BaseIndividual(object):\n SIZE_RATIO_MIN = 0.005\n SIZE_RATIO_MAX = 0.01\n\n def __init__(self, problem, fitness=None, **kwargs):\n self.problem = problem\n self.fitness = fitness\n\n def create_representation(self):\n raise NotImplementedError()\n\n def mutate(self):\n raise NotImplementedError()\n\n def breed_with(self, other):\n raise NotImplementedError()\n\n def json(self):\n return {\"fitness\" : self.fitness}\n\n\nclass Ellipse(BaseIndividual):\n def __init__(self, problem, center=None, axes=None, angle=None, color=None,\n startAngle=0, endAngle=360, thickness=-1, **kwargs):\n super(Ellipse, self).__init__(problem, **kwargs)\n\n if center is None:\n center = random_position(problem)\n else:\n center = tuple(center)\n\n if axes is None:\n axes = self._random_axes(problem)\n else:\n axes = tuple(axes)\n\n if angle is None:\n angle = random.randint(0, 360)\n\n if color is None:\n color = random_color()\n else:\n color = tuple(color)\n\n self.center = center\n self.axes = axes\n self.angle = angle\n self.color = color\n self.startAngle = startAngle\n self.endAngle = endAngle\n self.thickness = thickness\n\n def json(self):\n d = super(Ellipse, self).json()\n cls_d = {\n \"center\" : self.center,\n \"axes\" : self.axes,\n \"angle\" : self.angle,\n \"color\" : self.color,\n \"startAngle\" : self.startAngle,\n \"endAngle\" : self.endAngle,\n \"thickness\" : self.thickness,\n }\n 
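# The clamping idea behind offset_color/offset_iterable above (the helper's
# signature is partly lost in this record, so this is an inferred,
# standalone version, not the original):
import random

def offset_clamped(values, low=-10, high=10, minimum=0, maximum=255):
    out = []
    for v in values:
        v += random.randint(low, high)
        out.append(max(minimum, min(v, maximum)))
    return tuple(out)

random.seed(0)
assert all(0 <= c <= 255 for c in offset_clamped((250, 3, 128)))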
d.update(cls_d)\n return d\n\n def create_representation(self):\n mask = np.zeros_like(self.problem.image)\n d = {\n \"center\" : self.center,\n \"axes\" : self.axes,\n \"angle\" : self.angle,\n \"color\" : self.color,\n \"startAngle\" : self.startAngle,\n \"endAngle\" : self.endAngle,\n \"thickness\" : self.thickness,\n }\n\n cv2.ellipse(mask, **d)\n return mask\n\n def breed_with(self, other):\n color = converters.crossover_uint_iterables(\n self.color,\n other.color,\n 1,\n )\n center = converters.crossover_uint_iterables(\n self.center,\n other.center,\n 2,\n )\n axes = converters.crossover_uint_iterables(\n self.axes,\n other.axes,\n 2,\n )\n\n return self.__class__(\n self.problem,\n color=color,\n center=center,\n axes=axes,\n )\n\n def mutate(self):\n mutation_type = random.randint(1, 3)\n\n # center\n if mutation_type == 1:\n self.center = self.offset_center()\n # size / axes\n elif mutation_type == 2:\n self.axes = self.offset_axes()\n # color\n elif mutation_type == 3:\n self.color = offset_color(self.color)\n else:\n raise ValueError(\"should never reach here\")\n\n def offset_center(self, low=-10, high=10):\n return offset_iterable(self.center, low, high)\n\n def offset_axes(self, low=-10, high=10):\n return offset_iterable(self.axes, low, high, minimum=5)\n\n def _random_axes(self, problem):\n width = problem.width\n height = problem.height\n\n x_min = int(width * self.SIZE_RATIO_MIN)\n x_max = int(width * self.SIZE_RATIO_MAX)\n\n y_min = int(height * self.SIZE_RATIO_MIN)\n y_max = int(height * self.SIZE_RATIO_MAX)\n\n return (random.randint(x_min, x_max), random.randint(y_min, y_max))\n\n\nname_to_obj = {}\nto_check = [BaseIndividual]\nwhile to_check:\n cls = to_check.pop()\n for sub_cls in cls.__subclasses__():\n name = sub_cls.__name__\n if name not in name_to_obj:\n name_to_obj[name] = sub_cls\n to_check.append(sub_cls)\n","sub_path":"species.py","file_name":"species.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"228489360","text":"#File Object - Read\n#Using context manager ... 
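# The name_to_obj discovery loop above walks __subclasses__ transitively;
# the same pattern, exercised on throwaway classes:
class Base: pass
class A(Base): pass
class B(A): pass

registry, to_check = {}, [Base]
while to_check:
    cls = to_check.pop()
    for sub in cls.__subclasses__():
        if sub.__name__ not in registry:
            registry[sub.__name__] = sub
            to_check.append(sub)

assert registry == {"A": A, "B": B}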
This will automatically close file when done.\nwith open('data.txt', 'r') as f:\n array = []\n for line in f:\n line = line.split()\n if line:\n line = [int(i) for i in line]\n array.append(line)\n\ndef merge(a, b):\n c = [] # final output array\n a_idx, b_idx = 0,0\n while a_idx < len(a) and b_idx < len(b):\n if a[a_idx] < b[b_idx]:\n c.append(a[a_idx])\n a_idx+=1\n else:\n c.append(b[b_idx])\n b_idx+=1\n if a_idx == len(a): c.extend(b[b_idx:])\n else: c.extend(a[a_idx:])\n return c\n\ndef merge_sort(a):\n # a list of 0 or 1 elements is sorted by definition\n if len(a) <= 1: return a\n\n # split list in half and call merge sort recursively on each half\n # (floor division keeps the slice index an int on Python 3)\n left, right = merge_sort(a[:len(a)//2]), merge_sort(a[len(a)//2:])\n\n # merge the now-sorted sublists\n return merge(left, right)\n\ndef write_to(arr_in, f_in):\n for i in arr_in:\n f_in.write(str(i) + \" \")\n f_in.write(\"\\n\")\n\ndef main():\n f_out = open('merge.out', 'w')\n\n for i in range(0, len(array)):\n write_to(merge_sort(array[i][1:]), f_out)\n\n f_out.close()\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"HW1-insertsort-mergesort/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
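# Cross-check of the merge() step above: for already-sorted inputs the
# stdlib heapq.merge produces the same interleaving, and once one side is
# exhausted the other side's tail is copied verbatim:
import heapq
assert list(heapq.merge([1, 4], [2, 3])) == [1, 2, 3, 4]
assert list(heapq.merge([], [5])) == [5]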
import (\n users,\n teams,\n ctfs,\n challenges,\n categories,\n files,\n tags,\n)\n\nfrom ..models import (\n Ctf, CtfStats, SearchEngine, Team,\n Member,\n)\n\n\ndef index(request: HttpRequest) -> HttpResponse:\n \"\"\"\n Redirects to the dashboard\n \"\"\"\n teams = Team.objects.all()\n if teams.count() == 0:\n return redirect(\"ctfpad:team-register\")\n\n if Member.objects.all().count() == 0:\n return redirect(\"ctfpad:users-register\")\n\n return redirect(\"ctfpad:dashboard\")\n\n\n@only_if_authenticated_user\ndef dashboard(request: HttpRequest) -> HttpResponse:\n \"\"\"Dashboard view: contains basic summary of all the info in the ctfpad\n\n Args:\n request (HttpRequest): [description]\n\n Returns:\n HttpResponse: [description]\n \"\"\"\n user = request.user\n member = user.member\n if member.is_guest:\n members = Member.objects.filter( selected_ctf = member.selected_ctf )\n else:\n members = Member.objects.all()\n latest_ctfs = member.ctfs.order_by(\"-start_date\")\n now = datetime.datetime.now()\n nb_ctf_played = member.ctfs.count()\n current_ctfs = member.public_ctfs.filter(\n end_date__isnull=False,\n start_date__lte = now,\n end_date__gt = now,\n )\n next_ctf = member.public_ctfs.filter(\n end_date__isnull=False,\n start_date__gt=now,\n ).order_by(\"start_date\").first()\n context = {\n \"members\": members,\n \"latest_ctfs\": latest_ctfs[:10],\n \"current_ctfs\": current_ctfs,\n \"next_ctf\": next_ctf,\n \"nb_ctf_played\": nb_ctf_played,\n }\n return render(request, \"ctfpad/dashboard/dashboard.html\", context)\n\n\n@only_if_authenticated_user\ndef generate_stats(request: HttpRequest, year: int = None) -> HttpResponse:\n \"\"\"Generate some statistics of the CTFPad\n\n Args:\n request (HttpRequest): [description]\n\n Returns:\n HttpResponse: [description]\n \"\"\"\n if not year:\n return redirect(\"ctfpad:stats-detail\", year=datetime.datetime.now().year)\n\n stats = CtfStats(year)\n context = {\n \"team\": Team.objects.first(),\n \"members\": stats.members(),\n \"player_activity\": stats.player_activity(),\n \"category_stats\": stats.category_stats(),\n \"ctf_stats\": stats.ctf_stats(),\n \"ranking_stats\": stats.ranking_stats(),\n \"year_stats\": stats.year_stats(),\n \"year_pick\": year\n }\n return render(request, \"ctfpad/stats/detail.html\", context)\n\n\n\n\n@only_if_authenticated_user\ndef search(request: HttpRequest) -> HttpResponse:\n \"\"\"Search pattern(s) in database\n\n Args:\n request (HttpRequest): [description]\n\n Returns:\n HttpResponse: [description]\n \"\"\"\n q = request.GET.get(\"q\")\n if not q:\n messages.warning(request, f\"No search pattern given\")\n return redirect(\"ctfpad:dashboard\")\n\n search = SearchEngine(q)\n paginator = Paginator(search.results, 25)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context = {\n \"q\": q,\n \"selected_category\": search.selected_category or \"All\",\n \"total_result\": len(search.results),\n \"page_obj\": page_obj,\n \"paginator\": paginator,\n }\n return render(request, \"search/list.html\", context)\n\n\n@only_if_authenticated_user\ndef toggle_dark_mode(request: HttpRequest) -> HttpResponse:\n \"\"\"Toggle dark mode cookie for user\n\n Args:\n request (HttpRequest): [description]\n\n Returns:\n HttpResponse: [description]\n \"\"\"\n val = request.POST.get(\"darkModeCookie\")\n redirect_to = request.META.get(\"HTTP_REFERER\") or reverse(\"ctfpad:dashboard\")\n res = redirect(redirect_to)\n if val:\n res.set_cookie('theme', 'dark')\n else:\n res.set_cookie('theme', 
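# The "currently running" window used in the dashboard queryset above
# (start_date__lte=now, end_date__gt=now, end required), restated
# without the ORM:
import datetime

def is_current(start, end, now):
    return end is not None and start <= now < end

now = datetime.datetime(2021, 5, 1, 12, 0)
assert is_current(datetime.datetime(2021, 5, 1), datetime.datetime(2021, 5, 2), now)
assert not is_current(datetime.datetime(2021, 5, 2), datetime.datetime(2021, 5, 3), now)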
'light')\n return res\n\n","sub_path":"ctfpad/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"556650887","text":"# -*- coding: utf-8 -*-\nfrom typing import Dict, Text, Any, List, Union, Optional\n\nfrom rasa_sdk import Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom rasa_sdk.forms import FormAction\n\nclass UserInforForm(FormAction):\n \"\"\"\n Handle user information\n \"\"\"\n def name(self) -> Text:\n \"\"\"Unique identifier of the form\"\"\"\n\n return \"user_info_form\"\n\n @staticmethod\n def required_slots(tracker: Tracker) -> List[Text]:\n \"\"\"A list of required slots that the form has to fill\"\"\"\n return [\"user_name\", \"user_relationship\"]\n # return []\n def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n \"\"\"A dictionary to map required slots to\n - an extracted entity\n - intent: value pairs\n - a whole message\n or a list of them, where a first match will be picked\"\"\"\n return {\n \"user_name\": self.from_entity(entity=\"user_name\", intent=[\"introduce\"]),\n \"user_relationship\": [\n self.from_entity(entity=\"user_relationship\"),\n self.from_entity(intent=\"affirm_agree\", value=True),\n self.from_entity(intent=\"affirm_disagree\", value=False),\n ]\n }\n\n def submit(\n self,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> List[Dict]:\n \"\"\"Define what the form has to do\n after all required slots are filled\"\"\"\n # utter submit template\n user_name = tracker.get_slot('user_name')\n if tracker.get_slot('user_relationship'):\n user_relationship = \"yêu\"\n else:\n user_relationship = \"độc thân\"\n dispatcher.utter_template(\"utter_show_user_info\", tracker, user_name=user_name, user_relationship=user_relationship)\n return []\n","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"520531032","text":"from django import forms\nimport pytz\nimport datetime\n\n\nclass DateTimeForm(forms.Form):\n system_time = forms.CharField(required=False)\n system_date = forms.CharField(required=False)\n system_timezone = forms.ChoiceField(choices = [(item, item + ' ' + datetime.datetime.now(pytz.timezone(item)).strftime('%Z (GMT%z)')) for item in pytz.common_timezones], required=False)\n\n def clean(self):\n cd = super(DateTimeForm, self).clean()\n if ('system_time' not in cd or cd['system_time'] == '') and ('system_date' not in cd or cd['system_date'] == '') and ('system_timezone' not in cd or cd['system_timezone'] == '') :\n raise forms.ValidationError(\n \"Atleast date, time or timezone should be present\")\n else:\n if 'system_date' in cd:\n if cd['system_date'] == '':\n cd['system_date'] = None\n if 'system_time' in cd:\n if cd['system_time'] == '':\n cd['system_time'] = None\n if 'system_timezone' in cd:\n if cd['system_timezone'] == '':\n cd['system_timezone'] = None\n return cd\n","sub_path":"integral_view/forms/system_date_time_forms.py","file_name":"system_date_time_forms.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"303615494","text":"class Solution:\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n map = [-1] * 256\n res, left = 0, -1 # begin of the string\n for i in 
range(len(s)):\n left = max(map[ord(s[i])], left)\n map[ord(s[i])] = i\n res = max(i-left,res)\n return res","sub_path":"Python/Data Structure/String/Longest Substring Without Repeating Characters.py","file_name":"Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"398458695","text":"# encoding: utf8\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n \n dependencies = []\n\n operations = [\n migrations.CreateModel(\n fields = [(u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True),), ('descripcion', models.CharField(max_length=50),), ('latitud', models.FloatField(),), ('longitud', models.FloatField(),), ('activo', models.BooleanField(default=True),)],\n bases = (models.Model,),\n options = {},\n name = 'Zona',\n ),\n ]\n","sub_path":"foodzoneapp/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"494852256","text":"# -*- coding: utf-8 -*-\n# @Author: zyc\n# @Date: 2017-05-21 19:16:11\n# @Last Modified by: zyc\n# @Last Modified time: 2017-05-31 19:18:20\n# This script is to split the trajectory data into single frames.\n\nimport base\nimport readers\ntry:\n import numpy as np\nexcept:\n import mynumpy as np\nimport sys\nimport os\nimport getopt\n\nGROOVE_ENV_VAR = \"OXDNA_GROOVE\"\nPOS_BACK = -0.4\ntry:\n args, files = getopt.getopt(sys.argv[1:], \"-c\")\nexcept:\n base.Logger.log (\"wrong usage. aborting\", base.Logger.CRITICAL)\n sys.exit (-2)\n\nif len (files) < 2:\n base.Logger.log(\"Usage is %s configuration topolofgy\" % sys.argv[0], base.Logger.CRITICAL)\n sys.exit ()\n\nif len(files) > 2:\n #num_files= len(files)\n output = files[2]\nelse:\n num_files = 5000 # the total num of frames\n #output = files[0]+\n\nappend = 'w'\nl = readers.LorenzoReader(files[0],files[1])\ns = l.get_system()\ncount = 1\n\nwhile s:\n s._prepare(None)\n result = \"HEADER frame t= \" +str(s._time)\n commands = []\n commands.append(\"set bg_color white\")\n commands.append(\"~bond #0\")\n sel_ref = \"\"\n color = \"\"\n for ss in s._strands:\n strid = ss.index + 1\n nid = 0\n result += \"MODEL \" + str(strid) + \" \\n\"\n atomoutput = \"\"\n n_start = ss._first -1\n sel_ref += str(n_start) + \"\\n\"\n for nucleo in ss._nucleotides:\n nid += 1\n n_ind = nucleo.index - n_start\n # the s# holds the position vector of each nucleotide element\n s1 = nucleo.cm_pos_box + nucleo.get_pos_back_rel()\n index_jump = 2\n s3 = nucleo.cm_pos_box + (POS_BACK + 0.68) * nucleo._a1\n # print the backbone site\n atomoutput += \"ATOM %5d %4s %3s %c%4d%c %8.3f%8.3f%8.3f%6.2f%6.2f\\n\" % (\n index_jump * n_ind - 1, \"A\", \"ALA\", 'A', n_ind, ' ', s1[0], s1[1], s1[2], 1, 7.895)\n # print the base site\n atomoutput += \"ATOM %5d %4s %3s %c%4d%c %8.3f%8.3f%8.3f%6.2f%6.2f\\n\" % (\n index_jump * n_ind, 'N', \"ALA\", 'C', n_ind, ' ', s3[0], s3[1], s3[2], 1,\n 6.316)\n\n # get command file output\n if os.environ.get(GROOVE_ENV_VAR) == '1':\n commands.append(\"bond #0.%i:%i.A:%i.B\" % (strid, n_ind, n_ind))\n commands.append(\"bond #0.%i:%i.B:%i.C\" % (strid, n_ind, n_ind))\n else:\n commands.append(\"bond #0.%i:%i\" % (strid, n_ind))\n color += \"\\nbondcolor cyan #0.%i:%i\" % (strid, n_ind)\n\n if n_ind != 1:\n commands.append(\"bond #0.%i:%i.A,%i.A\" % (strid, n_ind - 
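# The sliding window above, restated with a dict instead of the 256-slot
# list and traced on "abba": at i=3 the max() keeps left from moving
# backwards past index 1, so the answer is 2 ("ab"/"bb"/"ba"), not 3.
def longest_unique(s):
    last, left, res = {}, -1, 0
    for i, ch in enumerate(s):
        left = max(last.get(ch, -1), left)   # window start: after last repeat
        last[ch] = i
        res = max(i - left, res)
    return res

assert longest_unique("abba") == 2
assert longest_unique("abcabcbb") == 3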
1, n_ind))\n\n result += atomoutput + \"TER \\nENDMDL \\n\"\n commands.append(\"setattr m stickScale 0.6 #0\")\n output = files[0]+str(count)+\".pdb\"\n f = open(output,append)\n f.write(result)\n f.close()\n\n f = open(\"chimera.com\", 'w')\n for c in commands:\n print >> f, c\n f.close()\n s=l.get_system()\n append = 'a'\n base.Logger.log(\"Finished frame %i\" % count, base.Logger.INFO)\n count += 1\n\nf=open(\"sel_ref\", \"w\")\nf.write(sel_ref)\nf.close()\nf = open(\"colorbond.com\", \"w\")\nf.write(color)\nf.close()\n\n\n\n\n","sub_path":"UTILS/tranj2single.py","file_name":"tranj2single.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"589017799","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nauthor: Niklas\n\n\"\"\"\n\nfrom Tkinter import Frame, Tk, BOTH, Text, Menu, END\nimport tkFileDialog \nfrom SportRanking import PCHAranking\n\nclass RankGui(Frame):\n \n def __init__(self, parent):\n Frame.__init__(self, parent) \n \n self.parent = parent \n self.initUI()\n \n \n def initUI(self):\n \n self.parent.title(\"File dialog\")\n self.pack(fill=BOTH, expand=1)\n \n menubar = Menu(self.parent)\n self.parent.config(menu=menubar)\n \n fileMenu = Menu(menubar)\n fileMenu.add_command(label=\"Open\", command=self.onOpen)\n menubar.add_cascade(label=\"File\", menu=fileMenu) \n \n self.txt = Text(self)\n self.txt.pack(fill=BOTH, expand=1)\n\n\n def onOpen(self):\n \n ftypes = [('CSV Files', '*.csv'), ('All files', '*')]\n dlg = tkFileDialog.Open(self, filetypes = ftypes)\n fl = dlg.show()\n \n if fl != '':\n text = self.readFile(fl)\n self.txt.insert(END, text)\n \n\n def readFile(self, filename):\n\n f = open(filename, \"r\")\n rankobj = PCHAranking(f)\n rpiranks = rankobj.RPI_ranking('ranking.csv')\n return(rpiranks)\n\ndef main():\n \n root = Tk()\n ex = RankGui(root)\n root.geometry(\"400x300\")\n root.mainloop() \n\n\nif __name__ == '__main__':\n main() \n","sub_path":"ranking_gui.py","file_name":"ranking_gui.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"235576819","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\nfrom matplotlib.ticker import AutoMinorLocator\n\nclass Measurement(object):\n def __init__(self, x_values, y_values, name, x_error = None, y_error = None):\n self.x_values = np.array(x_values)\n self.y_values = np.array(y_values)\n if(x_error == None):\n self.x_error = None\n else:\n self.x_error = np.array(x_error)\n\n if(y_error == None):\n self.y_error = None\n else:\n self.y_error = np.array(y_error)\n\n self.name = name\n \n def set_x_error(self, x_error):\n self.x_error = np.array(x_error)\n\n def set_y_error(self, y_error):\n self.y_error = np.array(y_error)\n\n def savegraph(self, xlabel, ylabel, markersize = 3, fmt = \"ko--\"):\n minorLocator = AutoMinorLocator()\n figure = plt.figure()\n figure.set_size_inches(7, 4)\n axis = figure.add_subplot(111)\n\n axis.plot(self.x_values, self.y_values, fmt, label = self.name, markersize = markersize)\n \n axis.xaxis.set_minor_locator(minorLocator)\n plt.xlim(min(self.x_values)-0.2, max(self.x_values)+0.2)\n\n axis.grid()\n axis.xaxis.grid(True, which='minor')\n axis.legend(loc=\"best\")\n plt.xlabel(xlabel) \n plt.ylabel(ylabel)\n figure.savefig(\"../bilder/\"+self.name, bbox_inches='tight',dpi=100)\n\ndef Linfunc(x, m, t):\n return x*m + t\n\nclass LinFit(object):\n def 
__init__(self, x_values, y_values):\n self.x_values = np.array(x_values)\n self.y_values = np.array(y_values)\n self.slope = 0\n self.slope_err = 0\n self.y_axis = 0 \n self.y_axis_error = 0\n self.calculate_Opt()\n \n def __str__(self): \n string = \"Slope: \" + str(self.slope) + \" +- \" + str(self.slope_err) + '\\n' + \"y_axis: \" + str(self.y_axis) + \" +- \" + str(self.y_axis_error)\n return string\n \n def calculate_Opt(self):\n (popt, pcov) = scipy.optimize.curve_fit(Linfunc, self.x_values, self.y_values)\n (self.slope, self.y_axis) = popt\n (self.slope_err, self.y_axis_error) = np.sqrt(np.diag(pcov))\n\n def calc(self, x): \n return float(self.slope) * float(x) + float(self.y_axis)\n\n\n\n # class colour_max_measurement(object):\n # def __init__(self, name, distance, x_values, y_values):\n # self.x_values = np.array(values)\n # self.y_values = np.array(y_values)\n # self.name = str(name)\n # self.distance = distance\n # self.linfit = None\n\n # def calculate_fit(self):\n # self.linfit = LinFit()\n","sub_path":"calc/Measurement.py","file_name":"Measurement.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"384716325","text":"import random\nprint(\"Dise 2.0 test version \\n \")\ndef dise(x):# the whole process happens here\n number_of_dise=x # number of dice to roll\n dise_count = list(range(1, number_of_dise + 1)) # +1: range upper bound is exclusive, so all x dice get rolled\n # roll the dice\n znach_count=[]\n for znach_add in dise_count:\n znach_count.append(random.randrange(1,7,1)) \n #print(znach_count) # randomness check\n # lucky-die roll\n lucky=random.randrange(1,7,1)\n # extra lucky-die roll used when a 1 comes up\n unlucky=random.randrange(1,7,1)\n # extra lucky-die roll used when a 6 comes up\n lucky_6=random.randrange(1,7,1)\n # print the results\n itog=sum(znach_count)+lucky\n a=0\n for dise in dise_count: \n print(\"DISE_\"+ str(dise)+\" result \"+str(znach_count[a]))\n a=a+1\n print(\"_Lucky_ \"+\" result \"+str(lucky)+\"\\n \")\n print(\"Dice total: \" +str(itog)+\"\\n \")\n\n\n if lucky<2:\n print(\"The lucky die shows \" +str(lucky)) \n print(\"subtracting the highest result \"+str(max(znach_count)))\n prom=itog-max(znach_count)\n print(\"Total \"+str(prom)) \n print(\"the lucky die shows \"+str(unlucky)+ \" which must also be subtracted\")\n result_lucky=prom-unlucky\n print(\"Total \"+ str(result_lucky))\n elif lucky ==6:\n print(\"The lucky die shows \" +str(lucky))\n print(\"lucky! rolling the lucky die again\")\n verylucky=[]\n verylucky.append(itog)\n #print (verylucky)\n lucky_6=random.randrange(1,7,1)\n verylucky.append(lucky_6)\n print (\"Rolled \"+ str(lucky_6))\n print(\"Total \" +str(sum(verylucky)))\n while lucky_6==6:\n print(\"lucky again! rolling the lucky die once more\") \n lucky_6=random.randrange(1,7,1)\n verylucky.append(lucky_6)\n print (\"Rolled \"+ str(lucky_6))\n print(\"Total \" +str(sum(verylucky))+\"\\n \") \n \n \n\na=input(\"Roll the dice Y/N \")\nwhile a.lower()==\"y\" or a.lower()==\"н\": # Cyrillic 'н' sits on the Y key of a Russian layout\n dise(int(input(\"How many dice to roll: \")))\n a=input(\"Roll AGAIN Y/N \")\nelse:\n print(\"Bye Bye\")","sub_path":"dise2.0.py","file_name":"dise2.0.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"503920905","text":"import datetime\n\nfrom django.http import HttpResponse, HttpResponseRedirect\n# Create your views here.\nfrom django.shortcuts import render\n\nfrom tuck_shop.models import Item, ThisTotal, TotalCash\n\n\ndef index(request):\n return 
HttpResponse('items are here!')\n\n\ndef current_stock(request, sale_pk):\n items = Item.objects.filter(stock__gt=0)\n\n sale = ThisTotal.objects.get_or_create(pk=sale_pk)\n\n total, created = TotalCash.objects.get_or_create(date_of_sale__exact=datetime.date.today())\n if created:\n total.object.total = 0\n total.object.date_of_sale = datetime.date.today()\n total.save()\n\n return render(request, 'tuck_shop/stock.htm', {'items': items, \"this_total\": sale, \"todays_total\": total})\n\n\ndef sell_stock(request, item_pk, sale_pk):\n item = Item.objects.get(pk=item_pk)\n this_sale = ThisTotal.objects.get(pk=sale_pk)\n\n item.stock = item.stock - 1\n item.save()\n\n this_sale = this_sale + item.price\n this_sale.save()\n\n return HttpResponseRedirect('/tuck_shop/current_stock/' + str(sale_pk))\n","sub_path":"tuck_shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"1654245","text":"def solution(bridge_length, weight, truck_weights):\n \n land = []\n bridge = []\n ready = []\n \n def move(bridge):\n for b in range(len(bridge)):\n bridge[b][1] = bridge[b][1]-1\n return bridge\n \n for t in truck_weights:\n land.append([t,bridge_length])\n bridge.append(land.pop(0))\n bridge = move(bridge)\n \n time = 1\n \n while land != [] or bridge !=[]:\n \n bridge = move(bridge)\n \n if ready == [] and land != [ ]:\n ready.append(land.pop(0))\n \n if bridge[0][1] == -1:\n bridge = bridge[1:]\n \n if sum([i for i,j in bridge+ready]) <= weight and ready != [ ]:\n ready = move(ready)\n bridge.append(ready.pop(0))\n \n time += 1\n \n return time","sub_path":"Programmers/2단계_다리를_지나는_트럭.py","file_name":"2단계_다리를_지나는_트럭.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"648990075","text":"#\n# XCoin API-call related functions\n#\n# @author\tbtckorea\n# @date\t2017-04-12\n#\n# Compatible with python3 version.\n\nimport sys\nimport time\nimport math\nimport base64\nimport hmac, hashlib\nimport urllib.parse\nimport pycurl\nimport certifi # added\nimport json\nimport io\n\n\nclass XCoinAPI:\n\tapi_url = \"https://api.bithumb.com\";\n\t#api_key = \"\"\n\t#api_secret = \"\"\n\n\tdef __init__(self, api_key, api_secret):\n\t\tself.api_key = api_key;\n\t\tself.api_secret = api_secret;\n\t\tself.contents=\"\"\n\n\t\t\"\"\" 나는 이렇게 고쳤는데, 그리 좋지 않았다.\n\tdef body_callback(self, buf):\n\t\t#self.contents = buf;\n\t\tif hasattr(self, 'contents'):\t\t\t#!!!\n\t\t\tself.contents = self.contents + buf\n\t\telse:\n\t\t\tself.contents = buf\n\t\t\"\"\"\n\n\t#출처 : https://m.blog.naver.com/PostView.nhn?blogId=whdghk414141&logNo=221124992851&targetKeyword=&targetRecommendationCode=1\n\t#하지만 다음과 같은 에러가 나타남.\n\n#2018-05-12 13:15:20\n#Error occurred: 'NoneType' object is not subscriptable\n#Traceback (most recent call last):\n# File \"c:\\Dropbox\\git\\Crypto\\BithumbExPython3\\xcoin_api_client.py\", line 41, in body_callback\n# def body_callback(self, buf):\n#KeyboardInterrupt\n#Error occurred: (23, 'Failed writing body (0 != 236)')\n#Json Load Error Extra data: line 1 column 1942 (char 1941)\n#2018-05-12 13:15:22\n#Error occurred: 'NoneType' object is not subscriptable\n#Json Load Error Extra data: line 1 column 1942 (char 1941)\n\t\t\t\n\tdef body_callback(self, buf):\n\t\tm_buf = buf\n\t\tif \"byte\" in str(type(buf)):\n\t\t\tm_buf = m_buf.decode(\"utf-8\");\n\n\t\t# 만약 fragment data가 중복해서 요청한다면 데이터가 섞이게 됨\n\t\tif 
m_buf[0] == \"{\" and m_buf[-1] == \"}\":\n\t\t\tself.contents = m_buf\n\t\telse:\n\t\t\tself.contents += m_buf; \n\n\tdef microtime(self, get_as_float = False):\n\t\tif get_as_float:\n\t\t\treturn time.time()\n\t\telse:\n\t\t\treturn '%f %d' % math.modf(time.time())\n\n\tdef usecTime(self) :\n\t\tmt = self.microtime(False)\n\t\tmt_array = mt.split(\" \")[:2];\n\t\treturn mt_array[1] + mt_array[0][2:5];\n\n\tdef xcoinApiCall(self, endpoint, rgParams): # endpoint = url \n\t\t# 1. Api-Sign and Api-Nonce information generation.\n\t\t# 2. Request related information from the Bithumb API server.\n\t\t#\n\t\t# - nonce: it is an arbitrary number that may only be used once.\n\t\t# - api_sign: API signature information created in various combinations values.\n\n\t\tendpoint_item_array = {\n\t\t\t\"endpoint\" : endpoint\n\t\t};\n\n\t\turi_array = dict(endpoint_item_array, **rgParams); # Concatenate the two arrays.\n\n\t\tstr_data = urllib.parse.urlencode(uri_array);\n\n\t\tnonce = self.usecTime();\n\n\t\tdata = endpoint + chr(0) + str_data + chr(0) + nonce;\n\t\tutf8_data = data.encode('utf-8');\n\n\t\tkey = self.api_secret;\n\t\tutf8_key = key.encode('utf-8');\n\n\t\th = hmac.new(bytes(utf8_key), utf8_data, hashlib.sha512);\n\t\thex_output = h.hexdigest();\n\t\tutf8_hex_output = hex_output.encode('utf-8');\n\n\t\tapi_sign = base64.b64encode(utf8_hex_output);\n\t\tutf8_api_sign = api_sign.decode('utf-8');\n\n\t\tbuffer = io.BytesIO()\n\t\t#buffer = io.StringIO()\n\n\t\tcurl_handle = pycurl.Curl();\n\t\tcurl_handle.setopt(pycurl.CAINFO, certifi.where()) # added\n\t\tcurl_handle.setopt(pycurl.POST, 1);\n\t\tcurl_handle.setopt(pycurl.VERBOSE, 0); # vervose mode :: 1 => True, 0 => False\n\t\tcurl_handle.setopt(pycurl.POSTFIELDS, str_data);\n\n\t\turl = self.api_url + endpoint;\n\t\tcurl_handle.setopt(curl_handle.URL, url);\n\t\tcurl_handle.setopt(curl_handle.HTTPHEADER, ['Api-Key: ' + self.api_key, 'Api-Sign: ' + utf8_api_sign, 'Api-Nonce: ' + nonce]);\n\t\t#curl_handle.setopt(curl_handle.WRITEFUNCTION, self.body_callback);\n\t\tcurl_handle.setopt(curl_handle.WRITEFUNCTION, buffer.write); ### self.contents\n\t\ttry:\n\t\t\tcurl_handle.perform();\n\t\texcept Exception as e:\n\t\t\tcurl_handle.close();\n\t\t\tprint(\"Json Load Error\", e)\n\t\t\treturn\n\n\t\t#response_code = curl_handle.getinfo(pycurl.RESPONSE_CODE); # Get http response status code.\n\n\t\tcurl_handle.close();\n\n\t\ttry:\n\t\t\tresult = json.loads(buffer.getvalue().decode('utf-8')) ### self.contents\n\t\t\t#result =json.loads(self.contents)\n\t\texcept Exception as e:\n\t\t\tprint(\"Json Load Error\", e)\n\t\t\treturn\n\t\t#self.contents = json.loads(buffer.getvalue().decode('utf-8'))\n\t\t\n\t\treturn(result)\n\t\t#print(self.contents)\n\t\t#return (json.loads(self.contents));\n","sub_path":"BithumbExPython3/xcoin_api_client.py","file_name":"xcoin_api_client.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"168348567","text":"import socket \n\nHOST = ''\nPORT = 9999\nBU = 1024\nADDR = (HOST, PORT)\n\nss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nss.bind(ADDR)\nss.listen(5)\n\nwhile True:\n\tprint('waiting for connection...')\n\tcs, address = ss.accept()\n\tprint('connected: ', address)\n\tcs.send('Input your name please: '.encode('utf-8'))\n\twhile True:\n\t\tdata = cs.recv(BU)\n\t\tif not data:\n\t\t\tbreak\n\t\tcs.send(('hello, ' + 
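# The signing recipe in xcoinApiCall above, isolated with dummy values
# (stdlib only, no network call; key and nonce here are placeholders):
import base64, hashlib, hmac, urllib.parse

endpoint = "/info/balance"
str_data = urllib.parse.urlencode({"currency": "BTC"})
nonce = "1526098520123"                    # dummy microsecond nonce
secret = "dummy-secret".encode("utf-8")

data = (endpoint + chr(0) + str_data + chr(0) + nonce).encode("utf-8")
hex_output = hmac.new(secret, data, hashlib.sha512).hexdigest()
api_sign = base64.b64encode(hex_output.encode("utf-8")).decode("utf-8")
assert len(hex_output) == 128              # sha512 -> 128 hex chars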
data.decode('utf-8')).encode('utf-8'))\n\tcs.close()\n\nss.close()","sub_path":"web/test_socket_server.py","file_name":"test_socket_server.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"242994631","text":"# noiselevel.py - Noise level estimator\r\n# -----------------------------------------------\r\n# This file is a part of DeerLab. License is MIT (see LICENSE.md).\r\n# Copyright(c) 2019-2021: Luis Fabregas, Stefan Stoll and other contributors.\r\n\r\nfrom numpy import isreal, std, mean, shape, atleast_1d\r\nfrom deerlab.utils import movmean\r\nfrom deerlab import correctphase\r\nfrom scipy.signal import savgol_filter\r\nimport warnings\r\n\r\ndef noiselevel(V,*args):\r\n r\"\"\"\r\n Returns the standard deviation estimation of the noise in a given signal using different methods:\r\n\r\n\r\n * ``sigma = noiselevel(V2D)``: If ``V`` is a 2D-dataset of different scans, the noise standard deviation\r\n is estimated from the deviations between scans. The second dimension of\r\n ``V2D`` must contain the different scans. The function returns the standard\r\n deviation of the averaged signal not of the individual scans\r\n\r\n * ``sigma = noiselevel(V,filter)``: If a 1D signal ``V`` is given, the noise level is estimated via filtering\r\n of the signal with a moving mean filter. The nature of the filter can\r\n be specified by means of a string ``filter``.\r\n\r\n * ``sigma = noiselevel(V,Vref)``: If a reference model signal ``Vref`` is given, the noise level is\r\n estimated from the difference between both signals.\r\n\r\n * ``sigma = noiselevel(Vco)``: If the input signal ``Vco`` contains an imaginary component, the noise\r\n level is estimated form the imaginary component after phase optimization.\r\n\r\n Parameters\r\n ----------\r\n V2D : 2D-array_like\r\n Datasets of single scans of a dipolar signal.\r\n V : array_like\r\n Real-valued dipolar signal\r\n filter : string\r\n Filtering method:\r\n\r\n * ``'movmean'`` - Moving mean filter\r\n * ``'savgol'`` - Savitzky-Golay filter\r\n \r\n The default is ``'movmean'``\r\n \r\n Vref : array_like\r\n Reference dipolar signal.\r\n Vco : array_like\r\n Complex-valued dipolar signal.\r\n \r\n Returns\r\n -------\r\n sigma : scalar\r\n Estimated noise standard deviation\r\n \r\n \"\"\"\r\n\r\n # Parse the multiple input schemes\r\n # --------------------------------\r\n\r\n V = atleast_1d(V)\r\n\r\n # Input: noiselevel(V2D)\r\n if V.ndim == 2:\r\n estimationMethod = '2D'\r\n if args:\r\n raise KeyError('For 2D-datasets, only one input is required.')\r\n\r\n # Input: noiselevel(V)\r\n elif V.ndim==1 and not args and all(isreal(V)):\r\n estimationMethod = 'filtering'\r\n filterType = 'movmean'\r\n\r\n # Input: noiselevel(Vco)\r\n elif V.ndim==1 and not args and not all(isreal(V)):\r\n estimationMethod = 'complex'\r\n \r\n # Input: noiselevel(V,filter)\r\n elif V.ndim==1 and type(args[0]) is str and all(isreal(V)):\r\n estimationMethod = 'filtering'\r\n filterType = args[0]\r\n\r\n # Input: noiselevel(V,Vref)\r\n elif V.ndim==1 and type(args[0]) is not str and all(isreal(V)):\r\n estimationMethod = 'reference'\r\n Vref = args[0]\r\n if len(V) != len(Vref):\r\n raise TypeError('The input and reference signal must have the same number of elements.') \r\n else:\r\n raise KeyError('The input is not valid.')\r\n\r\n\r\n # Estimation of the noise level\r\n # -----------------------------\r\n\r\n if estimationMethod == '2D':\r\n # Estimate standard deviations 
for all time point, and average over scans\r\n if shape(V)[1] < 10:\r\n raise Warning('Only a few scans are given. Noise standard deviation estimate will be inaccurate.')\r\n sigma = std(V,1)\r\n sigma = mean(sigma)\r\n \r\n elif estimationMethod == 'filtering':\r\n # Filter the noise in the signal \r\n if filterType == 'movmean':\r\n Vfilt = movmean(V,3)\r\n elif filterType == 'savgol':\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore')\r\n Vfilt = savgol_filter(V,11,3)\r\n else:\r\n raise TypeError(\"Filter type not found. Must be either 'savgol' or 'movmean'.\")\r\n # And estimate the noiselevel from the resulting residual\r\n sigma = std(V - Vfilt)\r\n \r\n elif estimationMethod == 'complex':\r\n # Optimize the phase of the signal\r\n _,Vim,_ = correctphase(V,full_output=True)\r\n # And estimate the noiselevel from the imaginary part\r\n sigma = std(Vim)\r\n \r\n elif estimationMethod == 'reference':\r\n sigma = std(V - Vref)\r\n\r\n return sigma\r\n","sub_path":"deerlab/noiselevel.py","file_name":"noiselevel.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"433517168","text":"import tensorflow as tf\nfrom tensorflow.contrib import rnn, legacy_seq2seq\n\nclass Model:\n def __init__(self, step_size, input_size, hidden_size, output_size, layer_depth, batch_size, dropout_rate, feed_previous):\n self.step_size = step_size\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.layer_depth = layer_depth\n self.batch_size = batch_size\n self.dropout_rate = dropout_rate\n self.feed_previous = feed_previous\n\n with tf.name_scope('input_layer'):\n self.add_input_layer()\n\n with tf.variable_scope('rnn_encoder_layer'):\n self.add_rnn_encoder_layer()\n\n with tf.variable_scope('rnn_decoder_layer'):\n self.add_rnn_decoder_layer()\n\n with tf.variable_scope('output_layer'):\n self.add_output_layer()\n\n with tf.name_scope('compute_cost'):\n self.compute_mse_cost()\n # self.compute_entropy_cost()\n\n if not feed_previous:\n with tf.name_scope('train_step'):\n self.add_train_step()\n\n def add_input_layer(self):\n self.xs = tf.placeholder(\n tf.float64,\n [None, self.step_size, self.input_size],\n name='xs'\n )\n self.ys = tf.placeholder(\n tf.float64,\n [None, self.step_size, self.output_size],\n name='ys'\n )\n self.learning_rate = tf.placeholder(\n tf.float32,\n name='learning_rate'\n )\n\n def lstm_cell(self, output_size):\n cell = rnn.BasicLSTMCell(\n num_units=output_size,\n forget_bias=1.0,\n state_is_tuple=True,\n # activation=tf.nn.sigmoid,\n activation=tf.nn.tanh,\n reuse=tf.get_variable_scope().reuse\n )\n return rnn.DropoutWrapper(cell, input_keep_prob=1 - self.dropout_rate)\n\n def add_rnn_encoder_layer(self):\n layers = rnn.MultiRNNCell(\n [self.lstm_cell(self.hidden_size) for _ in range(self.layer_depth)],\n state_is_tuple=True\n )\n _, self.final_enc_states = tf.nn.dynamic_rnn(\n layers,\n self.xs,\n dtype=tf.float64\n )\n\n def add_rnn_decoder_layer(self):\n # https://stackoverflow.com/questions/36994067/no-feed-previous-argument-for-tensorflow-basic-rnn-seq2seq-function\n weights = tf.Variable(tf.random_normal(\n [self.hidden_size, self.output_size],\n # mean=0.5,\n stddev=0.5,\n dtype=tf.float64\n ))\n biases = tf.Variable(tf.random_normal(\n [self.output_size],\n # mean=0.5,\n stddev=0.5,\n dtype=tf.float64\n ))\n def inference_loop_function(prev, _):\n return tf.matmul(prev, weights) + biases\n\n loop_function = 
inference_loop_function if self.feed_previous else None\n layers = rnn.MultiRNNCell(\n [self.lstm_cell(self.hidden_size) for i in range(self.layer_depth)],\n state_is_tuple=True\n )\n outputs, self.cell_states = legacy_seq2seq.rnn_decoder(\n tf.unstack(self.ys, axis=1),\n self.final_enc_states,\n layers,\n loop_function=loop_function\n )\n\n self.cell_outputs = tf.stack(outputs, axis=1)\n\n def add_output_layer(self):\n # self.cell_outputs shape = (batch_size, step_size, hidden_size)\n # extracted_outputs shape = (batch_size, step_size * hidden_size)\n extracted_outputs = tf.reshape(\n self.cell_outputs,\n [-1, self.step_size * self.hidden_size]\n )\n weights = tf.Variable(tf.random_normal(\n [self.step_size * self.hidden_size, self.step_size * self.output_size],\n # mean=0.5,\n stddev=0.5,\n dtype=tf.float64\n ))\n biases = tf.Variable(tf.random_normal(\n [self.step_size * self.output_size],\n # mean=0.5,\n stddev=0.5,\n dtype=tf.float64\n ))\n\n # shape = (batch_size, step_size, output_size)\n linear_combination_of_outputs = tf.reshape(\n tf.matmul(extracted_outputs, weights) + biases,\n [-1, self.step_size, self.output_size]\n )\n self.prediction = linear_combination_of_outputs\n self.prediction = tf.nn.sigmoid(linear_combination_of_outputs)\n # self.prediction = tf.nn.tanh(linear_combination_of_outputs)\n # self.prediction = tf.nn.softplus(linear_combination_of_outputs)\n\n #\n # def compute_entropy_cost(self):\n # def ms_error(labels, logits):\n # return tf.square(tf.subtract(labels, logits))\n # # def ms_error(y_target, y_prediction):\n # # return tf.square(tf.subtract(y_target, y_prediction))\n #\n # current_batch_size = tf.shape(self.prediction)[0]\n # losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n # [tf.reshape(self.prediction, [-1])],\n # [tf.reshape(self.ys, [-1])],\n # [tf.ones([current_batch_size], dtype=tf.float64)],\n # average_across_timesteps=True,\n # softmax_loss_function=ms_error\n # )\n # self.error = tf.reduce_mean(losses)\n\n def compute_mse_cost(self):\n self.error = tf.losses.mean_squared_error(\n self.ys[:, 0, 0],\n self.prediction[:, 0, 0]\n )\n\n def add_train_step(self):\n # self.train_step = tf.train.MomentumOptimizer(self.learning_rate, 0.9).minimize(self.error)\n # self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.error)\n self.train_step = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.error)\n","sub_path":"src/models/EncDec.py","file_name":"EncDec.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"34316793","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\n\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nfrom torchvision.transforms import Compose, ToTensor, Resize\nfrom torchvision.datasets import ImageFolder\nfrom torch.utils.data import (\n Dataset,\n DataLoader,\n Subset\n)\nfrom customDataset import LungXrayDataset\n\n# other\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport numpy as np\nimport warnings\nfrom skimage import io\n# from PIL import Image\nimport time\nimport copy\nimport cv2\n\nfrom tqdm import tqdm\nwarnings.filterwarnings(\"ignore\")\n\n# Extract pretrained activations\n#######################################################################\n# This following code was borrowed from https://www.kaggle.com/debarshichanda/gradcam-visualize-your-cnn\n# author: Debarshi 
Chanda\n\n#######################################################################\n\nclass FeatureExtractor():\n \"\"\" Class for extracting activations and\n registering gradients from targetted intermediate layers \"\"\"\n\n def __init__(self, model, target_layers):\n self.model = model\n self.target_layers = target_layers\n self.gradients = []\n\n def save_gradient(self, grad):\n self.gradients.append(grad)\n\n def __call__(self, x):\n outputs = []\n self.gradients = []\n for name, module in self.model._modules.items():\n x = module(x)\n if name in self.target_layers:\n x.register_hook(self.save_gradient)\n outputs += [x]\n return outputs, x\n\n\nclass ModelOutputs():\n \"\"\" Class for making a forward pass, and getting:\n 1. The network output.\n 2. Activations from intermeddiate targetted layers.\n 3. Gradients from intermeddiate targetted layers. \"\"\"\n\n def __init__(self, model, feature_module, target_layers):\n self.model = model\n self.feature_module = feature_module\n self.feature_extractor = FeatureExtractor(\n self.feature_module, target_layers)\n\n def get_gradients(self):\n return self.feature_extractor.gradients\n\n def __call__(self, x):\n target_activations = []\n for name, module in self.model._modules.items():\n if module == self.feature_module:\n target_activations, x = self.feature_extractor(x)\n elif \"avgpool\" in name.lower():\n x = module(x)\n x = x.view(x.size(0), -1)\n else:\n x = module(x)\n\n return target_activations, x\n\n\ndef preprocess_image(img):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.2255])\n preprocessing = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n return preprocessing(img.copy()).unsqueeze(0)\n\n\ndef show_cam_on_image(img, mask):\n heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)\n heatmap = np.float32(heatmap) / 255\n cam = heatmap + np.float32(img)\n cam = cam / np.max(cam)\n return np.uint8(255 * cam)\n\n\nclass GradCam:\n def __init__(self, model, feature_module, target_layer_names, use_cuda):\n self.model = model\n self.feature_module = feature_module\n self.model.eval()\n self.cuda = use_cuda\n if self.cuda:\n self.model = model.cuda()\n\n self.extractor = ModelOutputs(\n self.model, self.feature_module, target_layer_names)\n\n def forward(self, input_img):\n return self.model(input_img)\n\n def __call__(self, input_img, target_category=None):\n if self.cuda:\n input_img = input_img.cuda()\n\n features, output = self.extractor(input_img)\n\n if target_category == None:\n target_category = np.argmax(output.cpu().data.numpy())\n\n one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)\n one_hot[0][target_category] = 1\n one_hot = torch.from_numpy(one_hot).requires_grad_(True)\n if self.cuda:\n one_hot = one_hot.cuda()\n\n one_hot = torch.sum(one_hot * output)\n\n self.feature_module.zero_grad()\n self.model.zero_grad()\n one_hot.backward(retain_graph=True)\n\n grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy()\n\n target = features[-1]\n target = target.cpu().data.numpy()[0, :]\n\n weights = np.mean(grads_val, axis=(2, 3))[0, :]\n cam = np.zeros(target.shape[1:], dtype=np.float32)\n\n for i, w in enumerate(weights):\n cam += w * target[i, :, :]\n\n cam = np.maximum(cam, 0)\n cam = cv2.resize(cam, input_img.shape[2:])\n cam = cam - np.min(cam)\n cam = cam / np.max(cam)\n return cam\n\n\ndef deprocess_image(img):\n \"\"\" see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 \"\"\"\n img = img - np.mean(img)\n img = 
img / (np.std(img) + 1e-5)\n img = img * 0.1\n img = img + 0.5\n img = np.clip(img, 0, 1)\n return np.uint8(img*255)\n\n\ndef store_gradcam_image(model, feature_module, target_layer_names, model_name, label, i):\n if not os.path.exists('gradcam_images'):\n os.makedirs('gradcam_images')\n \n img_path = os.path.join(\n f'.\\\\COVID-19_Radiography_Dataset\\\\{label}', f'{label}-{i}.png')\n img = cv2.imread(img_path, 1)\n img = np.float32(img) / 255\n img = img.astype('float32')\n\n # Opencv loads as BGR:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n input_img = preprocess_image(img)\n\n grad_cam = GradCam(model=model, feature_module=feature_module,\n target_layer_names= target_layer_names, use_cuda=torch.cuda.is_available())\n\n # If None, returns the map for the highest scoring category.\n # Otherwise, targets the requested category.\n target_category = None\n grayscale_cam = grad_cam(input_img, target_category)\n\n grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0]))\n cam = show_cam_on_image(img, grayscale_cam)\n cv2.imwrite(f\".\\\\gradcam_images\\\\cam_{label}-{i}_{model_name}.jpg\", cam)\n \n def display(model_name, label, i):\n fig, ax = plt.subplots(1, 2, figsize=(8, 8))\n img_path = os.path.join(\n f'.\\\\COVID-19_Radiography_Dataset\\\\{label}', f'{label}-{i}.png')\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n cam = cv2.imread(f\".\\\\gradcam_images\\\\cam_{label}-{i}_{model_name}.jpg\")\n cam = cv2.cvtColor(cam, cv2.COLOR_BGR2RGB)\n ax[0].imshow(img)\n ax[1].imshow(cam)\n ax[0].set_title(f'Original {label}-{model_name}')\n ax[1].set_title(f'Grad Cam {label}-{model_name}')\n fig.savefig(os.path.join('gradcam_images', f'Grad Cam {label}-{model_name}.jpg'))\n \n display(model_name, label, i)\n\n return None\n","sub_path":"covid19-detection-computer-vision/helper functions/helper_Gradcam.py","file_name":"helper_Gradcam.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"291595334","text":"import logging\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom ..event_check import require_event_code\n\nfrom mercury.forms import EventForm\nfrom mercury.models import Event\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.ERROR)\n\n\nclass CreateEventsView(TemplateView):\n \"\"\"This is the view for creating a new event.\"\"\"\n\n template_name = \"events.html\"\n\n @require_event_code\n def get(self, request, *args, **kwargs):\n events = Event.objects.all().order_by(\"id\")\n event_form = EventForm()\n context = {\"event_form\": event_form, \"events\": events}\n return render(request, self.template_name, context)\n\n @require_event_code\n def post(self, request, *args, **kwargs):\n if \"submit\" in request.POST:\n post_event_name = request.POST.get(\"event_name\")\n post_event_location = request.POST.get(\"event_location\")\n post_event_date = request.POST.get(\"date\")\n post_event_comments = request.POST.get(\"comments\")\n event_data = Event(\n event_name=post_event_name,\n event_location=post_event_location,\n date=post_event_date,\n comments=post_event_comments,\n )\n event_data.save()\n events = Event.objects.all().order_by(\"id\")\n event_form = EventForm()\n context = {\"event_form\": event_form, \"events\": events}\n return render(request, \"events.html\", 
context)\n","sub_path":"mercury/views/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"617742307","text":"# Lesson 5. Video lesson. OOP basics\n\n# 1. Create a Word class.\nclass Word:\n # 2. Add the text and part of speech properties.\n # 3. Make it possible to create a word object with the values given in parentheses.\n def __init__(self, text='', part_of_speech=''):\n self.text = text\n self.part_of_speech = part_of_speech\n\n\n# 4. Create a Sentence class\nclass Sentence:\n # 5. Add a content property equal to a list consisting of the words that make up the sentence.\n content = []\n\n # 6. Add a show method that assembles the sentence.\n def show(self):\n for word in self.content:\n print(word.text, end=' ')\n print('\\n')\n\n # 7. Add a show_parts method that displays which parts of speech occur in the sentence.\n def show_parts(self):\n print('Parts of speech of the sentence members')\n for word in self.content:\n print(word.part_of_speech)\n\n def __init__(self, *args):\n self.content = args\n\n\nwords = [\n Word('Evening', 'noun'),\n Word('was', 'verb'),\n Word('warm', 'adjective'),\n Word('and', 'conjunction'),\n Word('quiet', 'adjective')\n]\n\nsentence = Sentence(*words)\nsentence.show()\nsentence.show_parts()\n","sub_path":"oop-basic.py","file_name":"oop-basic.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"541093516","text":"# -*- coding: utf-8 -*-\n#\n# DIM-SDK : Decentralized Instant Messaging Software Development Kit\n#\n# Written in 2019 by Moky \n#\n# ==============================================================================\n# MIT License\n#\n# Copyright (c) 2019 Albert Moky\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ==============================================================================\n\n\"\"\"\n Facebook\n ~~~~~~~~\n\n Barrack for cache entities\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Optional, List\n\nfrom dimp.mkm.address import thanos\nfrom dimp import EntityType, ID\nfrom dimp import User, Group, BaseUser, BaseGroup\nfrom dimp import Meta, Document\nfrom dimp import Barrack\n\nfrom .mkm import ServiceProvider, Station, Bot\n\n\nclass Facebook(Barrack, ABC):\n\n def __init__(self):\n super().__init__()\n # memory caches\n self.__users = {} # ID -> User\n self.__groups = {} # ID -> Group\n\n def reduce_memory(self) -> int:\n \"\"\"\n Call it when received 'UIApplicationDidReceiveMemoryWarningNotification',\n this will remove 50% of cached objects\n\n :return: number of survivors\n \"\"\"\n finger = 0\n finger = thanos(self.__users, finger)\n finger = thanos(self.__groups, finger)\n return finger >> 1\n\n # private\n def cache_user(self, user: User):\n if user.data_source is None:\n user.data_source = self\n self.__users[user.identifier] = user\n\n # private\n def cache_group(self, group: Group):\n if group.data_source is None:\n group.data_source = self\n self.__groups[group.identifier] = group\n\n @abstractmethod\n def save_meta(self, meta: Meta, identifier: ID) -> bool:\n \"\"\"\n Save meta for entity ID (must verify first)\n\n :param meta: entity meta\n :param identifier: entity ID\n :return: True on success\n \"\"\"\n raise NotImplemented\n\n @abstractmethod\n def save_document(self, document: Document) -> bool:\n \"\"\"\n Save entity document with ID (must verify first)\n\n :param document: entity document\n :return: True on success\n \"\"\"\n raise NotImplemented\n\n #\n # Document checking\n #\n def check_document(self, document: Document) -> bool:\n \"\"\"\n Checking document\n\n :param document: entity document\n :return: True on accepted\n \"\"\"\n identifier = document.identifier\n # NOTICE: if this is a bulletin document for group,\n # verify it with the group owner's meta.key\n # else (this is a visa document for user)\n # verify it with the user's meta.key\n if identifier.is_group:\n # check by group owner's meta.key\n owner = self.owner(identifier=identifier)\n if owner is not None:\n meta = self.meta(identifier=owner)\n elif identifier.type == EntityType.GROUP:\n # NOTICE: if this is a polylogue profile\n # verify it with the founder's meta.key\n # (which equals to the group's meta.key)\n meta = self.meta(identifier=identifier)\n else:\n # FIXME: owner not found for this group\n return False\n else:\n # check by user's meta.key\n meta = self.meta(identifier=identifier)\n if meta is not None:\n return document.verify(public_key=meta.key)\n\n # protected\n # noinspection PyMethodMayBeStatic\n def create_user(self, identifier: ID) -> Optional[User]:\n # TODO: make sure visa key exists before calling this\n network = identifier.type\n # check user type\n if network == EntityType.STATION:\n # TODO: get station ip,port before create it\n # return Station(identifier=identifier, host='0.0.0.0', port=0)\n return Station(identifier=identifier)\n elif network == EntityType.BOT:\n return Bot(identifier=identifier)\n # general user, or 'anyone@anywhere'\n return 
BaseUser(identifier=identifier)\n\n # protected\n # noinspection PyMethodMayBeStatic\n def create_group(self, identifier: ID) -> Optional[Group]:\n # TODO: make group meta exists before calling this\n network = identifier.type\n # check group type\n if network == EntityType.ISP:\n return ServiceProvider(identifier=identifier)\n # general group, or 'everyone@everywhere'\n return BaseGroup(identifier=identifier)\n\n @property\n @abstractmethod\n def local_users(self) -> List[User]:\n \"\"\"\n Get all local users (for decrypting received message)\n\n :return: users with private key\n \"\"\"\n raise NotImplemented\n\n def select_user(self, receiver: ID) -> Optional[User]:\n \"\"\" Select local user for receiver \"\"\"\n users = self.local_users\n if len(users) == 0:\n assert False, 'local users should not be empty'\n # return None\n elif receiver.is_broadcast:\n # broadcast message can decrypt by anyone, so just return current user\n return users[0]\n elif receiver.is_user:\n # 1. personal message\n # 2. split group message\n for item in users:\n if item.identifier == receiver:\n # DISCUSS: set this item to be current user?\n return item\n # not mine?\n return None\n # group message (recipient not designated)\n assert receiver.is_group, 'receiver error: %s' % receiver\n # the messenger will check group info before decrypting message,\n # so we can trust that the group's meta & members MUST exist here.\n grp = self.group(identifier=receiver)\n if grp is None:\n assert False, 'group not ready: %s' % receiver\n # return None\n members = grp.members\n if members is None or len(members) == 0:\n assert False, 'members not found: %s' % receiver\n # return None\n for item in users:\n if item.identifier in members:\n # DISCUSS: set this item to be current user?\n return item\n\n #\n # Entity Delegate\n #\n\n # Override\n def user(self, identifier: ID) -> Optional[User]:\n # 1. get from user cache\n usr = self.__users.get(identifier)\n if usr is None:\n # 2. create and cache it\n usr = self.create_user(identifier=identifier)\n if usr is not None:\n self.cache_user(user=usr)\n return usr\n\n # Override\n def group(self, identifier: ID) -> Optional[Group]:\n # 1. get from group cache\n grp = self.__groups.get(identifier)\n if grp is None:\n # 2. 
create and cache it\n grp = self.create_group(identifier=identifier)\n if grp is not None:\n self.cache_group(group=grp)\n return grp\n","sub_path":"dimsdk/facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":8136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"453608596","text":"from enum import Enum\nfrom chess.Space import Space\nfrom chess.pieces.pieces import *\nfrom chess.GameState import *\nfrom typing import List, Dict\nimport copy\nfrom chess.bots.RandomBot import RandomBot\n\nclass Game():\n def __init__(self,\n agents: Dict = {COLORS.WHITE.value: 'player', \n COLORS.BLACK.value: 'player'},\n reset_board: callable = None):\n self._state = GameState()\n self._game_mode = GAME_MODE.SELECTION_MODE\n self._agents = agents\n self._state.set_player_move(COLORS.WHITE.value)\n if reset_board == None:\n self._state.set_board(Game.default_reset_board())\n else:\n self._state.set_board(reset_board())\n self._state_history = []\n \n if self.check_bot_move():\n self.bot_move()\n\n @staticmethod\n def default_reset_board() -> List[List[Space]]:\n board = [[Space(i,j) for j in range(8)] for i in range(8)]\n for i in range(8):\n board[1][i].place_piece(Pawn(color = COLORS.BLACK.value))\n board[6][i].place_piece(Pawn(color = COLORS.WHITE.value))\n\n board[0][0].place_piece(Rook(color = COLORS.BLACK.value))\n board[0][7].place_piece(Rook(color = COLORS.BLACK.value))\n board[0][1].place_piece(Knight(color = COLORS.BLACK.value))\n board[0][6].place_piece(Knight(color = COLORS.BLACK.value))\n board[0][2].place_piece(Bishop(color = COLORS.BLACK.value))\n board[0][5].place_piece(Bishop(color = COLORS.BLACK.value))\n board[0][3].place_piece(Queen(color = COLORS.BLACK.value))\n board[0][4].place_piece(King(color = COLORS.BLACK.value))\n\n board[7][0].place_piece(Rook(color = COLORS.WHITE.value))\n board[7][7].place_piece(Rook(color = COLORS.WHITE.value))\n board[7][1].place_piece(Knight(color = COLORS.WHITE.value))\n board[7][6].place_piece(Knight(color = COLORS.WHITE.value))\n board[7][2].place_piece(Bishop(color = COLORS.WHITE.value))\n board[7][5].place_piece(Bishop(color = COLORS.WHITE.value))\n board[7][3].place_piece(Queen(color = COLORS.WHITE.value))\n board[7][4].place_piece(King(color = COLORS.WHITE.value))\n return board\n\n def check_legal_move(self, space_from: Space, space_to: Space) -> bool:\n return (space_from, space_to) in self._selected_space_legal_moves\n \n def move(self, space_from: Space, space_to: Space):\n self._state_history.append(copy.deepcopy(self._state))\n self._state.move(space_from, space_to)\n self._game_mode = GAME_MODE.SELECTION_MODE\n self.unhighlight_all()\n\n if self._state.check_mate(self._state.get_player_move()):\n pass\n\n def check_legal_select(self, space: Space) -> bool:\n # check who's turn it is\n return (not space.isempty() \n and space.get_piece().get_color() == \n self._state.get_player_move())\n\n def _select(self, space: Space):\n self._space_moving_from = space\n self._selected_space_legal_moves = self._state.get_legal_moves(space)\n \n for move in self._selected_space_legal_moves:\n move[1].highlight()\n\n def unhighlight_all(self):\n for row in self._state.get_board():\n for space in row:\n space.unhighlight()\n\n def check_bot_move(self) -> bool:\n return (not self._agents[self._state.get_player_move()] == 'player' and \n not self._state.check_mate(COLORS.BLACK.value) and\n not self._state.check_mate(COLORS.WHITE.value))\n\n def bot_move(self):\n move = 
self._agents[self._state.get_player_move()].bot_move(self._state)\n self.move(move[0], move[1])\n\n def click(self, space_name: str):\n # TODO\n # isolate the interface from the logic\n i = int(space_name[0])\n j = int(space_name[1])\n space = self._state.get_board()[i][j]\n \n if self._game_mode == GAME_MODE.MOVE_MODE:\n if self.check_legal_move(self._space_moving_from, space):\n self.move(self._space_moving_from, space)\n if self.check_bot_move():\n self.bot_move()\n else:\n self._game_mode = GAME_MODE.SELECTION_MODE\n self.unhighlight_all()\n\n else:\n if self.check_legal_select(space):\n self._select(space)\n # print(self.get_all_legal_moves(x))\n self._game_mode = GAME_MODE.MOVE_MODE\n # highlight available moves\n\n def revert_move(self):\n if self._state_history:\n self._state = self._state_history.pop()\n self.unhighlight_all()\n\n def get_info(self) -> List[List[str]]:\n alert = str(self._state.get_all_legal_moves())\n if self._state.in_check(COLORS.WHITE.value):\n alert += '
\n\nwhite in check'\n if self._state.in_check(COLORS.BLACK.value):\n alert += '\n\nblack in check'\n\n if self._state.check_mate(COLORS.BLACK.value):\n alert += '\n\nwhite wins!'\n if self._state.check_mate(COLORS.WHITE.value):\n alert += '\n\n
black wins!'\n\n return {\n 'board': [[str(piece) for piece in row] for row in self._state.get_board()],\n 'player_move': self._state.get_player_move(),\n 'alerts':alert\n }","sub_path":"chess/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"91119387","text":"import random\n\nplayer = int(input(\"Enter your move: rock (1) / scissors (2) / paper (3): \"))\n\ncomputer = random.randint(1,3)\n\nprint(\"The player threw %d, the computer threw %d\" %(player,computer))\n\nif ((player ==1 and computer == 2)\n or (player==2 and computer == 3)\n or (player == 3 and computer == 1)):\n\n print(\"Yay, you won! The computer is hopeless\")\nelif player == computer:\n print(\"You two actually tied!\")\nelse:\n print(\"What a pity, the computer won\")\n\n","sub_path":"jc_06_石头剪刀布.py","file_name":"jc_06_石头剪刀布.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"212242421","text":"class Solution:\n def searchRange(self, nums, target):\n if not nums:\n return [-1, -1]\n\n def findGreater(target):\n left, right = 0, len(nums)\n while left < right:\n mid = left + (right - left) // 2\n if nums[mid] < target:\n left = mid + 1\n elif nums[mid] == target:\n return mid\n else:\n right = mid\n return left\n left = findGreater(target)\n right = findGreater(target + 1) - 1\n return [left, right] if right >= left else [-1, -1]\n","sub_path":"34/34.find-first-and-last-position-of-element-in-sorted-array.340492558.Wrong-Answer.leetcode.python3.py","file_name":"34.find-first-and-last-position-of-element-in-sorted-array.340492558.Wrong-Answer.leetcode.python3.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"124779591","text":"import os\n\nfrom twilio.rest import Client\n\ndef whatsapp():\n account_sid = os.environ['TWILIO_ACCOUNT_SID']\n auth_token = os.environ['TWILIO_AUTH_TOKEN'] \n client = Client(account_sid, auth_token) \n ss=['whatsapp:+9191050xxxxx','whatsapp:+919760xxxxxx','whatsapp:+918961xxxxxx','whatsapp:+918276xxxxxx','whatsapp:+918373xxxxxx']\n for i in ss:\n message = client.messages.create( \n from_='whatsapp:+14155238886', \n body='A warm welcome from Tech-Trollers!! 
Have a good day', \n to= i \n ) \n print(message.sid)\n","sub_path":"whats_app.py","file_name":"whats_app.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"363092736","text":"from bs4 import BeautifulSoup\nimport requests as rq\n\nresponse = rq.get(\"http://b.hatena.ne.jp/\")\nbsObj = BeautifulSoup(response.text)\nlinks = bsObj.find_all(\"a\", attrs={\"class\": \"entry-link\"})\n\nfor link in links:\n text = link.string + \" : \" + link.get(\"href\") # + \" : \" + link.get(\"title\")\n print(text)\n","sub_path":"scrape.hatena.py","file_name":"scrape.hatena.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"492337937","text":"# -*- coding: utf-8 -*-\nimport io\nimport os\nimport re\n\nimport sys\nfrom setuptools import find_packages, setup\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return io.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r'^__version__ = [\\'\"]([^\\'\"]*)[\\'\"]', version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')\n\nsetup_requirements = ['pytest-runner'] if {'pytest', 'test', 'ptr'}.intersection(sys.argv) else []\ninstall_requirements = ['babelfish>=0.5.2', 'enzyme>=0.4.1', 'pymediainfo>=2.1.5', 'PyYAML',\n 'six>=1.9.0']\ntest_requirements = ['flake8_docstrings', 'flake8-import-order', 'pydocstyle!=1.1.0', 'pycodestyle!=2.1.0',\n 'pep8-naming', 'pytest', 'pytest>=2.8', 'pytest-cov', 'pytest-flake8']\n\nif sys.version_info < (3, 3):\n test_requirements.append('mock')\n\nsetup(\n name='knowit',\n version=find_version('knowit', '__init__.py'),\n description='Know better your media files',\n long_description=read('README.rst'),\n keywords='video mkv mp4 mediainfo metadata movie episode tv shows series',\n author='Rato AQ2',\n author_email='rato.aq2@gmail.com',\n url='https://github.com/ratoaq2/knowit',\n license='MIT',\n entry_points={\n 'console_scripts': [\n 'knowit = knowit.__main__:main'\n ]},\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Multimedia :: Video'\n ],\n packages=find_packages(exclude=('tests', 'docs')),\n include_package_data=True,\n setup_requires=setup_requirements,\n install_requires=install_requirements,\n tests_require=test_requirements,\n extras_require={\n 'test': test_requirements,\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"604641155","text":"import os\nimport importlib\nimport sys\nimport time\nimport uuid\nfrom urllib.parse import urlparse\nfrom collections import namedtuple\nfrom contextlib import contextmanager\n\nfrom fabric.api import put, settings, run, env, local, get, prompt, task\nfrom 
fabric.contrib import files\n\n\nPROJ = 'ads'\nSYSTEMD_SERVICES = [\n PROJ + '-web.service',\n PROJ + '-worker.service',\n PROJ + '-clock.service',\n PROJ + '-flower.service',\n]\nPROJECT_ROOT = '/home/{}/backend'.format(PROJ)\n\nenv.hosts = ['176.122.20.158']\nenv.user = PROJ\nenv.colorize_errors = True\nenv.cwd = PROJECT_ROOT\nenv.dotenv_path = PROJECT_ROOT + '/.env'\n\n\n@contextmanager\ndef modify_path(path):\n sys.path.insert(0, path)\n try:\n yield\n finally:\n sys.path.remove(path)\n\n\n# Importing config as a standalone script, not within PROJ package.\n# Motivation: not importing all dependencies,\n# only fabric is needed with this approach.\nwith modify_path(PROJ):\n project_config = importlib.import_module('config')\n\n\ndef _psql(command, locally=False):\n command = 'psql -c \"{}\"'.format(command)\n sent = 'sudo -u postgres {}'.format(command)\n with settings(user='root'):\n (local if locally else run)(sent)\n\n\ndef _create_user(username, password, locally=False):\n cmd = '''CREATE USER \"{username}\" WITH PASSWORD '{password}';'''\n _psql(cmd.format(**locals()), locally=locally)\n\n\ndef _create_database(dbname, username, locally=False):\n _psql(\n '''CREATE DATABASE \"{dbname}\" WITH encoding='utf8'\n template=\"template0\"\n LC_COLLATE='ru_RU.UTF-8'\n LC_CTYPE='ru_RU.UTF-8';'''\n .format(**locals()), locally=locally\n )\n _psql(\n '''GRANT ALL PRIVILEGES ON DATABASE \"{dbname}\" to \"{username}\";'''\n .format(**locals()), locally=locally\n )\n\n\ndef _db():\n DB = namedtuple('DB', 'uri dbname username password')\n uri = project_config.SQLALCHEMY_DATABASE_URI\n d = urlparse(uri)\n dbname = d.path.lstrip('/')\n return DB(uri, dbname, d.username, d.password)\n\n\ndef _file_size_in_mb(filename):\n return os.stat(filename).st_size / (1024 * 1024)\n\n\ndef _deploy_systemd_service(name):\n assert name.endswith('.service')\n put('deploy/{}'.format(name), '/etc/systemd/system/')\n run('systemctl enable {}'.format(name))\n run('systemctl stop {}'.format(name))\n run('systemctl start {}'.format(name))\n run('systemctl status {}'.format(name))\n\n\ndef _clear_local_pycache():\n local('find . -type d -name __pycache__ -prune -exec rm -r -f {} \\;')\n\n\n# ############################################################################\n# FABRIC TASKS\n# ############################################################################\n\n\n@task\ndef psql():\n local('psql -d {}'.format(project_config.SQLALCHEMY_DATABASE_URI))\n\n\n@task\ndef setup_nginx():\n with settings(user='root'):\n nginx = PROJ + '.nginx'\n put('deploy/{}'.format(nginx), '/etc/nginx/sites-available/')\n run('ln -s -f /etc/nginx/sites-available/{} /etc/nginx/sites-enabled/'.format(nginx))\n run('nginx -t')\n run('systemctl reload nginx.service')\n\n\n@task\ndef setup_db(locally=False):\n d = _db()\n _create_user(d.username, d.password, locally=locally)\n _create_database(d.dbname, d.username, locally=locally)\n\n\n@task\ndef restore_db_to_local(locally=False):\n x = prompt('This will destroy local DB. 
Are you sure?', default='N')\n if x.lower() not in ['yes', 'y']:\n print('Exiting')\n return\n\n dump_loc = './dump.sql.gz'\n\n if not locally:\n dump_remote = 'dumps/dump.sql.gz'\n run('mkdir -p dumps')\n run('rm -f {}'.format(dump_remote))\n tmp = '{}_{}'.format(dump_remote, uuid.uuid4())\n\n start = time.time()\n run('pg_dump --create --clean | gzip -9 > {}'.format(tmp))\n print('Creating remote dump took {:.1f} sec.'.format(time.time() - start))\n\n run('mv {} {}'.format(tmp, dump_remote))\n\n start = time.time()\n get(dump_remote, dump_loc)\n print('Downloading dump took {:.1f} sec.'.format(time.time() - start))\n\n size = _file_size_in_mb(dump_loc)\n print('Archived dump size: {:.3f} Mb'.format(size))\n\n local('gunzip < {} | sudo -u postgres psql'.format(dump_loc))\n\n\n@task\ndef setup_systemd():\n with settings(user='root'):\n for name in SYSTEMD_SERVICES:\n _deploy_systemd_service(name)\n\n\n@task\ndef systemd_command(cmd):\n with settings(user='root'):\n for name in SYSTEMD_SERVICES:\n run('systemctl {} {}'.format(cmd, name))\n\n\n@task\ndef test():\n local('venv/bin/pytest -v')\n\n\n@task\ndef deploy():\n test()\n\n systemd_command('stop')\n\n run('mkdir -p logs')\n\n put('requirements.txt', '.')\n put('deploy/{}_uwsgi.ini'.format(PROJ), '.')\n put('app.py', '.')\n\n run('rm -r -f {}'.format(PROJ))\n _clear_local_pycache()\n put(PROJ, '.')\n run('chmod -R o-rwx {}'.format(PROJ))\n\n if not files.exists('venv'):\n run('/usr/bin/python3 -m venv venv')\n run('venv/bin/pip install --upgrade pip setuptools wheel')\n run('venv/bin/pip install -r requirements.txt')\n\n run('rm -r -f migrations')\n put('migrations', '.')\n run('venv/bin/flask db upgrade')\n\n systemd_command('restart')\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"649849814","text":"\"\"\"\ntictactoe.py\n\nThis program is a tic-tac-toe\ngame that involves 2 players.\nOnce the game board is filled up\nregardless of win/lose conditions,\nthe game will end.\n\n@Author: Brian Jacobe\n@Date: 11/10/19\n@Last Update: 12/3/19\n\"\"\"\n\n# Prints out a 3x3 board\ndef display_board(board):\n show_board = (board[0] + \"|\" + board[1] + \"|\" + board[2] + \"\\n\"\n + board[3] + \"|\" + board[4] + \"|\" + board[5] + \"\\n\"\n + board[6] + \"|\" + board[7] + \"|\" + board[8])\n return show_board\n\n# Grabs player input and assigns 'x' or 'o'\ndef player_input():\n user_input = input(\"Please select either 'x' or 'o'.\\n\")\n return user_input\n\n# Assign marker position based on int 1-8 value\ndef place_marker(board, marker):\n marker_message = marker + \" - player: Please select a position between the values (0-8).\\n\"\n position = int(input(marker_message))\n availability = space_check(board, position)\n if 0 <= position <= 8:\n if availability == False:\n print(\"That space is unavailable.\\n\")\n place_marker(board, marker)\n else:\n board[position] = marker\n else:\n if availability == False:\n print(\"That value is out of bounds.\\n\")\n place_marker(board, marker)\n \n\n# Check if win condition is met, mark represents\n# player marker\ndef win_condition(board, mark):\n if board[0] == mark and board[1] == mark and board[2] == mark:\n return True\n elif board[3] == mark and board[4] == mark and board[5] == mark:\n return True\n elif board[6] == mark and board[7] == mark and board[8] == mark:\n return True\n elif board[0] == mark and board[3] == mark and board[6] == mark:\n 
return True\n elif board[1] == mark and board[4] == mark and board[7] == mark:\n return True\n elif board[2] == mark and board[5] == mark and board[8] == mark:\n return True\n elif board[0] == mark and board[4] == mark and board[8] == mark:\n return True\n elif board[2] == mark and board[4] == mark and board[6] == mark:\n return True\n else:\n return False\n\n# Check if space is available\ndef space_check(board, position):\n if position < 0 or position >= len(board):\n return False\n elif board[position] == ' ':\n return True\n else:\n return False\n pass\n\n# Check if the board is full\ndef full_board_check(board):\n counter = 0\n for check in board:\n if check == 'x' or check == 'o':\n counter += 1\n if counter == 9:\n return True\n else:\n return False\n\n# Ask if the players want to play again\ndef replay():\n error_message = \"Invalid entry please try again.\\n\"\n user_input = input(\"Would you like to play again? (Y/N)\\n\")\n if user_input == 'y' or user_input == 'n':\n return user_input\n else:\n return error_message\n \n# =================================================================================\ndivider = \"=\"*30\nclear_board = \"\\n\"*100\n\nprint(clear_board)\nprint(divider)\nprint(\"Welcome to Tic-Tac-Toe!\")\nprint(divider)\n\ngame_board = [' ',' ',' ',' ',' ',' ',' ',' ',' ']\nboard_state = full_board_check(game_board)\ngame_state = True\nreplay_input = None\nplayer_turn = 0\n# Establish player markers\nplayer_1 = player_input()\nplayer_2 = None\n\nif player_1 == 'x':\n player_2 = 'o'\nelse:\n player_2 = 'x'\n\nwhile game_state == True:\n while board_state == False:\n if player_turn == 0:\n place_marker(game_board,player_1,)\n print(display_board(game_board))\n player_turn = 1\n else:\n place_marker(game_board,player_2)\n print(display_board(game_board))\n player_turn = 0\n winner_1 = win_condition(game_board, player_1)\n winner_2 = win_condition(game_board, player_2)\n if winner_1 == True and winner_2 == False:\n print(\"Player 1 wins!\\n\")\n board_state = True\n elif winner_2 == True and winner_1 == False:\n print(\"Player 2 wins!\\n\")\n board_state = True\n elif winner_1 == False and winner_2 == False and board_state == False:\n continue\n else:\n print(\"Draw!\\n\")\n board_state = True\n replay_input = replay()\n if replay_input == 'n':\n game_state = False\n break\n else:\n game_board = [' ',' ',' ',' ',' ',' ',' ',' ',' ']\n player_turn = 0\n player_1 = player_input()\n player_2 = None\n print(\"\\n\"*100)\n if player_1 == 'x':\n player_2 = 'o'\n else:\n player_2 = 'x'\n print(\"Welcome back!\\n\")\n game_state = True","sub_path":"Misc/Tic-Tac-Toe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"70149596","text":"import matplotlib.pylab as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport pickle as pkl\nimport numpy as np\n\nimport shutil\nimport shlex\nimport glob\nimport os\n\nimport respy\n\nfrom auxiliary_simulations import move_subdirectory\nfrom auxiliary_simulations import float_to_string\nfrom auxiliary_simulations import INIT_FILE\nfrom auxiliary_simulations import GRID_RSLT\n\nEDU, EXP_A, EXP_B = 10.00, 5, 5\n\n\ndef run(base):\n \"\"\" Create the input to plot some baseline information about the\n specification.\n \"\"\"\n move_subdirectory()\n\n fname = GRID_RSLT + '/' + float_to_string(base)\n\n base_dir = os.getcwd()\n os.chdir(fname)\n\n for src in glob.glob('*'):\n 
shutil.copy(src, base_dir)\n os.chdir(base_dir)\n\n choice_probabilities = get_choice_probabilities('data.respy.info')\n pkl.dump(choice_probabilities, open('probabilities.respy.pkl', 'wb'))\n\n os.chdir('../')\n\n\ndef plot():\n \"\"\" Plot some baseline information.\n \"\"\"\n\n os.chdir('rslt')\n\n plot_choice_patterns()\n plot_return_education()\n plot_return_experience()\n\n os.chdir('../')\n\n\ndef get_coeffs():\n \"\"\" Extract coefficients from the initialization file.\n \"\"\"\n move_subdirectory()\n respy_obj = respy.RespyCls(INIT_FILE)\n model_paras = respy_obj.get_attr('model_paras')\n coeffs = dict()\n for label in ['a', 'b']:\n coeffs[label] = model_paras['coeffs_' + label]\n return coeffs\n\n\ndef wage_function(edu, exp_A, exp_B, coeffs):\n \"\"\" This function calculates the expected wage based on an agent's\n covariates for a given parametrization.\n \"\"\"\n\n wage = coeffs[0]\n wage += coeffs[1] * edu\n wage += coeffs[2] * exp_A\n wage += coeffs[3] * exp_A ** 2\n wage += coeffs[4] * exp_B\n wage += coeffs[5] * exp_B ** 2\n\n wage = np.exp(wage)\n\n return wage\n\n\ndef return_to_experience(exp_A, exp_B, coeffs, which):\n \"\"\" Wrapper to evaluate the wage function for varying levels of experience.\n \"\"\"\n # Get wage\n wage = wage_function(EDU, exp_A, exp_B, coeffs[which])\n\n # Finishing\n return wage\n\n\ndef return_to_education(edu, coeffs, which):\n \"\"\" Wrapper to evaluate the wage function for varying levels of education\n \"\"\"\n wage = wage_function(edu, EXP_A, EXP_B, coeffs[which])\n\n return wage\n\n\ndef get_choice_probabilities(fname):\n \"\"\" Get the choice probabilities.\n \"\"\"\n stats = np.tile(np.nan, (0, 4))\n\n with open(fname) as in_file:\n for line in in_file.readlines():\n list_ = shlex.split(line)\n if not list_:\n continue\n\n if list_[0] == 'Outcomes':\n break\n\n try:\n int(list_[0])\n except ValueError:\n continue\n\n stats = np.vstack((stats, [float(x) for x in list_[1:]]))\n\n # Finishing\n return stats\n\n\ndef plot_return_experience():\n \"\"\" Function to produce plot for the return to experience.\n \"\"\"\n\n def _beautify_subplot(subplot, zlim):\n subplot.view_init(azim=180 + 40)\n\n subplot.set_ylabel('Experience A')\n subplot.set_xlabel('Experience B')\n subplot.set_zlabel('Wages')\n\n subplot.zaxis.set_rotate_label(False)\n subplot.set_zlabel(r'Wages (in \\$1,000)', rotation=90)\n\n subplot.zaxis.get_major_ticks()[0].set_visible(False)\n\n # Background Color (higher numbers are lighter)\n subplot.w_xaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))\n subplot.w_yaxis.set_pane_color((0.6, 0.6, 0.6, 1.0))\n subplot.w_zaxis.set_pane_color((0.68, 0.68, 0.68, 1.0))\n\n ax.set_zlim(zlim)\n\n coeffs = get_coeffs()\n\n z = dict()\n for which in ['a', 'b']:\n x, y = np.meshgrid(range(20), range(20))\n z[which] = np.tile(np.nan, (20, 20))\n for i in range(20):\n for j in range(20):\n args = [i, j, coeffs, which]\n z[which][i, j] = return_to_experience(*args)\n\n # Scaling\n z['a'] /= 1000\n z['b'] /= 1000\n\n zlim = [0, 30]\n\n fig = plt.figure(figsize=(16, 8))\n\n ax = fig.add_subplot(121, projection='3d')\n ax.plot_surface(x, y, z['a'], rstride=1, cstride=1, cmap=cm.jet,\n linewidth=0, antialiased=False, alpha=0.8)\n _beautify_subplot(ax, zlim)\n\n ax = fig.add_subplot(122, projection='3d')\n ax.plot_surface(x, y, z['b'], rstride=1, cstride=1, cmap=cm.jet,\n linewidth=0, antialiased=False, alpha=0.8)\n _beautify_subplot(ax, zlim)\n\n # Write out to\n plt.savefig('returns_experience.png', bbox_inches='tight', format='png')\n\n\ndef 
plot_return_education():\n \"\"\" Function to produce plot for the return to education.\n \"\"\"\n\n coeffs = get_coeffs()\n\n # Determine wages for varying years of education in each occupation.\n xvals, yvals = range(10, 21), dict()\n for which in ['a', 'b']:\n yvals[which] = []\n for edu in xvals:\n yvals[which] += [return_to_education(edu, coeffs, which)]\n\n # Initialize plot\n ax = plt.figure(figsize=(12, 8)).add_subplot(111)\n\n # Scaling\n for occu in ['a', 'b']:\n for i, _ in enumerate(xvals):\n yvals[occu][i] /= 1000\n\n # Draw lines\n ax.plot(xvals, yvals['a'], '-k', label='Occupation A', linewidth=5,\n color='red', alpha=0.8)\n ax.plot(xvals, yvals['b'], '-k', label='Occupation B', linewidth=5,\n color='orange', alpha=0.8)\n\n # Both axes\n ax.tick_params(labelsize=16, direction='out', axis='both', top='off',\n right='off')\n\n # x-axis\n ax.set_xticklabels(ax.get_xticks().astype(int))\n ax.set_xlabel('Years of Schooling', fontsize=16)\n\n # y-axis\n yticks = ['{:,.0f}'.format(y) for y in ax.get_yticks().astype(int)]\n ax.set_yticklabels(yticks, fontsize=16)\n\n ax.set_ylabel(r'Wages (in \\$1,000)', fontsize=16)\n ax.yaxis.get_major_ticks()[0].set_visible(False)\n\n # Set up legend\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10),\n fancybox=False, frameon=False, shadow=False, ncol=2, fontsize=20)\n\n # Write out to\n plt.savefig('returns_schooling.png', bbox_inches='tight', format='png')\n\n\ndef plot_choice_patterns():\n \"\"\" Function to produce plot for choice patterns.\n \"\"\"\n choice_probabilities = pkl.load(open('probabilities.respy.pkl', 'rb'))\n\n num_periods = len(choice_probabilities)\n\n deciles = range(num_periods)\n colors = ['blue', 'yellow', 'orange', 'red']\n width = 0.9\n\n # Plotting\n bottom = [0] * num_periods\n\n # Initialize plot\n ax = plt.figure(figsize=(12, 8)).add_subplot(111)\n labels = ['Home', 'School', 'Occupation A', 'Occupation B']\n for j, i in enumerate([3, 2, 0, 1]):\n heights = choice_probabilities[:, i]\n plt.bar(deciles, heights, width, bottom=bottom, color=colors[j],\n alpha=0.70)\n bottom = [heights[i] + bottom[i] for i in range(num_periods)]\n\n # Both Axes\n ax.tick_params(labelsize=16, direction='out', axis='both', top='off',\n right='off')\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n\n # X axis\n ax.set_xlabel('Period', fontsize=16)\n ax.set_xlim([0, num_periods])\n\n # Y axis\n ax.set_ylabel('Share of Population', fontsize=16)\n ax.yaxis.get_major_ticks()[0].set_visible(False)\n\n # Legend\n plt.legend(labels, loc='upper center', bbox_to_anchor=(0.5, -0.10),\n fancybox=False, frameon=False, shadow=False, ncol=4,\n fontsize=20)\n\n # Write out to\n plt.savefig('choices.png', bbox_inches='tight',\n format='png')\n","sub_path":"_modules/economics_baseline.py","file_name":"economics_baseline.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"636282011","text":"from seamless.core import context, cell, StructuredCell\n\nctx = context(toplevel=True)\nctx.data = cell(\"mixed\")\nctx.sc = StructuredCell(\n data=ctx.data\n)\n\ndata = ctx.sc.handle\ndata.set(20)\nprint(data)\nctx.compute()\nprint(data.data, ctx.data.value)\ndata.set({})\ndata.a = \"test\"\ndata.b = 12\ndata.b.set(5)\ndata.c = {\"d\": {}}\ndata.c.d.e = 12.0\nprint(data)\nctx.compute()\nprint(data.data, 
ctx.data.value)\n","sub_path":"tests/lowlevel/structured_cell/simple-auth.py","file_name":"simple-auth.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"340006974","text":"import json\nimport os.path\n\nfrom flask import render_template\n\nfrom clashleaders import app, cache\nfrom clashleaders.clash.transformer import clans_leaderboard\nfrom clashleaders.model import ClanPreCalculated\n\nparent = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(parent, \"../data/countries.json\")) as f:\n data = json.load(f)\n COUNTRIES = {c['countryCode']: c for c in data if c['isCountry']}\n\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html',\n most_donations=leaderboard('week_delta.avg_donations'),\n most_attacks=leaderboard('week_delta.avg_attack_wins'),\n most_bh_attacks=leaderboard('week_delta.avg_versus_wins'),\n most_loot=leaderboard('week_delta.avg_gold_grab'),\n most_points=leaderboard('clanPoints'),\n most_vs_points=leaderboard('clanVersusPoints'),\n most_win_streak=leaderboard('warWinStreak'),\n most_war_stars=leaderboard('week_delta.avg_war_stars'),\n most_trophies=leaderboard('week_delta.avg_trophies'),\n avg_bh_level=leaderboard('avg_bh_level'),\n most_active_country=aggregate_by_country('week_delta.avg_attack_wins'),\n most_trophies_country=aggregate_by_country('clanPoints'),\n trophy_distribution=trophy_distribution()\n )\n\n\n@cache.memoize(28800)\ndef leaderboard(field):\n return clans_leaderboard(ClanPreCalculated.objects(members__gt=20).order_by(f\"-{field}\").limit(10), field)\n\n\n@cache.memoize(28800)\ndef aggregate_by_country(score_column=\"week_delta.avg_attack_wins\"):\n group = {\"$group\": {\"_id\": \"$location.countryCode\", \"score\": {\"$sum\": f\"${score_column}\"}}}\n sort = {'$sort': {'score': -1}}\n aggregated = list(ClanPreCalculated.objects(location__countryCode__ne=None).aggregate(group, sort))\n aggregated = [{'code': c['_id'].lower(), 'name': COUNTRIES[c['_id']]['name'], 'score': c['score']} for c in\n aggregated[:10]]\n return aggregated\n\n\n@cache.memoize(28800)\ndef trophy_distribution():\n counts = list(ClanPreCalculated.objects.aggregate({\n '$group': {\n '_id': {'$subtract': ['$clanPoints', {'$mod': ['$clanPoints', 500]}]},\n 'count': {'$sum': 1}}},\n {'$sort': {'_id': 1}}\n ))\n\n labels = [c['_id'] for c in counts]\n values = [c['count'] for c in counts]\n\n return dict(labels=labels, values=values)\n","sub_path":"clashleaders/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"306271336","text":"import numpy as np\nimport cv2 as cv\nfrom scipy.spatial.distance import euclidean as l2\n\nDEBUG = False # turn off debug for faster calculation\nRATIO = 1.\n\n\ndef set_debug(debug):\n global DEBUG\n DEBUG = debug\n\ndef get_debug():\n return DEBUG\n\ndef draw_box(img, boxes, cls):\n img = np.array(img).copy()\n for b in boxes:\n cv.fillConvexPoly(img, b, cls)\n return img\n\n\ndef mean_iou(labels, predictions, num_classes, weights):\n \"\"\"\n :param labels: ground truth map - each pixel belongs to 1 of 2 classes [0, num_classes-1]\n :param predictions: same as labels\n :param num_classes:\n :param weights: weight for each class to calculate the mean iou - dim (num_classes-1, )\n :return: mean iou and iou of each class\n \"\"\"\n\n assert type(labels) in [list, np.ndarray], 'Labels must be a 
list or np array'\n assert type(predictions) in [list, np.ndarray], 'Prediction must be a list or np array'\n\n labels = np.array(labels).flatten()\n predictions = np.array(predictions).flatten()\n weights = np.array(weights).flatten()\n\n assert labels.shape == predictions.shape, 'Shapes of labels and predictions must be the same'\n assert weights.shape[0] == num_classes\n\n ans = []\n for cls in range(num_classes):\n lab = labels == cls\n pre = predictions == cls\n\n inter = lab&pre\n union = lab|pre\n ans.append(inter.sum()/union.sum())\n\n return np.mean(np.array(ans)*weights)\n\n\nclass base_error:\n def __init__(self):\n\n self.gt_map = None\n self.pr_map = None\n\n def __get_shape(self, boxes):\n return boxes.reshape(-1,2).max(0)\n\n def __get_patch(self, box):\n w = int(max(l2(box[0], box[1]), l2(box[2], box[3]))) + 1\n h = int(max(l2(box[3], box[0]), l2(box[1], box[2]))) + 1\n # print('Box size', w, h)\n\n poly = np.array([(0, 0), (w - 1, 0), (w - 1, h - 1), (0, h - 1)]).astype('float32')\n matrix = cv.getPerspectiveTransform(box.astype('float32'), poly)\n patch = cv.warpPerspective(self.pr_map, matrix, (w, h))\n\n # DEBUG\n db_gt = db_pr = None\n if DEBUG:\n db_gt = cv.warpPerspective(self.gt_map, matrix, (w, h))\n tmp = np.stack([(self.pr_map>0).astype(np.uint8)*255, self.img, self.img], 2)\n db_pr = cv.warpPerspective(tmp, matrix, (w, h))\n\n # remove unintended edge of other regions\n pad = 5\n patch[:pad, :] = patch[-pad:, :] = patch[:, :pad] = patch[:, -pad:] = 0\n if DEBUG:\n db_gt[:pad, :] = db_gt[-pad:, :] = db_gt[:, :pad] = db_gt[:, -pad:] = 0\n db_pr[:pad, :] = db_pr[-pad:, :] = db_pr[:, :pad] = db_pr[:, -pad:] = 0\n return patch, db_gt, db_pr\n\n # @staticmethod\n def __cal_err(self, gt, pred, img=None):\n gt = np.array(gt)\n pred = np.array(pred)\n # TODO: sort predicted boxes by its area to prevent bigger boxes from covering the small ones.\n assert len(gt.shape) == len(pred.shape) == 3\n # return 0\n shape = img.shape if img is not None else None\n self.img = img\n if shape is None:\n b1 = self.__get_shape(gt)\n b2 = self.__get_shape(pred)\n shape = (max(b1[0], b2[0])+1, max(b1[1], b2[1])+1)\n\n self.img = np.zeros(shape)\n\n self.pr_map = np.zeros_like(self.img)\n for idx, b in enumerate(pred):\n cv.fillConvexPoly(self.pr_map, b, idx+1)\n # self.pr_map = self.pr_map*0.3 + imgs*0.7\n # ===================DEBUG===================\n self.gt_map = np.zeros_like(self.img)\n # self.gt_map = self.gt_map*0.3 + imgs*0.7\n for idx, b in enumerate(gt):\n cv.fillConvexPoly(self.gt_map, b, idx + 1)\n # ===========================================\n errs = []\n db_list = []\n for idx, b in enumerate(gt):\n patch, gt_patch, pr_patch = self.__get_patch(b)\n # patch =\n\n # TODO: checking if each small box is meaningful instead of just considering its area.\n out = cv.connectedComponentsWithStats((patch>0).astype(np.uint8))\n n_regions, label_matrix, stats, centroids = out[0], out[1], out[2], out[3]\n n_active_region = n_regions-1 # ignore background\n errs.append(n_active_region)\n if n_active_region > 1:\n db_list.append(pr_patch)\n\n errs = np.array(errs)-1\n errs = (errs>0)*errs\n ret = {'Total error': errs.sum(),\n 'Mean error': errs.mean(),\n 'STD': errs.std()}\n # print(ret, db_list)\n\n return ret, db_list\n\n\nclass split_error(base_error):\n def __init__(self):\n super(split_error, self).__init__()\n\n def __call__(self, gt, pred, img = None):\n \"\"\"\n :param gt: array of bounding boxes (quadrilateral) in groundtruth\n :param pred: same as gt\n :param img: original 
document - used for debug\n :return: total splits, average split per line, std split\n \"\"\"\n if img is not None:\n img = cv.resize(img, (0, 0), fx=RATIO, fy=RATIO)\n gt = (np.array(gt) * RATIO).astype(np.int32)\n pred = (np.array(pred) * RATIO).astype(np.int32)\n return self._base_error__cal_err(gt, pred, img)\n\n\nclass merge_error(base_error):\n def __init__(self):\n super(merge_error, self).__init__()\n\n def __call__(self, gt, pred, img=None):\n if img is not None:\n img = cv.resize(img, (0, 0), fx=RATIO, fy=RATIO)\n\n gt, pred = pred, gt\n\n gt = (np.array(gt) * RATIO).astype(np.int32)\n pred = (np.array(pred) * RATIO).astype(np.int32)\n return self._base_error__cal_err(gt, pred, img)","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"497597471","text":"import numpy as np\nimport cv2\nimport os\nimport math\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\nimport pdb\n\n\n# https://www.cs.columbia.edu/CAVE/software/softlib/dorf.php\n\n# def load_CRF():\n# CRF = scipy.io.loadmat('matdata/201_CRF_data.mat')\n# iCRF = scipy.io.loadmat('matdata/dorfCurvesInv.mat')\n# B_gl = CRF['B']\n# I_gl = CRF['I']\n\n# if os.path.exists('matdata/201_CRF_iCRF_function.mat')==0:\n# CRF_para = np.array(CRF_function_transfer(I_gl, B_gl))\n# iCRF_para = 1. / CRF_para\n# scipy.io.savemat('matdata/201_CRF_iCRF_function.mat', {'CRF':CRF_para, 'iCRF':iCRF_para})\n# else:\n# Bundle = scipy.io.loadmat('matdata/201_CRF_iCRF_function.mat')\n# CRF_para = Bundle['CRF']\n# iCRF_para = Bundle['iCRF']\n\n# return CRF_para, iCRF_para\n\n \nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef ReadImg(filename):\n img = cv2.imread(filename)\n img = img[:, :, ::-1] / 255.0\n img = np.array(img).astype('float32')\n\n return img\n\n\ndef hwc_to_chw(img):\n return np.transpose(img, axes=[2, 0, 1])\n\n\ndef chw_to_hwc(img):\n return np.transpose(img, axes=[1, 2, 0])\n\n\n####################################################\n#################### noise model ###################\n####################################################\n\ndef func(x, a):\n return np.power(x, a)\n\ndef CRF_curve_fit(I, B):\n popt, pcov = curve_fit(func, I, B)\n return popt\n\ndef CRF_function_transfer(x, y):\n para = []\n for crf in range(201):\n temp_x = np.array(x[crf, :])\n temp_y = np.array(y[crf, :])\n para.append(CRF_curve_fit(temp_x, temp_y))\n\n # (Pdb) pp temp_x.shape, temp_y.shape\n # ((1024,), (1024,))\n # pdb.set_trace()\n # (Pdb) pp para[0]\n # array([0.34269688])\n # (Pdb) pp para[1]\n # array([0.33438498])\n return para\n\n\ndef mosaic_bayer(rgb, pattern, noiselevel):\n\n w, h, c = rgb.shape\n if pattern == 1:\n num = [1, 2, 0, 1]\n elif pattern == 2:\n num = [1, 0, 2, 1]\n elif pattern == 3:\n num = [2, 1, 1, 0]\n elif pattern == 4:\n num = [0, 1, 1, 2]\n elif pattern == 5:\n return rgb\n\n B = np.zeros((w, h))\n\n # Path is like N ?\n B[0:w:2, 0:h:2] = rgb[0:w:2, 0:h:2, num[0]]\n B[0:w:2, 1:h:2] = rgb[0:w:2, 1:h:2, num[1]]\n B[1:w:2, 0:h:2] = rgb[1:w:2, 0:h:2, num[2]]\n B[1:w:2, 1:h:2] = rgb[1:w:2, 1:h:2, num[3]]\n\n gauss = np.random.normal(0, noiselevel / 255., (w, h))\n # gauss = gauss.reshape(w, h)\n B = B + 
gauss\n\n return B\n\ndef CRF_Map_opt(Img, popt):\n # w, h, c = Img.shape\n output_Img = Img.copy()\n\n output_Img = np.power(output_Img, *popt)\n return output_Img\n\n\ndef Demosaic(B_b, pattern):\n\n B_b = B_b * 255\n B_b = B_b.astype(np.uint16)\n\n if pattern == 1:\n lin_rgb = cv2.demosaicing(B_b, cv2.COLOR_BayerGB2BGR)\n elif pattern == 2:\n lin_rgb = cv2.demosaicing(B_b, cv2.COLOR_BayerGR2BGR)\n elif pattern == 3:\n lin_rgb = cv2.demosaicing(B_b, cv2.COLOR_BayerBG2BGR)\n elif pattern == 4:\n lin_rgb = cv2.demosaicing(B_b, cv2.COLOR_BayerRG2BGR)\n elif pattern == 5:\n lin_rgb = B_b\n\n lin_rgb = lin_rgb[:, :, ::-1] / 255.\n\n # pdb.set_trace()\n # pattern = 2\n # (Pdb) pp B_b.shape\n # (512, 512)\n # (Pdb) pp lin_rgb.shape\n # (512, 512, 3)\n\n return lin_rgb\n\n# CRF: Modeling the space of camera response functions\n# http://www-cs.ccny.cuny.edu/~grossberg/publications/Modeling_the_Space_of_Camera_Response_Functions_Images.pdf\ndef AddNoiseMosai(x,\n CRF_para,\n iCRF_para,\n sigma_s,\n sigma_c,\n crf_index,\n pattern):\n w, h, c = x.shape\n temp_x = CRF_Map_opt(x, iCRF_para[crf_index])\n\n # pdb.set_trace()\n # (Pdb) pp x.shape\n # (512, 512, 3)\n\n # (Pdb) pp CRF_para.shape\n # (201, 1)\n # (Pdb) pp CRF_para.mean()\n # 0.5285327803038496\n # (Pdb) pp CRF_para.std()\n # 0.3079649125807247\n\n # CRF_para*iCRF_para == 1\n # (Pdb) pp B.shape\n # (201, 1024)\n # (Pdb) pp I.shape\n # (201, 1024)\n\n sigma_s = np.reshape(sigma_s, (1, 1, c))\n noise_s_map = np.multiply(sigma_s, temp_x)\n noise_s = np.random.randn(w, h, c) * noise_s_map\n temp_x_n = temp_x + noise_s\n\n noise_c = np.zeros((w, h, c))\n for chn in range(3):\n noise_c[:, :, chn] = np.random.normal(0, sigma_c[chn], (w, h))\n\n temp_x_n = temp_x_n + noise_c\n temp_x_n = np.clip(temp_x_n, 0.0, 1.0)\n temp_x_n = CRF_Map_opt(temp_x_n, CRF_para[crf_index])\n\n # pdb.set_trace()\n # (Pdb) pp temp_x_n.shape\n # (512, 512, 3)\n\n B_b_n = mosaic_bayer(temp_x_n[:, :, ::-1], pattern, 0)\n # pdb.set_trace()\n # (Pdb) pp B_b_n.shape\n # (512, 512)\n\n lin_rgb_n = Demosaic(B_b_n, pattern)\n # pdb.set_trace()\n\n # (Pdb) lin_rgb.shape\n # (512, 512, 3) \n\n result = lin_rgb_n\n\n # pdb.set_trace()\n # (Pdb) pp result.shape\n # (512, 512, 3)\n\n return result\n\n\ndef AddRealNoise(image, CRF_para, iCRF_para):\n # array([0.0923482, 0.0048792, 0.1523728])\n sigma_s = np.random.uniform(0.0, 0.16, (3, ))\n sigma_c = np.random.uniform(0.0, 0.06, (3, ))\n\n CRF_index = np.random.choice(201)\n pattern = np.random.choice(4) + 1\n noise_img = AddNoiseMosai(image, CRF_para, iCRF_para, sigma_s, sigma_c, CRF_index, pattern)\n noise_level = sigma_s * np.power(image, 0.5) + sigma_c\n\n # pdb.set_trace()\n\n return noise_img, noise_level\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"127845676","text":"# importing all the required librarires\nimport pandas as pd \nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import minmax_scale\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn import ensemble\nimport matplotlib.pyplot as plt\nimport category_encoders as ce\n\n# Reading data\ndataset = pd.read_csv('/Users/shravani/Documents/Machine Learning/data.csv')\n\n# Removing null data\ndataset = dataset.fillna(method='bfill')\n\n# Removing outliers\nds = 
pd.DataFrame(dataset)\ndataset = dataset[ds['Income in EUR'] < 5000000 ]\n\n# Encoding all the categorical columns - Target encoding\ndf = pd.DataFrame(dataset)\ngenencoder = ce.TargetEncoder(cols=['Gender'])\ndfgender = genencoder.fit_transform(df['Gender'], df['Income in EUR'])\nprofencoder = ce.TargetEncoder(cols=['Profession'])\ndfprof = profencoder.fit_transform(df['Profession'], df['Income in EUR'])\ncountryencoder = ce.TargetEncoder(cols=['Country'])\ndfcountry = countryencoder.fit_transform(df['Country'], df['Income in EUR'])\ndegreeencoder = ce.TargetEncoder(cols=['University Degree'])\ndfdegree = degreeencoder.fit_transform(df['University Degree'], df['Income in EUR'])\ndf = df.drop(columns=['Gender','University Degree', 'Country','Profession'])\ndf = pd.concat([df, dfprof, dfgender, dfcountry, dfdegree], axis=1)\ndataset = df\n\n# Training the model\nX = dataset[['Gender','University Degree', 'Age', 'Size of City', 'Country', 'Profession', 'Year of Record']]\ny = dataset['Income in EUR']\n\n# Scaling the data\ndf = pd.DataFrame(X)\nsc = StandardScaler()\nscaler = sc.fit_transform(df[['Size of City','Year of Record','Age']])\nscaler = np.transpose(scaler)\ndf['Size of City'] = scaler[0]\ndf['Year of Record'] = scaler[1]\ndf['Age'] = scaler[2]\nX = df\n\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=0)\n\nprint('Training data size: (%i,%i)' % X_train.shape)\nprint('Testing data size: (%i,%i)' % X_test.shape)\n\n\n# Training the model\nimport xgboost as xgb\nregression_model=xgb.XGBRegressor(objective='reg:squarederror', random_state=1, learning_rate=0.1, max_depth=5, n_estimators= 1000)\n\nregression_model.fit(X_train,y_train)\n\n\n# Testing the model\ny_pred = regression_model.predict(X_test)\nprint(\"predicted data size:\", y_pred.size)\ntest_rmse = np.sqrt(mean_squared_error(y_test, y_pred))\nprint('Test RMSE: %f' % test_rmse)\n\n# Plot for the test\nplt.scatter(y_test,y_pred)\nplt.xlabel('Actual')\nplt.ylabel('Predicted')\nplt.show()\n\n# Predicting based on model\ndataset = pd.read_csv('/Users/shravani/Documents/Machine Learning/tcd ml 2019-20 income prediction test (without labels).csv')\nsubmissionFile = pd.read_csv('/Users/shravani/Documents/Machine Learning/tcd ml 2019-20 income prediction submission file.csv')\n\n# Filling null values\ndataset = dataset.fillna(method='bfill')\n\n# Encoding all the categorical columns - Target encoding\ndf = pd.DataFrame(dataset)\ndfprof = profencoder.transform(df['Profession'])\ndfgender = genencoder.transform(df['Gender'])\ndfcountry = countryencoder.transform(df['Country'])\ndfdegree = degreeencoder.transform(df['University Degree'])\ndf = df.drop(columns=['Gender','University Degree', 'Country','Profession'])\ndf = pd.concat([df, dfprof, dfgender, dfcountry, dfdegree], axis=1)\ndataset = df\n\nX = dataset[['Gender','University Degree', 'Age', 'Size of City', 'Country', 'Profession', 'Year of Record']]\n\n# Scaling the numerical data\ndf = pd.DataFrame(X)\nscaler = sc.transform(df[['Size of City','Year of Record','Age']])\nscaler=np.transpose(scaler)\ndf['Size of City'] = scaler[0]\ndf['Year of Record'] = scaler[1]\ndf['Age'] = scaler[2]\nX = df\n\n# Predicting the income\ny_pred_for_given_data = regression_model.predict(X)\nprint(y_pred_for_given_data.size)\n\n# Saving the values in a submission file\nsubmissionFile['Income'] = y_pred_for_given_data\nsubmissionFile.to_csv('/Users/shravani/Documents/Machine Learning/submission.csv', 
index=False)","sub_path":"kaggle_individual.py","file_name":"kaggle_individual.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"410796656","text":"\"\"\"\nPrompt for apikey\nPrompt for hash\nRequest hash report from VT\nParse only non-clean detections- AV name, detection name, version/definitions, VT updated date\nPrint above info\n\"\"\"\n\nimport requests,time,csv\nfrom time import sleep\n\n\n# requests setup\nrequests.urllib3.disable_warnings()\nclient = requests.session()\nclient.verify = False\n\napikey = '5c0d67f571ebd3d0404de3d3db093e76c5d49fca528d5fe8c38c0dbc8db43f6f' #Enter your API key.')\n\n\ndef get_hash_report(apikey, filehash , val):\n url = 'https://www.virustotal.com/vtapi/v2/file/report'\n params = {\"apikey\": apikey, \"resource\": filehash, \"allinfo\": True}\n\n # perform call\n r = client.get(url, params=params)\n\n if r.status_code == 429:\n print('Encountered rate-limiting. Sleeping for 45 seconds.')\n sleep(45)\n get_hash_report(apikey, filehash)\n\n elif r.status_code != 200:\n print('Encountered unanticipated HTTP error.')\n print(r.status_code)\n exit(1)\n\n elif r.status_code == 200:\n response = r.json()\n parse_hash_report(response,filehash,val)\n\n\ndef parse_hash_report(response, filehash, val):\n detections = response['positives']\n total = response['total']\n if detections >= 1:\n scan_results = response['scans']\n x = 0\n for vendor in scan_results:\n if scan_results[vendor]['detected'] == True and x ==0 :\n print (\"malicious detected\")\n info_date = scan_results[vendor]['update']\n detected_name = scan_results[vendor]['result']\n definition_version = scan_results[vendor]['version']\n with open('/home/radius/analisis-tools/report.csv','a', newline='') as file:\n writer = csv.writer(file)\n writer.writerow([val,filehash,vendor,detected_name,\"{} Engine dari {}\".format(detections,total)])\n x += 1\n break\n\n else:\n print('No malicious detections found.')\n while True:\n time.sleep(20)\n return\n\n\nif __name__ == \"__main__\":\n d = {}\n with open(\"/home/radius/analisis-tools/hash.txt\",\"r\") as f:\n for line in f:\n (key, val) = line.split()\n d[val] = key\n for x , y in d.items():\n get_hash_report(apikey, x, y)\n","sub_path":"analisis-tools/VT_Hash_Search.py","file_name":"VT_Hash_Search.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"262814931","text":"from PyQt5 import QtWidgets, QtCore\r\nfrom PyQt5.QtCore import Qt, QPoint\r\nfrom CustomWidget.FrameLayout.FrameButton import *\r\nfrom CustomWidget.StateWidget.StateButton import *\r\n\r\nclass StateWidget(QtWidgets.QWidget):\r\n def __init__(self, path, parent = None):\r\n super().__init__(parent)\r\n\r\n layout = QtWidgets.QHBoxLayout()\r\n\r\n moveBar = FrameButton()\r\n moveBar.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\r\n moveBar.setMinimumSize(24, 24)\r\n moveBar.drag.connect(self.moveWindow)\r\n moveBar.click.connect(self.moveClick)\r\n moveBar.dclick.connect(self.max)\r\n\r\n self.minB = StateButton(path + \"/Picture/icon/minimize.png\", path + \"/Picture/icon/minimize-hover.png\")\r\n self.resB = StateButton(path + \"/Picture/icon/maximize.png\", path + \"/Picture/icon/maximize-hover.png\")\r\n self.exiB = StateButton(path + \"/Picture/icon/close.png\", path + \"/Picture/icon/close-hover.png\")\r\n\r\n self.minB.setToolTip(\"Minimize\")\r\n 
self.resB.setToolTip(\"Maximize\")\r\n self.exiB.setToolTip(\"Close\")\r\n\r\n self.minB.clicked.connect(self.minimize)\r\n self.resB.clicked.connect(self.maximize)\r\n self.exiB.clicked.connect(self.exit)\r\n\r\n layout.addWidget(moveBar)\r\n layout.addWidget(self.minB)\r\n layout.addWidget(self.resB)\r\n layout.addWidget(self.exiB)\r\n\r\n layout.setSpacing(0)\r\n layout.setContentsMargins(0, 0, 0, 0)\r\n self.setStyleSheet(\"background-color: white\")\r\n self.setLayout(layout)\r\n\r\n min = QtCore.pyqtSignal()\r\n max = QtCore.pyqtSignal()\r\n exi = QtCore.pyqtSignal()\r\n mov = QtCore.pyqtSignal()\r\n movC = QtCore.pyqtSignal()\r\n\r\n def minimize(self):\r\n self.min.emit()\r\n\r\n def maximize(self):\r\n self.max.emit()\r\n\r\n def exit(self):\r\n self.exi.emit()\r\n\r\n def moveWindow(self):\r\n self.mov.emit()\r\n\r\n def moveClick(self):\r\n self.movC.emit()","sub_path":"CustomWidget/StateWidget/StateWidget.py","file_name":"StateWidget.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"313004380","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\nCITIES = ['chicago', 'new york city', 'washington']\n\nMONTHS = ['all', 'january', 'february', 'march', 'april', 'may', 'june']\n\nDAYS = ['all','sunday', 'monday', 'tuesday', 'wednesday', \\\n 'thursday', 'friday', 'saturday' ]\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city=input('Which city do you want to analyse? Choose between: Chicago, New york city or Washington. ').lower()\n while city not in CITIES:\n print('\\n Please choose only between:', CITIES)\n city=input('\\n Which city do you want to analyse?? ').lower()\n\n\n # get user input for month (all, january, february, ... , june)\n month=input('\\n Ok, awesome, you want to analyse: {} ! Good choice. \\n \\n Which month do you want to analyse? Please write out month (all, january, february, ...) '.format(city)).lower()\n while month not in MONTHS:\n print('\\n\\nYou can only choose between: ', MONTHS)\n month=input('\\nPlease choose a month: ').lower()\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n day=input('\\n Ok, cool, you want to look at {}. I got it. \\n \\n Which day of the week do you want to analyse? (all, monday, tuesday, ... sunday) '.format(month)).lower()\n while day not in DAYS:\n print('Please choose between:', DAYS)\n day=input('\\n Enter your choice for day of the week again. 
').lower()\n print('\\n {}, fantastic choice\\n'.format(day))\n print('-'*40)\n\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day_of_week'] == day.title()]\n\n return df\n\n # ask if use wants to see a part of the raw data\n\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['month'].mode()[0]\n\n # display the most common day of week\n most_common_day = df['day_of_week'].mode()[0]\n\n # display the most common start hour\n most_common_hour = df['hour'].mode()[0]\n\n\n print('Most common day of week:', most_common_day)\n print('Most common month:', most_common_month)\n print('Most common start hour:', most_common_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start = df['Start Station'].value_counts().idxmax()\n print('The most popular start station is: ', popular_start)\n\n # display most commonly used end station\n popular_end = df['End Station'].value_counts().idxmax()\n print('The most popular end station is: ', popular_end)\n\n # display most frequent combination of start station and end station trip\n df['Start End'] = df['Start Station'].map(str) + '&' + df['End Station']\n popular_start_end = df['Start End'].value_counts().idxmax()\n print('The most commonly used start and end station: ', popular_start_end)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('The total travel time is: ', total_travel_time)\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('The total travel time is: ', mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Counts of 
user types:\\n')\n user_counts = df['User Type'].value_counts()\n\n # iteratively print out the total numbers of user types\n # using {} .format() is a great example of good refactoring\n for index, user_count in enumerate(user_counts):\n print(\" {}: {}\".format(user_counts.index[index], user_count))\n\n # Display counts of gender\n try:\n print('\\n Counts of gender:\\n')\n gender_counts = df['Gender'].value_counts()\n for index, gender_count in enumerate(gender_counts):\n print(' {}: {}'.format(gender_counts.index[index], gender_count))\n except:\n print('There is no gender data in the source.')\n\n\n # Display earliest, most recent, and most common year of birth\n try:\n print('\\nStatistics about the year of birth. \\n')\n earliest = int(df['Birth Year'].min())\n most_recent = int(df['Birth Year'].max())\n most_common = int(df['Birth Year'].value_counts().idxmax())\n print('The earliest year of birth is: {}. \\n The most recent year of birth is: {}. \\n The most common year of birth is: {}.'.format(earliest, most_recent, most_common))\n except:\n print('There is no year of birth data in the source.')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef disp_raw_data(df):\n '''\n Displays the data used to compute the stats\n Input:\n the df with all the bikeshare data\n Returns:\n raw data if they want.\n '''\n #omit irrelevant columns from visualization\n row_index = 0\n\n see_data = input(\"\\n Would you like to see a sample of the data used to compute the stats? Please write 'yes' or 'no' \\n\").lower()\n while True:\n if see_data == 'no':\n return\n if see_data == 'yes':\n print(df[row_index: row_index + 5])\n row_index = row_index + 5\n see_data = input(\"\\n Would you like to see 5 more rows of the data used to compute the stats? Please write 'yes' or 'no' \\n\").lower()\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n disp_raw_data(df)\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"300825899","text":"\"\"\"\nv001.1 (added custom_threshold which allows to start from N cases)\n\"\"\"\nimport pandas as pd\nfrom datetime import date\n\n\nclass Region:\n \"\"\"\n Defines the city/province/country of the epidemic by its\n population and daily cumulative cases curve.\n \"\"\"\n\n def __init__(self, name='', nCitizen=0, first_day=date(2020, 1, 1),\n cumulativeCases=[], recoveredCases=[], deathCases=[]):\n self.name = name\n self.N = nCitizen\n self.first_day = first_day\n self.rcQ = cumulativeCases\n self.rR = recoveredCases\n self.rD = deathCases\n\n\nclass WebDataReader:\n \"\"\"\n This class creates Region object with necessary data\n inputs: Country (str)\n State (str)\n output: (Region object, dataframe object)\n \"\"\"\n\n def __init__(self, dataset, country, state='nan', custom_threshold=0):\n self.dataset = dataset\n self.country = country\n self.state = state\n (self.Region, self.data) = self.form(self.country, self.state,\n custom_threshold)\n\n def form(self, country, state, custom_threshold=0):\n data = self.pick(country, state, custom_threshold)\n\n # Initialze the province\n Province = Region()\n\n # province's name\n if state == 'nan':\n Province.name = country\n else:\n Province.name = state\n\n # First day of data\n year = int(data.loc[0, 'Date'][0] + data.loc[0, 'Date'][1] + data.loc[0, 'Date'][2] + data.loc[0, 'Date'][3])\n month = int(data.loc[0, 'Date'][5] + data.loc[0, 'Date'][6])\n day = int(data.loc[0, 'Date'][8] + data.loc[0, 'Date'][9])\n\n Province.first_day = date(year,month,day)\n\n Province.rcQ = list(data.loc[:,'Confirmed'].values)\n Province.rR = list(data.loc[:,'Recovered'].values)\n Province.rD = list(data.loc[:,'Deaths'].values)\n return(Province,data)\n\n def pick(self, country, state = 'nan', custom_threshold = 0):\n \"\"\"\n This function picks the relevent country/state's data from the whole dataset\n It starts picking data when cases are > custom_threshold\n \"\"\"\n # initialize a dataframe\n data = pd.DataFrame()\n\n if state == 'nan':\n index=0\n head = False\n end = False\n\n while not end:\n\n info = self.dataset.loc[index, 'Confirmed'] + self.dataset.loc[index, 'Recovered'] + self.dataset.loc[index, 'Deaths']\n if self.dataset.loc[index, 'Country/Region'] == country and info > custom_threshold:\n head = True\n data = data.append(self.dataset.iloc[index,:],ignore_index=True)\n data\n elif head == True:\n end = True\n\n index += 1\n if index > self.dataset.shape[0]:\n end =True\n\n else:\n index=0\n head = False\n end = False\n\n while not end:\n\n info = self.dataset.loc[index, 'Confirmed'] + self.dataset.loc[index, 'Recovered'] + self.dataset.loc[index, 'Deaths']\n if self.dataset.loc[index, 'Country/Region'] == country and self.dataset.loc[index, 'Province/State'] == state and info > 0:\n head = True\n data = data.append(self.dataset.iloc[index,:],ignore_index=True)\n elif head == True:\n end = True\n\n index += 1\n if index > self.dataset.shape[0]:\n end =True\n data = data.drop(columns =['Country/Region','Province/State','Lat', 'Long'],axis=1)\n\n 
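# Editor's aside (hedged; not part of the original file): a minimal usage sketch for this class,\n        # assuming a JHU-style frame with Date/Country/Region/Province/State/Lat/Long/Confirmed/\n        # Recovered/Deaths columns (all values below are invented for illustration):\n        #   demo = pd.DataFrame({'Date': ['2020-03-01'], 'Country/Region': ['Italy'],\n        #                        'Province/State': ['nan'], 'Lat': [41.9], 'Long': [12.5],\n        #                        'Confirmed': [1694], 'Recovered': [83], 'Deaths': [34]})\n        #   reader = WebDataReader(demo, 'Italy', custom_threshold=1000)\n        #   print(reader.Region.name, reader.Region.first_day, reader.Region.rcQ)\n        # Note that pick() relies on DataFrame.append, so this assumes pandas < 2.0.\n        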
return(data)\n","sub_path":"seiqrdp_model/DataCollector.py","file_name":"DataCollector.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"442036021","text":"'''\nCreated on Jan 31, 2018\n\n@author: lfko\n'''\n# for plotting things\nfrom matplotlib import pyplot as pl\n\nimport numpy as np\n\n# this framework will do the actual image recognition\nimport cv2\n\n\ndef main():\n # read an image\n # we need to provide a file (or a path to a file) and the color components, we would like to read\n image = cv2.imread('/media/lfko/Lagerraum/Pictures/gruppe-lan.jpg', cv2.COLOR_BGR2GRAY)\n \n # resize by halfing each axis\n image = cv2.resize(image, (0, 0), fx=0.5, fy=0.5) \n \n # show the imported image; first param is the window' name\n # cv2.imshow('Frame', image)\n \n # wait for user interrupt; press any key\n # cv2.waitKey(0)\n # well, self-explanatory\n # cv2.destroyAllWindows()\n faceRecog(image)\n# showWithMPL(image)\n\n\ndef faceRecog(image):\n # templates for facial recognition\n face_cascade = cv2.CascadeClassifier('opencv/data/haarcascades/haarcascade_frontalface_default.xml')\n\n # templates for eye recognition\n eye_cascade = cv2.CascadeClassifier('opencv/data/haarcascades/haarcascade_eye.xml')\n \n # gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(image, 1.3, 5)\n\n for (x, y, w, h) in faces:\n # mark faces with a rectangle\n cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_gray = image[y:y + h, x:x + w]\n roi_color = image[y:y + h, x:x + w]\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex, ey, ew, eh) in eyes:\n # mark eyes with a rectangle\n cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)\n \n # resize it before showing it\n image = cv2.resize(image, (0, 0), fx=0.7, fy=0.7) \n cv2.imshow('img', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef showWithMPL(image):\n # now do the same with matplotlib\n pl.imshow(image, cmap='gray', interpolation='bicubic')\n pl.xticks([]), pl.yticks([]) # to hide tick values on X and Y axis\n # this will render a stripe into the image\n pl.plot([200, 300, 400], [100, 200, 300], 'c', linewidth=5)\n pl.show()\n\n\nif __name__ == \"__main__\":\n main() \n","sub_path":"image-recognition/lefko/python/image/ImageRecognizer.py","file_name":"ImageRecognizer.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"365689316","text":"n = 3\ncomputers = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]\n\n\ndef solution(n, computers):\n visited = [0] * n\n count = 0\n for i in range(n):\n if visited[i] == 0:\n count += 1\n stack = [i]\n while stack:\n\n x = stack.pop() # 시작 컴퓨터\n\n for nx in range(n):\n if nx != x and visited[nx] == 0:\n if computers[x][nx] == 1: # 연결 시\n visited[nx] = 1\n stack.append(nx)\n\n return count\n\n\nprint(solution(n, computers))\n","sub_path":"PYTHON/PROGRAMMERS/네트워크/네트워크_DFS.py","file_name":"네트워크_DFS.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"33723353","text":"import librosa\nimport numpy as np\nfrom scipy.signal import lfilter, butter\n\nimport sigproc # see details: https://www.cnblogs.com/zhuimengzhe/p/10223510.html\nimport constants as c\nimport os\n\n\ndef load_wav(filename, sample_rate):\n\taudio, sr = librosa.load(filename, sr=sample_rate, 
mono=True)\n\taudio = audio.flatten()# 按行方向降为 1 维\n\treturn audio\n\n\ndef normalize_frames(m,epsilon=1e-12):\n\treturn np.array([(v - np.mean(v)) / max(np.std(v),epsilon) for v in m])\n\n\n# https://github.com/christianvazquez7/ivector/blob/master/MSRIT/rm_dc_n_dither.m\ndef remove_dc_and_dither(sin, sample_rate):\n\tif sample_rate == 16e3:\n\t\talpha = 0.99\n\telif sample_rate == 8e3:\n\t\talpha = 0.999\n\telse:\n\t\tprint(\"Sample rate must be 16kHz or 8kHz only\")\n\t\texit(1)\n\tsin = lfilter([1,-1], [1,-alpha], sin)\n\tdither = np.random.random_sample(len(sin)) + np.random.random_sample(len(sin)) - 1\n\tspow = np.std(dither)\n\tsout = sin + 1e-6 * spow * dither\n\treturn sout\n\n\ndef get_fft_spectrum(filename):\n\tsignal = load_wav(filename,c.SAMPLE_RATE)\n\tsignal *= 2**15\n\n\t# get FFT spectrum\n\tsignal = remove_dc_and_dither(signal, c.SAMPLE_RATE) # 数字滤波器,去除直流和颤动成分\n\tsignal = sigproc.preemphasis(signal, coeff=c.PREEMPHASIS_ALPHA) # 对输入信号进行预加重\n\tframes = sigproc.framesig(signal, frame_len=c.FRAME_LEN*c.SAMPLE_RATE, frame_step=c.FRAME_STEP*c.SAMPLE_RATE, winfunc=np.hamming) # 将信号框成重叠帧\n\t# print(\"===================\")\n\t# print(frames.shape)\n\t# print(\"===================\")\n\t# exit(0)\n\tspem = sigproc.logpowspec(frames,c.NUM_FFT) # 计算语谱图\n\t# print(\"===================\")\n\t# print(spem)\n\t# print(\"===================\")\n\t# print(spem.shape)\n\t# print(\"===================\")\n\t# exit(0)\n\n\tspem_norm = normalize_frames(spem.T) # 减去均值,除以标准差\n\n\tlength = spem_norm.shape[1]\n\treserve_length = length - (length % 100)\n\n\tout = spem_norm[:,0:reserve_length] # test\n\t# out = spem_norm[:, start:end] # train\n\n\treturn out\n\n","sub_path":"src/spem/wav_reader_for_test.py","file_name":"wav_reader_for_test.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"44804074","text":"# -*- coding:utf-8 -*-\r\n\r\n\"\"\"使用scrapy_redis的guba_stock_list_redis_spider(dateback)\r\n\"\"\"\r\n\r\nimport re\r\nimport json\r\nimport time\r\nimport math\r\nimport urllib2\r\nfrom scrapy import log\r\nfrom scrapy.http import Request\r\nfrom scrapy.conf import settings\r\nfrom scrapy.spider import Spider\r\nfrom BeautifulSoup import BeautifulSoup\r\nfrom guba.items import GubaPostListItem\r\nfrom guba.middlewares import UnknownResponseError\r\nfrom guba.utils import _default_redis, _default_mongo, HMS2ts, now_datestr\r\nfrom guba.scrapy_redis.spiders import RedisSpider\r\n\r\nHOST_URL = \"http://guba.eastmoney.com/\"\r\nLIST_URL = HOST_URL + \"list,{stock_id},f_{page}.html\" # f表示按照发布时间排序\r\n\r\nLATEST_PAGE = 'latest_page:{stock_id}'\r\nLATEST_TOTAL_PAGE = 'latest_total_page:{stock_id}'\r\nhost = settings.get('REDIS_HOST', None)\r\nport = settings.get('REDIS_PORT', None)\r\nredis = _default_redis(host, port)\r\n\r\n\r\nclass GubaStockListDatebackRedisSpider(RedisSpider):\r\n \"\"\"usage: scrapy crawl guba_stock_list_dateback_redis_spider --loglevel=INFO\r\n \"\"\"\r\n name = 'guba_stock_list_dateback_redis_spider'\r\n redis_key = 'guba_stock_list_dateback_redis_spider:start_urls'\r\n\r\n def parse(self, response):\r\n results = []\r\n resp = response.body\r\n\r\n request_url = response.url\r\n now_page = int(re.search(r'f_(.*?).html', request_url).group(1))\r\n\r\n try:\r\n soup = BeautifulSoup(resp)\r\n now_total_count = int(re.search(r'帖子数 (.*?) 
篇', str(soup.find(\"div\", {\"class\": \"pager\"}))).group(1))\r\n now_total_page = int(math.ceil(now_total_count * 1.0 / 80))\r\n except:\r\n raise UnknownResponseError\r\n\r\n try:\r\n stock_id = re.search(r'barcode = \\\"(.*?)\\\";', str(soup)).group(1)\r\n except:\r\n raise UnknownResponseError\r\n\r\n redis_total_page = redis.get(LATEST_TOTAL_PAGE.format(stock_id=stock_id))\r\n if not redis_total_page:\r\n redis_total_page = now_total_page\r\n redis.set(LATEST_TOTAL_PAGE.format(stock_id=stock_id), redis_total_page)\r\n self.push_url2redis(stock_id, 1, now_total_page)\r\n else:\r\n redis_total_page = int(redis_total_page)\r\n\r\n if now_page >= redis_total_page and redis_total_page < now_total_page:\r\n redis.set(LATEST_TOTAL_PAGE.format(stock_id=stock_id), now_total_page)\r\n self.push_url2redis(stock_id, redis_total_page, now_total_page)\r\n\r\n stock_title = soup.html.head.title\r\n stock_name = re.search(r'_(.*?)股吧', str(stock_title)).group(1).decode('utf8')\r\n\r\n for item_soup in soup.findAll('div', {'class':'articleh'}):\r\n l1_span = item_soup.find(\"span\", {\"class\": \"l1\"})\r\n clicks = int(l1_span.string)\r\n\r\n l2_span = item_soup.find(\"span\", {\"class\": \"l2\"})\r\n replies = int(l2_span.string)\r\n\r\n isStockholder = False # 是否为股东\r\n isTopic = False # 话题\r\n isTop = False # 置顶\r\n isNews = False # 新闻\r\n em_info = None\r\n l3_span = item_soup.find(\"span\", {\"class\": \"l3\"})\r\n em = l3_span.find(\"em\")\r\n if em:\r\n em_info = em.text\r\n\r\n if em_info:\r\n if em_info == u'股友':\r\n isStockholder = True\r\n elif em_info == u'话题':\r\n isTopic = True\r\n elif em_info == u'置顶':\r\n isTop = True\r\n elif em_info == u'新闻':\r\n isNews = True\r\n\r\n # d表示按照时间排序回复\r\n post_url = HOST_URL + l3_span.find(\"a\").get(\"href\").replace('.html', ',d.html').lstrip('/')\r\n post_id = int(re.search(r'news,.*?,(.*?),', post_url).group(1))\r\n post_title = l3_span.find(\"a\").get(\"title\")\r\n\r\n l4_span = item_soup.find(\"span\", {\"class\": \"l4\"})\r\n l4_span_a = l4_span.find(\"a\")\r\n\r\n if l4_span_a:\r\n user_name = l4_span_a.string\r\n try:\r\n user_id = l4_span_a.get(\"data-popper\")\r\n except:\r\n user_id = l4_span_a.get(\"data-popstock\")\r\n user_url = l4_span_a.get(\"href\")\r\n else:\r\n user_name = l4_span.text\r\n user_id = None\r\n user_url = None\r\n\r\n l6_span = item_soup.find(\"span\", {\"class\": \"l6\"})\r\n create_date = l6_span.text\r\n\r\n # 话题贴不属于该股吧,不存数据 \r\n if not isTopic:\r\n item_dict = {'post_id': post_id, 'url': post_url, 'stock_id': stock_id, \\\r\n 'stock_name': stock_name, 'user_name': user_name, 'user_url': user_url, 'user_id': user_id, \\\r\n 'clicks': clicks, 'replies': replies, 'stockholder': isStockholder, 'create_date': create_date, \\\r\n 'em_info': em_info, 'title': post_title}\r\n\r\n item = GubaPostListItem()\r\n for key in GubaPostListItem.RESP_ITER_KEYS:\r\n item[key] = item_dict[key]\r\n\r\n results.append(item)\r\n\r\n latest_page = redis.get(LATEST_PAGE.format(stock_id=stock_id))\r\n if not latest_page:\r\n redis.set(LATEST_PAGE.format(stock_id=stock_id), now_page)\r\n elif now_page > int(latest_page):\r\n redis.set(LATEST_PAGE.format(stock_id=stock_id), now_page)\r\n\r\n return results\r\n\r\n def push_url2redis(self, stock_id, start_page, end_page):\r\n for page in range(start_page + 1, end_page + 1):\r\n redis.lpush(self.redis_key, LIST_URL.format(stock_id=stock_id, 
page=page))\r\n","sub_path":"guba/spiders/guba_stock_list_dateback_redis_spider.py","file_name":"guba_stock_list_dateback_redis_spider.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"237392957","text":"import logging\nimport time\nfrom businessView.generalView import GeneralView\nfrom businessView.openView import OpenView\nfrom businessView.wpView import WpView\nfrom common.myunit import StartEnd\nfrom airtest.core.api import *\n\n\nclass TestWordShapeAttrbute(StartEnd):\n def shapeatt_setup(self):\n ov = OpenView(self.driver)\n ov.open_file('欢迎使用永中Office.docx')\n gv = GeneralView(self.driver)\n gv.switch_write_read()\n wv = WpView(self.driver)\n wv.switch_option('插入')\n wv.insert_text_box()\n\n def test_wp_shape_copy_paste(self):\n # 形状复制粘贴\n logging.info('==========test_wp_shape_copy_paste==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n # connect_device(wv.get_phone_dev())\n\n wv.object_copy_paste()\n time.sleep(10)\n\n def test_wp_shape_cut_paste(self):\n # 形状剪切粘贴\n logging.info('==========test_wp_shape_cut_paste==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n # connect_device(wv.get_phone_dev())\n\n wv.object_cut_paste()\n time.sleep(10)\n\n def test_wp_shape_delete(self):\n # 形状 删除\n logging.info('==========test_wp_shape_delete==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n # connect_device(wv.get_phone_dev())\n wv.object_delete()\n time.sleep(10)\n\n def test_wp_shape_rotate_90(self):\n # 形状旋转90度\n logging.info('==========test_wp_shape_rotate_90==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n # connect_device(wv.get_phone_dev())\n\n wv.object_rotate_90()\n time.sleep(10)\n\n def test_wp_shape_free_rotate(self):\n # 形状自由旋转\n logging.info('==========test_wp_shape_free_rotate==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n # connect_device(wv.get_phone_dev())\n\n wv.object_free_rotate()\n time.sleep(10)\n\n def test_wp_shape_control_point(self):\n # 手势拖拉形状控制点\n logging.info('==========test_wp_shape_control_point==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n # connect_device(wv.get_phone_dev())\n\n wv.shape_control_point()\n time.sleep(10)\n\n def test_wp_shape_text_select(self):\n # 文本框内容选取\n logging.info('==========test_wp_shape_text_select==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n # # connect_device(wv.get_phone_dev())\n wv.text_box_text_select()\n self.assertTrue(exists(Template(r'../Res/res_delete.png', resolution=(1080, 1920))))\n\n def test_wp_shape_move(self):\n # 文本框移动\n logging.info('==========test_wp_shape_move==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n # connect_device(wv.get_phone_dev())\n wv.text_box_move()\n time.sleep(10)\n\n def test_wp_shape_fixed_rotate(self):\n # 形状旋转\n logging.info('==========test_wp_shape_fixed_rotate==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n wv.shape_fixed_rotate()\n\n def test_wp_shape_change_size(self):\n # 设置形状宽高\n logging.info('==========test_wp_shape_change_size==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n wv.shape_chang_size()\n\n def test_wp_shape_text_box_margin(self):\n # 设置文本框边距\n logging.info('==========test_wp_shape_margin_text_box==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n wv.text_box_margin()\n\n def test_wp_shape_fill_color(self):\n # 设置形状填充色及透明度\n logging.info('==========test_wp_shape_fill_color==========')\n 
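# Editor's aside (hedged; not part of the original file): every test in this class repeats the\n        # shapeatt_setup pattern below. The StartEnd base class (imported from common.myunit) is not\n        # shown in this record; a plausible minimal harness, with assumed Appium settings, would be:\n        #   class StartEnd(unittest.TestCase):\n        #       def setUp(self):\n        #           self.driver = webdriver.Remote(APPIUM_URL, CAPS)  # APPIUM_URL/CAPS are assumptions\n        #       def tearDown(self):\n        #           self.driver.quit()\n        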
self.shapeatt_setup()\n wv = WpView(self.driver)\n self.assertTrue(wv.shape_fill_color(), msg='Filling color transparency fail')\n\n def test_wp_shape_broad(self):\n # 形状轮廓\n logging.info('==========test_wp_shape_broad==========')\n self.shapeatt_setup()\n gv = GeneralView(self.driver)\n gv.fold_expand()\n wv = WpView(self.driver)\n wv.shape_broad()\n\n def test_wp_shape_type(self):\n # 形状轮廓类型\n logging.info('==========test_wp_shape_broad==========')\n self.shapeatt_setup()\n gv = GeneralView(self.driver)\n gv.fold_expand()\n wv = WpView(self.driver)\n wv.shape_broad_type()\n\n def test_wp_shape_broad_width(self):\n # 设置形状轮廓粗细\n logging.info('==========test_wp_shape_broad==========')\n self.shapeatt_setup()\n gv = GeneralView(self.driver)\n gv.fold_expand()\n wv = WpView(self.driver)\n wv.shape_broad_width()\n\n def test_wp_shape_shadow(self):\n # 设置形状阴影和三维效果\n logging.info('==========test_wp_shape_broad==========')\n self.shapeatt_setup()\n gv = GeneralView(self.driver)\n gv.fold_expand()\n wv = WpView(self.driver)\n wv.shape_effect()\n\n def test_wp_shape_surround(self):\n # 设置形状文字环绕效果\n logging.info('==========test_wp_shape_surround==========')\n self.shapeatt_setup()\n gv = GeneralView(self.driver)\n gv.fold_expand()\n wv = WpView(self.driver)\n wv.surround('shape')\n\n def test_wp_shape_layer(self):\n # 设置形状叠放次序\n logging.info('==========test_wp_shape_surround==========')\n self.shapeatt_setup()\n wv = WpView(self.driver)\n gv = GeneralView(self.driver)\n gv.switch_write_read()\n gv.switch_write_read()\n wv.switch_option('插入')\n wv.insert_text_box()\n gv.fold_expand()\n wv.shape_layer()\n\n def test_wp_shape_text_fonts(self):\n # 设置形状内选取文字字体\n self.test_wp_shape_text_select()\n wv = WpView(self.driver)\n wv.switch_option('编辑')\n wv.fonts_list()\n\n def test_wp_shape_text_fonts_size(self):\n # 设置形状内选取文字字号\n self.test_wp_shape_text_select()\n wv = WpView(self.driver)\n wv.switch_option('编辑')\n wv.fonts_size_list()\n\n def test_wp_shape_text_effect(self):\n # 设置形状内选取文字效果\n self.test_wp_shape_text_select()\n wv = WpView(self.driver)\n wv.switch_option('编辑')\n wv.fonts_effect()\n\n def test_wp_shape_text_color(self):\n # 设置形状内选取文字颜色\n self.test_wp_shape_text_select()\n wv = WpView(self.driver)\n wv.switch_option('编辑')\n wv.fonts_color()\n\n def test_wp_shape_text_high_light(self):\n # 设置形状内选取文字高亮\n self.test_wp_shape_text_select()\n wv = WpView(self.driver)\n wv.switch_option('编辑')\n wv.fonts_high_light()\n\n def test_wp_shape_text_bullet(self):\n # 设置形状内选取文字项目符号\n self.test_wp_shape_text_select()\n wv = WpView(self.driver)\n wv.switch_option('编辑')\n wv.fonts_bullet()\n\n def test_wp_shape_text_align_indent(self):\n # 设置形状内选取文字对齐、缩进量\n self.test_wp_shape_text_select()\n wv = WpView(self.driver)\n wv.switch_option('编辑')\n wv.align_indent()\n\n def test_wp_shape_text_line_space_size(self):\n # 设置形状内选取文字多倍行距\n self.test_wp_shape_text_select()\n wv = WpView(self.driver)\n wv.switch_option('编辑')\n wv.line_space_size()\n","sub_path":"test_case/test_wp_shape_attribute.py","file_name":"test_wp_shape_attribute.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"625910626","text":"# -*- coding: utf-8 -*-\n\ndef main():\n\n #Goes through each node\n for node in AdminConfig.list('Node').split():\n #Acquire node name\n nodeName = AdminConfig.showAttribute(node, 'name')\n for server in AdminControl.queryNames(\"type=Server,node=\"+ nodeName + \",*\").split():\n #Acquire server name\n serverName = 
AdminControl.getAttribute(server, 'name')\n print(\"serverName \" + serverName)\n #Instrument JVM profiler\n AdminTask.setJVMProperties('[-nodeName ' + nodeName + ' -serverName ' + serverName + ' -genericJvmArguments \"-agentlib:pmiJvmtiProfiler\"]')\n AdminConfig.save()\n print(\"Finished instrumenting GC profiler\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"setGCProfiler.py","file_name":"setGCProfiler.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"196346165","text":"import os\nimport numpy as np\nimport time\n\nimport tensorflow as tf\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.utils import Sequence\n\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\nfrom preprocessing import CreateMLBYoutubeDataset\n\n# Interactive GPU memory Allocation\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\n# Data path\nSPLIT_FILE_PATH = '/home/gon/Desktop/mlb-youtube-master/data/mlb-youtube-segmented.json'\nNEGATIVE_SPLIT_FILE_PATH = '/home/gon/Desktop/mlb-youtube-master/data/mlb-youtube-negative.json'\nSAVE_DIR = '/home/gon/Desktop/save_dir'\nROOT_PATH = '/home/gon/Desktop/sampled_video' # Sub-sampled Video Folder\nMODEL_CHECK_POINT = '/home/gon/Desktop/model_check'\n\n# Model Hyper-parameter\nBATCH_SIZE = 6\nEPOCHS = 5\nMAX_FRAME_LENGTH = 16\nLSTM_DIM = 512\nIMG_SIZE = (224, 224, 3)\nMOBILENET_TRAINABLE = False\n\n# Model ChdeckPoints\ncheckpointer = ModelCheckpoint(\n filepath=os.path.join('dataset', 'checkpoints', 'Resnet50V2.{epoch:03d}-{val_loss:.2f}.hdf5'),\n verbose=1,\n save_best_only=True\n)\n\n# Model EarlyStopper\nearly_stopper = EarlyStopping(patience=10)\n\n\nclass DataLoader(Sequence):\n \"\"\"\n Push Data into CNN for Feature Extraction\n\n :return 5+D Tensor [B, N, H, W, C], label\n \"\"\"\n def __init__(self, dataset, batch_size, max_length, image_size, shuffle=False):\n self.data_list = np.arange(len(dataset))\n self.indexex = np.arange(len(dataset) - batch_size)\n self.dataset = dataset\n self.batch_size = batch_size\n self.max_length = max_length\n self.img_size = image_size\n self.shuffle = shuffle\n self.on_epoch_end()\n\n def on_epoch_end(self):\n self.indexes = np.arange(len(self.data_list))\n if self.shuffle is True:\n np.random.shuffle(self.indexes)\n\n def __data__generation(self, temp_data_list):\n \"\"\"\n Generate data containing batch_size samples\n\n :param temp_data_list:\n :return Frames [B, N, H, W, C], labels(one hot):\n \"\"\"\n # Initialize\n batch_input = np.empty([self.batch_size, self.max_length, self.img_size[0], self.img_size[1], self.img_size[2]])\n batch_label = [None] * self.batch_size\n\n # Data generation\n for i, frames in enumerate(temp_data_list):\n # frames[0] = frames, frames[1] = labels, frames[2] = vid\n batch_input[i, ] = frames[0]\n batch_label[i] = frames[1]\n\n batch_input = tf.convert_to_tensor(batch_input, dtype=tf.float32) # Convert ndarry to tensor\n batch_label = tf.convert_to_tensor(batch_label)\n\n return batch_input, batch_label\n\n def __len__(self):\n return int(np.floor(len(self.data_list) / self.batch_size)) # 4665 > 4640, drop 25\n\n def __getitem__(self, index):\n \"\"\"\n Generate one Batch of Data\n\n :param index:\n :return:\n \"\"\"\n # Generate Batch Indexes\n indexes = self.indexes[index * self.batch_size:(index+1) * 
self.batch_size]\n\n # Find Batch list\n temp_data_list = [self.dataset[k] for k in indexes]\n\n # Generate Data\n x, y = self.__data__generation(temp_data_list)\n\n return x, y\n\n\ndef loss(model, x, y, training):\n y_ = model(x, training=training)\n loss_ = loss_object(y_true=y, y_pred=y_)\n\n # Masking\n # mask = tf.math.logical_not(tf.math.equal(x, 0))\n # loss_ *= mask\n\n # return tf.reduce_mean(loss_)\n return loss_object(y_true=y, y_pred=y_)\n\n\ndef grad(model, x, y):\n with tf.GradientTape() as tape:\n loss_value = loss(model, x, y, training=True)\n\n return loss_value, tape.gradient(loss_value, model.trainable_variables)\n\n\ndef train(model, dataloader, optimizer, num_epochs=EPOCHS):\n start_time = time.time()\n print(\"\\nModel training Started in : [%s]\" % timer())\n\n train_loss_results = []\n train_accuracy_results = []\n training_total_time = []\n\n for epoch in range(num_epochs):\n print('\\n\\n Epoch {} / {}'.format(epoch, num_epochs - 1))\n print('-' * 20)\n\n epoch_loss_avg = tf.keras.metrics.Mean()\n epoch_accuracy = tf.keras.metrics.CategoricalAccuracy()\n\n for x, y in dataloader:\n loss_value, grads = grad(model, x, y)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n # Track Progress\n epoch_loss_avg.update_state(loss_value)\n epoch_accuracy.update_state(y, model(x, training=True))\n\n print(\"Current loss : {}\".format(loss_value))\n\n end_time = time.time() - start_time\n print(\"\\nModel training ended in : [%s]\" % timer())\n\n train_loss_results.append(epoch_loss_avg.result())\n train_accuracy_results.append(epoch_accuracy.result())\n training_total_time.append(end_time)\n\n training_total_time = np.sum(training_total_time)\n\n return train_loss_results, train_accuracy_results, training_total_time\n\n\ndef timer():\n current_time_info = time.strftime('%c', time.localtime(time.time()))\n return current_time_info\n\n\n# Data pre-processing\ntrain_dataset = CreateMLBYoutubeDataset(SPLIT_FILE_PATH, \"training\", ROOT_PATH, MAX_FRAME_LENGTH)\ntest_dataset = CreateMLBYoutubeDataset(SPLIT_FILE_PATH, \"testing\", ROOT_PATH, MAX_FRAME_LENGTH)\n\n# Define Dataloader\ntrain_dataloader = DataLoader(train_dataset, BATCH_SIZE, MAX_FRAME_LENGTH, IMG_SIZE)\ntest_dataloader = DataLoader(test_dataset, BATCH_SIZE, MAX_FRAME_LENGTH, IMG_SIZE)\n\n# Phase\ndatasets = {'train': train_dataset, 'test': test_dataset}\ndataloaders = {'train': train_dataloader, 'test': test_dataloader}\n\n# Define Model layers With Functional API\nmodel_input = tf.keras.Input(shape=(16, 224, 224, 3), name=\"video_frame\")\nmasking_layer = layers.Masking()\nfeature_extraction_layer = tf.keras.applications.MobileNetV2(weights='imagenet', include_top=True)\nfeature_extraction_layer.trainbable = MOBILENET_TRAINABLE # Freeze weight\nlstm_layer = layers.LSTM(LSTM_DIM)\ndense_layer_1 = layers.Dense(256, activation='relu')\ndense_layer_2 = layers.Dense(8, activation='softmax')\n\n# Process\nx = masking_layer(model_input)\nx = tf.reshape(x, [-1, 224, 224, 3], name=\"5D_to_4D_Tensor\")\nx = feature_extraction_layer(x)\nx = tf.reshape(x, [BATCH_SIZE, MAX_FRAME_LENGTH, 1000], name=\"Segment_feature\")\nx = lstm_layer(x)\nx = dense_layer_1(x)\nx = dense_layer_2(x)\n\n# Model Complile\nmodel = tf.keras.Model(model_input, x)\nmodel.summary()\n\n# Model parameter\nloss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)\n\n\n# model training with iteration\ntrain_loss, train_acc, training_time = train(model, 
train_dataloader, optimizer)\nprint(\"train_loss : {}, train_acc : {}, train_time : {}\".format(train_loss, train_acc, training_time))\n\ndef sample_loss_calc():\n # just for back up\n\n # Sample Loss function\n l = loss(model, train_dataloader[0], training=False)\n print(\"Loss test : {}\".format(l))\n\n x, y = train_dataloader[0]\n y_ = model(x, training=False)\n loss_ = loss_object(y_true=y, y_pred=y_)\n\n # Masking\n mask = tf.math.logical_not(tf.math.equal(x, 0))\n loss_ += mask\n\n\n\n\n # loss, grad\n loss_value, grads = grad(model, train_dataloader[0])\n print(\"\\nStep : {}, Initial Loss : {}\".format(optimizer.iterations.numpy(),\n loss_value.numpy()))\n\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n print(\"\\nStep : {}, Loss : {}\".format(optimizer.iterations.numpy(),\n loss(model, train_dataloader[0], training=True).numpy()))\n","sub_path":"old_Files/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"10922668","text":"import datetime\nimport uuid\nfrom buildbot.plugins import util, steps\nfrom buildbot.process.buildstep import ShellMixin\nfrom buildbot.process.results import SKIPPED\nfrom buildbot.steps.shell import ShellCommand\nfrom buildbot.steps.shellsequence import ShellSequence\nfrom buildbot.steps.trigger import Trigger\nfrom twisted.internet import defer\nfrom maxscale.builders.support import support\nfrom maxscale.change_source.maxscale import get_test_set_by_branch\nfrom maxscale import workers\nfrom enum import IntEnum\nfrom maxscale.config import constants\n\n\ndef cloneRepository():\n \"\"\"Clone MaxScale repository using default configuration options\"\"\"\n return [steps.Git(\n name=util.Interpolate(\"Clone repository '%(prop:repository)s', branch '%(prop:branch)s'\"),\n repourl=util.Property('repository'),\n branch=util.Property('branch'),\n mode='incremental',\n haltOnFailure=True)]\n\n\ndef cleanBuildDir():\n \"\"\"Clean the build directory after the worker have completed the task\"\"\"\n return [steps.ShellCommand(\n name=\"Clean build directory using 'git clean -fd'\",\n command=[\"rm\", \"-rf\", util.Property('builddir')],\n alwaysRun=True)]\n\n\ndef configureMdbciVmPathProperty():\n \"\"\"Configure the MDBCI_VM_PATH property\"\"\"\n buildSteps = getWorkerHomeDirectory()\n configureMdbciProperty = steps.SetProperty(\n name=\"Set MDBCI_VM_PATH property to $HOME/vms\",\n property=\"MDBCI_VM_PATH\",\n value=util.Interpolate(\"%(prop:HOME)s/vms\"),\n hideStepIf=True,\n )\n\n buildSteps.append(configureMdbciProperty)\n return buildSteps\n\n\ndef runMdbciCommand(name, *command):\n \"\"\"Run the MDBCI with the specified command\"\"\"\n return steps.ShellCommand(\n name=name,\n command=[util.Interpolate(\"%(prop:HOME)s/mdbci/mdbci\"), *command],\n timeout=1800\n )\n\n\ndef generateMdbciRepositoryForTarget():\n \"\"\"Generate repository configuration for the target specified by the property target\"\"\"\n return runMdbciCommand(\n util.Interpolate(\"Generate new repo descriptions for %(prop:target)s\"),\n \"generate-product-repositories\", \"--product\", \"maxscale_ci\", \"--product-version\", util.Property(\"target\")\n )\n\n\ndef getWorkerHomeDirectory():\n \"\"\"Capture worker home directory into the HOME property\"\"\"\n return [steps.SetPropertiesFromEnv(\n name=\"Get HOME variable from the worker into build property\",\n hideStepIf=True,\n variables=[\"HOME\"])]\n\n\ndef shouldDestroyVirtualMachines(step):\n 
\"\"\"Helper method that checks whether the VM destruction step should be run or not\"\"\"\n if step.getProperty(\"try_already_running\") == \"yes\" or step.getProperty(\"do_not_destroy_vm\") == \"yes\":\n return False\n return True\n\n\ndef destroyVirtualMachine(configurationName=util.Property(\"mdbciConfig\")):\n \"\"\"Destroy virtual machine if it was not destroyed after the build\"\"\"\n return downloadAndRunScript(\n scriptName=\"destroy_vm.py\",\n extraFiles=[\"common.py\"],\n args=[\"--configuration-name\", configurationName],\n name=\"Destroy leftover VMs using MDBCI\",\n alwaysRun=True,\n doStepIf=shouldDestroyVirtualMachines,\n )\n\n\ndef destroyAllConfigurations(configurationsPath):\n \"\"\"Destroy all configurations in the specified path and remove it\"\"\"\n return downloadAndRunScript(\n scriptName=\"destroy_vm.py\",\n extraFiles=[\"common.py\"],\n args=[\"--destroy-all\", \"--configuration-dir\", configurationsPath],\n alwaysRun=True,\n name=\"Destroy VMs created by the system tests\",\n doStepIf=shouldDestroyVirtualMachines,\n )\n\n\ndef save_env_to_property(rc, stdout, stderr):\n ''' Function used as the extrat_fn function for SetProperty class\n This takes the output from env command and creates a dictionary of\n the environment, the result of which is stored in a property names\n env'''\n if not rc:\n env_list = [l.strip() for l in stdout.split('\\n')]\n env_dict = {l.split('=', 1)[0]: l.split('=', 1)[1] for l in\n env_list if len(l.split('=', 1)) == 2}\n return {'env': env_dict}\n\n\n@util.renderer\ndef clean_workspace_command(props):\n return ['git', 'clean', '-fd']\n\n\nclass SetDefaultPropertiesStep(ShellMixin, steps.BuildStep):\n name = 'Set default properties'\n\n def __init__(self, default_properties, **kwargs):\n self.default_properties = default_properties\n kwargs = self.setupShellMixin(kwargs, prohibitArgs=['command'])\n steps.BuildStep.__init__(self, **kwargs)\n\n @defer.inlineCallbacks\n def run(self):\n for property_name, value in self.default_properties.items():\n if self.getProperty(property_name) is None:\n self.setProperty(\n property_name,\n value,\n 'setDefaultProperties'\n )\n cmd = yield self.makeRemoteShellCommand(\n command=['echo', \"Set default property: {}={}\".format(property_name, value)])\n yield self.runCommand(cmd)\n defer.returnValue(0)\n\n\nclass StdoutShellCommand(ShellCommand):\n \"\"\"\n Runs single shell command on a remote worker\n and outputs stdout into the stdout\n \"\"\"\n def __init__(self, *args, **kwargs):\n ShellCommand.__init__(self, *args, collectStdout=True, **kwargs)\n\n def commandComplete(self, cmd):\n self.addCompleteLog('stdout', cmd.stdout)\n\n\ndef getFormattedDateTime(format):\n \"\"\"\n Creates renderer which return formatted datetime\n :param format: format of datetime string\n :return: rendered for datetime\n \"\"\"\n @util.renderer\n def formatDateTime(properties):\n return datetime.datetime.now().strftime(format)\n\n return formatDateTime\n\n\nclass TargetInitOptions(IntEnum):\n GENERATE = 1\n SET_FROM_BRANCH = 2\n\n\ndef initTargetProperty():\n \"\"\"\n Sets 'target' property of the build to:\n - -buildbot- if it isn't set yet or property 'targetInitMode' is TargetInitOptions.GENERATE;\n - if property 'targetInitMode' is TargetInitOptions.SET_FROM_BRANCH.\n :return: list of steps\n \"\"\"\n return [\n steps.SetProperty(\n name=util.Interpolate(\"Set 'target' property\"),\n property=\"target\",\n value=util.Interpolate(\"%(prop:branch)s-buildbot-%(kw:startTime)s\",\n 
startTime=getFormattedDateTime(\"%Y-%b-%d-%H-%M-%S\")),\n doStepIf=lambda step: step.build.getProperty('target') is None and\n step.build.getProperty('targetInitMode') is None or\n step.build.getProperty('targetInitMode') == TargetInitOptions.GENERATE,\n hideStepIf=lambda results, s: results == SKIPPED\n ),\n steps.SetProperty(\n name=util.Interpolate(\"Set 'target' property\"),\n property=\"target\",\n value=util.Property(\"branch\"),\n doStepIf=lambda step: step.build.getProperty('targetInitMode') == TargetInitOptions.SET_FROM_BRANCH,\n hideStepIf=lambda results, s: results == SKIPPED\n )\n ]\n\n\nclass NameInitOptions(IntEnum):\n GENERATE = 1\n KEEP_ORIGINAL = 2\n\n\ndef initNameProperty():\n \"\"\"\n Sets 'name' property of the build to:\n - -buildbot- if it isn't set yet or property 'nameInitMode' is NameInitOptions.GENERATE;\n - if property 'nameInitMode' is NameInitOptions.KEEP_ORIGINAL.\n :return: list of steps\n \"\"\"\n return [\n steps.SetProperty(\n name=util.Interpolate(\"Set 'name' property\"),\n property=\"name\",\n value=util.Interpolate(\"%(prop:branch)s-buildbot-%(kw:startTime)s\",\n startTime=getFormattedDateTime(\"%Y-%b-%d-%H-%M-%S\")),\n doStepIf=lambda step: step.build.getProperty('name') is None and\n step.build.getProperty('nameInitMode') is None or\n step.build.getProperty('nameInitMode') == NameInitOptions.GENERATE,\n hideStepIf=lambda results, s: results == SKIPPED\n )\n ]\n\n\ndef assignWorker(_builder, workerForBuilderList, buildRequest):\n \"\"\"\n Returns available worker for a builder\n filtered by the scheduler which triggered build and by the giver task-host mapping\n See 'nextWorker' at http://docs.buildbot.net/current/manual/configuration/builders.html\n \"\"\"\n workerNames = workers.workersOnHosts(buildRequest.properties.getProperty(\"host\", default=\"\"),\n *buildRequest.properties.getProperty(\"buildHosts\", default=[]))\n for workerForBuilder in workerForBuilderList:\n workerName = workerForBuilder.worker.workername\n if workerForBuilder.isAvailable() and workerName in workerNames:\n buildRequest.properties.setProperty(\"host\", workers.workerToHostMap()[workerName],\n \"Assign worker\")\n return workerForBuilder\n\n\ndef assignBestHost(hostPool):\n\n def selectWorkersFromHostPool(builder, workersForBuilders, buildRequest):\n \"\"\"\n Returns availble workersForBuilders on a host with the least tasks running\n :param builder: Builder for this task\n :param workersForBuilders: List of workerForBuilders\n :param buildRequest: build request\n :return: List of workersForBuilders for a specific host\n \"\"\"\n # Proceed directly to worker assignment if host is specified\n if buildRequest.properties.getProperty(\"host\"):\n return assignWorker(buildRequest, workersForBuilders, buildRequest)\n\n workerToHostMap = workers.workerToHostMap()\n hostToWorkersMap = {}\n for name, host in workerToHostMap.items():\n if host in hostPool or not hostPool:\n hostToWorkersMap[host] = hostToWorkersMap.get(host, []) + [name]\n workersForBuilders = list(filter(lambda wfb: workerToHostMap[wfb.worker.workername] in hostToWorkersMap,\n workersForBuilders))\n availableWFB = collectAvailableWorkers(workersForBuilders, workerToHostMap, hostToWorkersMap)\n return assignWorker(builder, availableWFB, buildRequest)\n\n return selectWorkersFromHostPool\n\n\ndef findBestHost(workersForBuilders, workerToHostMap, hostToWorkersMap):\n \"\"\"\n Finds host with least amount of tasks running on a builder\n :param workersForBuilders: List of workerForBuilders\n :param workerToHostMap: Map 
where each worker contains its host\n :param hostToWorkersMap: Map where each host contains its workers\n :return: Name of hosts\n \"\"\"\n occupiedWorkers = dict(map(lambda item: (item[0], len(item[1])), hostToWorkersMap.items()))\n for wfb in workersForBuilders:\n if wfb.isAvailable():\n occupiedWorkers[workerToHostMap[wfb.worker.workername]] -= 1\n bestHost = sorted(occupiedWorkers.items(), key=lambda item: item[1])[0]\n return bestHost[0]\n\n\ndef collectAvailableWorkers(workersForBuilders, workerToHostMap, hostToWorkersMap):\n \"\"\"\n Collects available workers from the least loaded host\n :param workersForBuilders: List of workerForBuilders\n :param workerToHostMap: Map where each worker contains its host\n :param hostToWorkersMap: Map where each host contains its workers\n :return: List of available workers on the best host\n \"\"\"\n availableWFB = []\n bestHost = findBestHost(workersForBuilders, workerToHostMap, hostToWorkersMap)\n for wfb in workersForBuilders:\n if wfb.worker.workername in hostToWorkersMap[bestHost]:\n availableWFB.append(wfb)\n return availableWFB\n\n\ndef generateRepositories():\n \"\"\"\n Runs 'mdbcu generate-product-repositories' command on a worker\n :return: list of steps\n \"\"\"\n return [steps.ShellCommand(\n name=\"Generate product repositories\",\n command=[util.Interpolate(\"%(prop:HOME)s/mdbci/mdbci\"), \"generate-product-repositories\"],\n haltOnFailure=True\n )]\n\n\ndef syncRepod():\n \"\"\"\n Creates steps for running rsync to remote workers\n :return: list of steps\n \"\"\"\n return [RsyncShellSequence(name=\"Synchronizing ~/.config/mdbci/repo.d among workers\",\n haltOnFailure=False, flunkOnFailure=False, flunkOnWarnings=False)]\n\n\nclass RsyncShellSequence(ShellSequence):\n \"\"\"\n rsync ~/.config/mdbci/repo.d directory from current worker\n to every other unique worker's host\n \"\"\"\n def createRsyncSequence(self, hosts):\n \"\"\"\n Creates a list of shell commands for synchronization of .config directory on each given host\n :param hosts: List of host addresses\n :return: List with rsync shell command for each host\n \"\"\"\n return [util.ShellArg(command=\"rsync -r ~/.config/mdbci/repo.d/ -e \"\n \"'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' \"\n \"{}@{}:~/.config/mdbci/repo.d\".format(constants.HOST_USERS.get(host), constants.HOST_FULL.get(host)),\n logfile=\"rsync to {}\".format(host)) for host in hosts]\n\n def getRemoteWorkersHosts(self):\n \"\"\"\n Creates a list of unique hosts which holds running workers excluding host of the current worker\n :return: List of host addresses\n \"\"\"\n hosts = set()\n currentHost = None\n for worker in workers.WORKER_CREDENTIALS:\n if worker[\"name\"] != self.getProperty(\"workername\"):\n if self.master.workers.connections.get(worker[\"name\"]):\n hosts.add(worker[\"host\"])\n else:\n currentHost = worker[\"host\"]\n hosts.discard(currentHost)\n return hosts\n\n def run(self):\n hosts = self.getRemoteWorkersHosts()\n self.commands = self.createRsyncSequence(hosts)\n if not hosts:\n self.descriptionDone = \"No remote hosts found\"\n return self.runShellSequence(self.commands)\n\n\ndef downloadScript(scriptName, hideStepIf=True, **kwargs):\n \"\"\"Downloads script with the given name from scripts directory to the current worker\"\"\"\n return [steps.FileDownload(\n name=\"Transferring {} to worker\".format(scriptName),\n mastersrc=\"maxscale/builders/support/scripts/{}\".format(scriptName),\n workerdest=util.Interpolate(\"%(prop:builddir)s/scripts/{}\".format(scriptName)),\n 
mode=0o755,\n hideStepIf=hideStepIf,\n **kwargs\n )]\n\n\ndef writeBuildResultsToDatabase(**kwargs):\n \"\"\"Call the script to save results to the database\"\"\"\n return [steps.SetPropertyFromCommand(\n name=\"Save test results to the database\",\n command=[util.Interpolate(\"%(prop:builddir)s/scripts/write_build_results.py\"),\n \"--run-id\", util.Property(\"buildId\"),\n util.Property(\"jsonResultsFile\"),\n \"--database-info\", util.Secret(\"dataBaseInfo.json\")],\n extract_fn=extractDatabaseBuildid,\n **kwargs)]\n\n\ndef extractDatabaseBuildid(rc, stdout, stderr):\n keyPhrase = \"LAST_WRITE_BUILD_RESULTS_ID\"\n for line in stdout.split(\"\\n\"):\n if line.startswith(keyPhrase):\n return {keyPhrase: line[len(keyPhrase) + 2:]}\n return {}\n\n\ndef remoteRunScriptAndLog(scriptName, logFile, resultFile, **kwargs):\n \"\"\"\n Runs shell script which name is given in a property script_name\n and save results to the log file\n \"\"\"\n service_script = \"run_script_and_log.py\"\n actions = downloadScript(service_script)\n actions.append(\n steps.ShellCommand(command=[\n util.Interpolate(\"%(prop:builddir)s/scripts/{script}\".format(script=service_script)),\n \"--script_name\", scriptName,\n \"--log_file\", logFile,\n \"--result_file\", resultFile],\n timeout=1800,\n **kwargs)\n )\n return actions\n\n\ndef writeBuildsResults():\n \"\"\"Downloads and runs script for saving build results to database\"\"\"\n return downloadScript(\"write_build_results.py\", alwaysRun=True) + writeBuildResultsToDatabase(alwaysRun=True)\n\n\ndef downloadAndRunScript(scriptName, extraFiles=(), args=(), **kwargs):\n \"\"\"\n Downloads the script to remote location and executes it\n :param: scriptName name of the local script to execute\n :param: extraFiles name of extra files that should be transferred to the remote host\n :param: args list of arguments to pass to the remote script\n :param: kwargs parameters of the executeStep\n \"\"\"\n taskSteps = []\n allFiles = list(extraFiles)\n allFiles.append(scriptName)\n for file in allFiles:\n taskSteps.append(steps.FileDownload(\n name=\"Transferring {} to worker\".format(file),\n mastersrc=\"maxscale/builders/support/scripts/{}\".format(file),\n workerdest=util.Interpolate(\"%(prop:builddir)s/scripts/{}\".format(file)),\n hideStepIf=True,\n alwaysRun=True,\n mode=0o755,\n ))\n remoteScriptName = util.Interpolate(\"%(prop:builddir)s/scripts/{}\".format(scriptName))\n taskSteps.append(steps.ShellCommand(\n command=[remoteScriptName, *args],\n timeout=1800,\n **kwargs\n ))\n return taskSteps\n\n\n@util.renderer\ndef renderTestSet(properties):\n \"\"\"\n Returns test set value if it's present, otherwise returns test set filtered by branch\n :param properties:\n :return: Test set\n \"\"\"\n return properties.getProperty(\"test_set\") \\\n or get_test_set_by_branch(properties.getProperty('branch'))\n\n\nclass BuildAllTrigger(Trigger):\n \"\"\"\n Implements custom trigger step which triggers task on a virtual builder for every marked checkbox\n \"\"\"\n def getSchedulersAndProperties(self):\n \"\"\"\n Overrides method getSchedulersAndProperties of Trigger class\n so that it returns a scheduler for every marked checkbox\n :return: List which contains schedulers for every marked checkbox\n \"\"\"\n schedulers = []\n for checkboxName, checkboxValue in self.set_properties[\"build_box_checkbox_container\"].items():\n if checkboxValue:\n propertiesToSet = {}\n propertiesToSet.update(self.set_properties)\n propertiesToSet.update({\"box\": checkboxName})\n 
propertiesToSet.update({\"virtual_builder_name\":\n \"{}_{}\".format(self.set_properties[\"virtual_builder_name\"], checkboxName)})\n for schedulerName in self.schedulerNames:\n schedulers.append({\n \"sched_name\": schedulerName,\n \"props_to_set\": propertiesToSet,\n \"unimportant\": schedulerName in self.unimportantSchedulerNames\n })\n\n return schedulers\n\n\ndef runSshCommand(name='', host=\"\", command=(), timeout=1800, **kwargs):\n \"\"\"\n Run command on the remote server\n :param name: name of the command to show to end user\n :param host: the host definition\n :param command: a set of separate command parts\n :param timeout:\n :param kwargs: different arguments to pass to ShellCommand\n :return: ShellCommand configured to run remote ssh command\n \"\"\"\n sshCommand = [\"ssh\", \"-o\", \"StrictHostKeyChecking=no\", \"-o\", \"UserKnownHostsFile=/dev/null\", host]\n sshCommand.extend(command)\n return steps.ShellCommand(\n name=name,\n command=sshCommand,\n timeout=timeout,\n **kwargs\n )\n\n\ndef rsyncViaSsh(name=\"\", local=\"\", remote=\"\", timeout=1800, **kwargs):\n \"\"\"\n Run rsync to put directory to the remote server\n :param name: Command name\n :param local: path to the local folder\n :param remote: path to the remote folder that\n :param timeout: timeout for the service\n :param kwargs: misc arguments to ShellCommand\n :return: ShellCommand configured to run rsync remote ssh\n \"\"\"\n rsyncCommand = [\"rsync\", \"-avz\", \"--progress\", \"-e\",\n \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null\", local, remote]\n return steps.ShellCommand(\n name=name,\n command=rsyncCommand,\n timeout=timeout,\n **kwargs\n )\n\n\ndef shouldGenerateBuildId(properties):\n return not (properties.hasProperty(\"buildId\")) or not properties.getProperty(\"buildId\")\n\n\n@util.renderer\ndef generateBuildId(properties):\n if not shouldGenerateBuildId(properties):\n return {}\n\n return {\n \"buildId\": str(uuid.uuid4())\n }\n\n\ndef determineBuildId():\n return steps.SetProperties(\n name=\"Automatically set buildId property\",\n doStepIf=shouldGenerateBuildId,\n properties=generateBuildId,\n )\n\n\ndef shouldAppendTestRunId(properties):\n return not (properties.hasProperty(\"appendTestRunId\")) or properties.getProperty(\"appendTestRunId\")\n\n\n@util.renderer\ndef generateTestRunId(properties):\n if not shouldAppendTestRunId(properties):\n return {}\n\n return {\n \"name\": \"{}-{}-{}\".format(properties.getProperty(\"name\"), properties.getProperty(\"buildername\"),\n properties.getProperty(\"buildnumber\"))\n }\n\n\ndef determineTestRunName():\n return steps.SetProperties(\n name=\"Add test run id to the name of the build\",\n doStepIf=shouldAppendTestRunId,\n properties=generateTestRunId,\n )\n","sub_path":"master/maxscale/builders/support/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":21373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"80530913","text":"s = raw_input()\ns = list(s) #Quebra string\n\nfor x in range(len(s)):\n\tif s[x].isupper():\n\t\ts[x] = s[x].lower()\n\telif s[x].islower():\n\t\ts[x] = s[x].upper()\ns = ''.join(s) #Junta string\n\nprint(s)","sub_path":"HackerEarth/Wellington/Python/Basic Programming/Input Output/Basic of Input Output/Toogle_String.py","file_name":"Toogle_String.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"63382407","text":"str1 = 'a few words'\nstr2 
= \"a few words\"\nstr3 = '''a few words'''\n\nmany_line_string = '''This is line 1.\nThis is the second line.\nAnd this is line three.'''\n\n\nsentence1 = '''The processive form of 'it' is 'its'\n-\"it's\"is an abbreviation of \"it is\".'''\n\nsentence2 = '''A \"\" is three 'but two' do not make a \".'''\n\naseq = \"abcd\"\n\nfor elem in aseq:\n print(elem)","sub_path":"labs/lab05/5.0 str.py","file_name":"5.0 str.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"425846949","text":"from .base_trigger import BasePollingTrigger\nimport datetime\nimport pathlib\nfrom ..exceptions import MissingInformationError\n\n\nclass FileWatchTrigger(BasePollingTrigger):\n \"\"\"\n Watch for the presence of a file; the filename can contain\n markers for date formatting.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if 'filename' not in kwargs:\n raise MissingInformationError(\"FileWatchTrigger requires 'filename' parameter\")\n self.filename = kwargs['filename']\n self.last_filename = None\n if self.label:\n self.label = self.label + \" - \" + self.filename\n else:\n self.label = self.filename\n\n def nudge(self):\n # build filename, \n filename = datetime.datetime.today().strftime(self.filename)\n # does the filename exist\n path = pathlib.Path(filename)\n if path.is_file() and path != self.last_filename:\n self.on_event(filename)\n self.last_filename = path\n return True\n return False\n","sub_path":"caffael/triggers/file_watch_trigger.py","file_name":"file_watch_trigger.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"174795948","text":"from models.WaterSports import WaterSports\n\n\nclass Swimming(WaterSports):\n\n def __init__(self, name, amount_of_members,\n need_of_sport_equipment, is_olympic_sport,\n purpose_of_sport, rating, pool_length):\n super(Swimming, self).__init__(name, amount_of_members,\n need_of_sport_equipment, is_olympic_sport,\n purpose_of_sport, rating)\n self.pool_length = pool_length\n\n def __str__(self):\n return \"Swimming\" + super(WaterSports, self).__str__() \\\n + \", pool length=\" + str(self.pool_length) + \\\n + \"}\"\n","sub_path":"venv/models/Swimming.py","file_name":"Swimming.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"114489022","text":"#!/usr/bin/env python\n\nimport sys\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation, Dropout, BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.regularizers import l2\n\n\ndef save_keras_model(model_name, model, history):\n model_fname = model_name + '.h5'\n history_fname = model_name + '_history.pkl'\n model.save(model_fname)\n with open(history_fname, 'wb') as fh:\n pickle.dump(history, fh)\n\n\ndef load_keras_model(model_name):\n model_fname = model_name + '.h5'\n history_fname = model_name + '_history.pkl'\n model = load_model(model_fname)\n with open(history_fname, 'rb') as fh:\n history = pickle.load(fh)\n\n return model, history\n\n\ndef define_keras_model():\n # create the model\n model = Sequential()\n init = 'he_uniform'\n reg = l2(1e-3)\n\n # input layer\n model.add(Dense(units=180, input_dim=8,\n 
+{"seq_id":"114489022","text":"#!/usr/bin/env python\n\nimport sys\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation, Dropout, BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.regularizers import l2\n\n\ndef save_keras_model(model_name, model, history):\n model_fname = model_name + '.h5'\n history_fname = model_name + '_history.pkl'\n model.save(model_fname)\n with open(history_fname, 'wb') as fh:\n pickle.dump(history, fh)\n\n\ndef load_keras_model(model_name):\n model_fname = model_name + '.h5'\n history_fname = model_name + '_history.pkl'\n model = load_model(model_fname)\n with open(history_fname, 'rb') as fh:\n history = pickle.load(fh)\n\n return model, history\n\n\ndef define_keras_model():\n # create the model\n model = Sequential()\n init = 'he_uniform'\n reg = l2(1e-3)\n\n # input layer\n model.add(Dense(units=180, input_dim=8,\n kernel_initializer=init, kernel_regularizer=reg))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n # model.add(Dropout(0.5))\n\n # first hidden layer\n model.add(Dense(units=120, kernel_initializer=init, kernel_regularizer=reg))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n # model.add(Dropout(0.2))\n\n # second hidden layer\n model.add(Dense(units=60, kernel_initializer=init, kernel_regularizer=reg))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n # model.add(Dropout(0.1))\n\n # output layer\n model.add(Dense(units=1, kernel_initializer=init))\n model.add(BatchNormalization())\n model.add(Activation('sigmoid'))\n\n # optimizer params\n optim = Adam(lr=1e-2, decay=1e-6)\n\n # compile the model\n model.compile(loss='binary_crossentropy',\n optimizer=optim, metrics=['accuracy'])\n return model\n\nif __name__ == '__main__':\n model_name = 'pima_model'\n dataset = np.genfromtxt('data/pima-indians-diabetes.csv', delimiter=',')\n\n # get the input and output variables\n X = dataset[:, 0:8]\n y = dataset[:, 8]\n\n # split into train and test sets\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n\n if len(sys.argv) == 1:\n # define keras model\n model = define_keras_model()\n\n # fit the model with training data with a 0.2 val split\n hist = model.fit(X_train, y_train, epochs=300,\n batch_size=20, verbose=0, validation_split=0.2)\n history = hist.history\n save_keras_model(model_name, model, history)\n else:\n model, history = load_keras_model(model_name)\n\n # evaluate the model on the test set\n scores = model.evaluate(X_test, y_test)\n\n # gather all the information regarding loss and accuracy\n met = ['Loss', 'Accuracy']\n spec = ['Training', 'Validation', 'Test']\n results = np.array([[history['loss'][-1], history['acc'][-1]],\n [history['val_loss'][-1], history['val_acc'][-1]],\n [scores[0], scores[1]]])\n results = np.around(results, 3)\n\n # print out a nice table of the information\n row_format = '{:>15}' * (len(met) + 1)\n print(\"\\n\\n\")\n print(row_format.format(\"\", *met))\n for s, r in zip(spec, results):\n print(row_format.format(s, *r))\n print(\"\\n\")\n\n # summarize history for accuracy and loss\n plt.subplot(1, 2, 1)\n plt.plot(history['loss'])\n plt.plot(history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper right')\n\n plt.subplot(1, 2, 2)\n plt.plot(history['acc'])\n plt.plot(history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper right')\n plt.savefig('acc_loss.pdf')\n plt.show()\n","sub_path":"pima-indians/pima_indians.py","file_name":"pima_indians.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"347003784","text":"#score = input(\"Enter Score in the interval <0,1>: \")\ndef average1(score):\n s=float(score)\n i=True\n while i==True:\n try:\n if ((s>=0) & (s<=1.0)):\n if s>=0.9: print(\"A\")\n elif s>=0.8: print(\"B\")\n elif s>=0.7: print(\"C\")\n elif s>=0.6: print(\"D\")\n elif s<0.6: print(\"F\")\n else:\n print(\"Value is out of range\")\n i=False\n return\n except:\n print(\"Value is out of range\")\n i=False\n return\n","sub_path":"Python/average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"259591891","text":"from .forms import 
ApprovalForm\nfrom django.shortcuts import render\n# from .forms import MyForm\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import api_view\nfrom django.core import serializers\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.http import JsonResponse\nfrom django.contrib import messages\nfrom rest_framework.parsers import JSONParser\nfrom .models import approvals\nfrom .serializers import approvalSerializers\nimport pickle\nimport joblib\nimport json\nimport numpy as np\nimport pandas as pd\nimport os\n\n\n# Create your views here.\nclass ApprovalsView(viewsets.ModelViewSet):\n queryset = approvals.objects.all()\n serializer_class = approvalSerializers\n\n# @api_view([\"POST\"])\ndef approvereject(unit):\n try:\n if float(unit['security_deposit']) == 0.0:\n return [2] #Rejected\n if(float(unit['security_deposit']) >= float(unit['loan_ammount'])):\n return [3] #Approved\n if float(unit['security_deposit']) / float(unit['loan_ammount']) < 0.5:\n return [4] #Rejected\n if float(unit['security_deposit']) / float(unit['loan_ammount']) >= 0.5 and float(unit['security_deposit']) / float(unit['loan_ammount']) < 1:\n mdl=joblib.load('../research/loan_model_lr.pkl')\n unit['ratio_de_solvabilite'] = float(unit['passifs_non_courant']) / float(unit['capitaux_propres'])\n unit['ratio_de_rentabilite'] = float(unit['resultat_net']) / float(unit['capitaux_propres'])\n unit['ratio_d_endettement'] = (float(unit['total_bilan']) - float(unit['capitaux_propres'])) / float(unit['total_bilan'])\n unit['marge_nette_sur_vente'] = (float(unit['resultat_net']) / float(unit['chiffre_d_affaires'])) * 100\n x_cols = ['ratio_de_solvabilite', 'ratio_de_rentabilite', 'ratio_d_endettement', 'marge_nette_sur_vente']\n y_pred=mdl.predict(unit[x_cols])\n newdf=pd.DataFrame(y_pred, columns=['Status'])\n #newdf=newdf.replace({1:'Approved', 0:'Rejected'})\n if newdf.values[0][0] == 0 :\n return [newdf.values[0][0], unit['ratio_de_solvabilite'], unit['ratio_de_rentabilite'], unit['ratio_d_endettement'], unit['marge_nette_sur_vente']]\n return [newdf.values[0][0]]\n except ValueError as e:\n return (e.args[0])\n\ndef sctcontact(request):\n if request.method == 'POST':\n form=ApprovalForm(request.POST)\n if form.is_valid():\n sct = form.cleaned_data['sct']\n capitaux_propres = form.cleaned_data['capitaux_propres']\n passifs_non_courant = form.cleaned_data['passifs_non_courant']\n total_bilan = form.cleaned_data['total_bilan']\n stock = form.cleaned_data['stock']\n creance_client = form.cleaned_data['creance_client']\n actif_immobilier = form.cleaned_data['actif_immobilier']\n resultat_net = form.cleaned_data['resultat_net']\n chiffre_d_affaires = form.cleaned_data['chiffre_d_affaires']\n security_deposit = form.cleaned_data['security_deposit']\n loan_ammount = form.cleaned_data['loan_ammount']\n myDict = (request.POST).dict()\n df = pd.DataFrame(myDict, index=[0])\n answer = approvereject(df)\n if answer[0] == 1:\n messages.success(request, 'This loan has been approved')\n if answer[0] == 2:\n messages.success(request, \"This loan has been rejected because there is no security deposit\")\n if answer[0] == 3:\n messages.success(request, \"This loan has been approved, your security deposit covers the loan ammount\")\n if answer[0] == 4:\n messages.success(request, \"This loan has been rejected because your security deposit can't cover the loan ammount\")\n if answer[0] == 0:\n messages.success(request, \"This loan has been rejected because :\\nYour solvency ratio : 
{}\\nYour profitability ratio : {}\\nYour debt ratio : {}\\nYour net margin on sales {}\".format(str(int(answer[1].values))+'%', str(int(answer[2].values*100))+'%', str(int(answer[3].values*100))+'%', str(int(answer[4].values))+'%'))\n \n form=ApprovalForm()\n\n return render(request, 'myform/exform.html', {'form':form})","sub_path":"FinanciniAPI/MlAPI/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"104431181","text":"''' Work out the first ten digits of the sum of the one-hundred\n50-digit numbers found in the source file '''\n\nfrom euler import src\n\ndef main():\n ''' Sums the numbers and finds the first 10 digits '''\n with open(src+'e013') as data:\n num = [line.rstrip() for line in data]\n num = ''.join(num)\n num = [int(num[n:n+50]) for n in range(0, len(num), 50)]\n print(str(sum(num))[:10])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"problems/Python/e013.py","file_name":"e013.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"445118922","text":"#!/usr/bin/env python3\n\ndef recursiveGetArray(arr):\n arrInRow=[]\n for element in arr:\n if type(element)==type(list()):\n arrInRow+=recursiveGetArray(element)\n else:\n arrInRow.append(element)\n return arrInRow\n\ndef checkio(arr):\n 'convert all elements in arr into one row'\n result=[]\n for element in arr:\n if type(element)==type(list()):\n result+=recursiveGetArray(element)\n else:\n result.append(element)\n return result\n\nif __name__ == '__main__':\n assert checkio([1,2,3]) == [1,2,3], 'First'\n assert checkio([1,[2,2,2],4]) == [1,2,2,2,4], 'Second'\n assert checkio([[[2]],[4,[5,6,[6],6,6,6],7]])\\\n == [2,4,5,6,6,6,6,6,7], 'Third'\n print('All ok')","sub_path":"Old Library/All-in-row.py","file_name":"All-in-row.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"193298344","text":"from typing import Optional, Union\nfrom lxml.etree import Element, _Element\n\nfrom .namespaces_by_prefix import NAMESPACES_BY_PREFIX\nfrom .set_attribute_to_element import set_attribute_to_element\nfrom .append_children_to_parent_element import append_children_to_parent_element\n\n\ndef create_element(tag: str, attributes: dict[str, str] = {}, children: list[Union[_Element, str]] = [], parent: Optional[_Element] = None, text: str | None = None) -> _Element:\n [namespace_prefix, local_tag] = tag.split(\":\")\n namespace = NAMESPACES_BY_PREFIX[namespace_prefix]\n element = Element(\n \"{\" + namespace + \"}\" + local_tag,\n )\n\n if parent is not None:\n parent.append(element)\n\n if text:\n element.text = text\n\n for attribute_name, attribute_value in attributes.items():\n if attribute_value is not None:\n set_attribute_to_element(element, attribute_name, attribute_value)\n\n append_children_to_parent_element(element, children)\n\n return element","sub_path":"opendocument/src/radium226/opendocument/xml/create_element.py","file_name":"create_element.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
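A self-contained sketch of the same idea as the create_element helper above, building a namespaced lxml element with text and a child; the namespace URI here is a made-up placeholder, not one from the original module:

from lxml.etree import Element, SubElement, tostring

ns = "urn:example"                       # hypothetical namespace
root = Element("{%s}doc" % ns)           # Clark notation: {uri}localname
child = SubElement(root, "{%s}p" % ns)
child.text = "Hello"
print(tostring(root).decode())           # serialized element with the namespace declared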
+{"seq_id":"115306878","text":"#!/usr/bin/env python3\nimport http.client as http_client\nimport json\nimport logging\nimport os\nimport requests\nimport time\nfrom datetime import datetime, timedelta\nfrom requests.auth import HTTPDigestAuth\nfrom subprocess import call\nfrom urllib.parse import urljoin\n\nBASE_URL = \"https://mms.mongodb.com/api/public/v1.0\"\nHEADERS = {'Content-type': 'application/json'}\n\n\ndef api_call(path, user, key, json_data=None):\n url = BASE_URL + path\n auth = HTTPDigestAuth(user, key)\n r = None\n if json_data:\n data = json.dumps(json_data)\n r = requests.post(url, auth=auth, headers=HEADERS, data=data)\n else:\n r = requests.get(url, auth=auth, headers=HEADERS)\n\n if r.status_code == 200:\n return r.json()\n else:\n raise Exception(r.json())\n\n\ndef cluster_path(group_id, cluster_id):\n return \"/groups/{0}/clusters/{1}\".format(group_id, cluster_id)\n\n\ndef create_job_point_in_time(user, key, group_id, cluster_id, dt):\n path = cluster_path(group_id, cluster_id) + \"/restoreJobs\"\n dt_string = dt.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n timestamp = {\"timestamp\": {\"date\": dt_string, \"increment\": 0}}\n r = api_call(path, user, key, timestamp)\n\n return r['results'][0]['id']\n\n\ndef get_job(user, key, group_id, cluster_id, job_id):\n prefix = cluster_path(group_id, cluster_id)\n path = prefix + \"/restoreJobs/{0}\".format(job_id)\n return api_call(path, user, key)\n\n\ndef setup_logging():\n logging.basicConfig()\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.propagate = True\n\n if os.environ.get('MMS_DEBUG', 'false') == 'true':\n http_client.HTTPConnection.debuglevel = 1\n logging.getLogger().setLevel(logging.DEBUG)\n requests_log.setLevel(logging.DEBUG)\n\n\ndef main():\n setup_logging()\n\n user = os.environ['MMS_USERNAME']\n key = os.environ['MMS_API_KEY']\n group_id = os.environ['MMS_GROUP_ID']\n cluster_id = os.environ['MMS_CLUSTER_ID']\n\n dt = datetime.now() - timedelta(0, 3000)\n\n job_id = create_job_point_in_time(user, key, group_id, cluster_id, dt)\n\n print(\"Restore job id: {0!s}\".format(job_id))\n\n url = None\n while not url:\n job = get_job(user, key, group_id, cluster_id, job_id)\n if job[\"statusName\"] == \"FINISHED\":\n print(\"Restore job complete.\")\n url = job[\"delivery\"][\"url\"]\n else:\n print(\"Restore job in progress...\")\n time.sleep(10)\n\n print(\"Fetching {0}\".format(url))\n\n call([\"curl\", url, \"-o\", \"snapshot.tar.gz\"])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fetch_snapshot.py","file_name":"fetch_snapshot.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"434983687","text":"# Python program to find current weather data\nfrom datetime import datetime\nimport time\nimport requests, json\n\n# Python mysql DB connect lines\nimport mysql.connector\nimport random\nfrom datetime import datetime\nfrom sample import tb \n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"purna\",\n passwd=\"password\",\n database=\"mydatabase\"\n)\nlatitude=\"13.030087\"\nlongitude=\"77.657284\"\ndatetimes=tb()\n\nmycursor = mydb.cursor()\n\n# Enter your API key here \n#api_key = '96df4121f48572f1743ad4a782a0713e'\n#api_key = 'bb1ceced30d040834df3dd7e645b7cac'\n#api_key= 'd8a69f0567afcca15fe0ed09366f0556'\n#api_key= 'f629365a1f04f6bdb2ea7dad3fe3a1a1'\n#api_key='a530b3ed864a25caa0b496f47e936913'\n#api_key='48ce42e8afea06592ebf24c080b5767b'\n#api_key='626ebe9e2a22536297655f0612d8dbd0'\napi_key='3b81ba46090832322ef1670282ca328b'\n\n# base_url variable to store url \nbase_url = \"https://api.darksky.net/forecast/\"\nfor dtime in datetimes:\n # complete url address \n complete_url = base_url + api_key + \"/\" 
+ latitude +\",\"+ longitude +\",\"+ str(dtime) + \"?\" + \"exclude=currently,flags\"\n print(complete_url)\n # get method of requests module \n # return response object \n response = requests.get(complete_url) \n # json method of response object \n time.sleep(0.5)\n x = response.json() \n\n city_name=x[\"timezone\"]\n current_humidity=x[\"hourly\"][\"data\"][0][\"humidity\"]\n weather_description=\"Bangalore data not available\"\n #weather_description=x[\"daily\"][\"data\"][0][\"icon\"]\n wind_speed=x[\"hourly\"][\"data\"][0][\"windSpeed\"]\n\n ts=int(x[\"hourly\"][\"data\"][0][\"time\"])\n responsedtime=datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\n temp_high = x[\"hourly\"][\"data\"][0][\"temperature\"]\n #temp_low = x[\"daily\"][\"data\"][0][\"temperatureLow\"] \n\n #current_temperature = (temp_high+temp_low)/2\n current_temperature = temp_high\n\n print(\"City Name: \"+city_name+\" Date&Time: \"+responsedtime+\" Wind-speed: \"+str(wind_speed)+\" humidity: \"+str(current_humidity)+\" weather_discription: \"+str(weather_description)+\" current_temperature:\"+str(current_temperature))\n sql = \"\"\"INSERT INTO weatherdata (cityname, datetime, windspeed, humidity, temparature, discription) VALUES (%s,%s,%s,%s,%s,%s)\"\"\"\n val = (city_name, responsedtime, str(wind_speed), str(current_humidity), str(current_temperature), str(weather_description))\n mycursor.execute(sql,val);\n mydb.commit()\n print(mycursor.rowcount, \"record inserted.\")\n time.sleep(0.5)\n \n if responsedtime == \"2002-06-29 05:00:00\" :\n print(\"date%time to breake is : \"+responsedtime)\n break;\n\n# a = input(\"Enter yes/no to continue\")\n# if a==\"yes\":\n# continue\n# elif a==\"no\":\n# break\n\n\n","sub_path":"prac.py","file_name":"prac.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"75028948","text":"import datetime\nimport dbfunctions\nimport pandas as pd\nimport pdb\n\n\nfrom dbfunctions import session\nfrom schema_nyse_dbsetup import Base, Analytics, PriceData\nfrom functools import wraps\n\nouputcode = {0:'Number of data points below minimum threshold',}\n# Look into automating this later on\ntextdate = '2015-05-20'\nrundate = datetime.datetime.strptime(textdate, \"%Y-%m-%d\")\nDAY = datetime.timedelta(days=1)\nWEEK = datetime.timedelta(days=7)\nMONTH = datetime.timedelta(days=30)\nQUARTER = datetime.timedelta(days=90)\nHALFYEAR = datetime.timedelta(days=180)\nYEAR = datetime.timedelta(days=360)\nYEARHALF = datetime.timedelta(days=540)\nTWOYEAR = datetime.timedelta(days=720)\ntenor = [MONTH, QUARTER, HALFYEAR, YEAR, YEARHALF, TWOYEAR]\n\n# Minimum number of data points for each MA tenor\n# Set to 89% for now to account for public holidays\ndef minpoints(duration):\n return int(0.9 * (duration.days - (int(duration.days/7) * 2)))\n\n\n# Operations on time in string format and returns a string\ndef texttimedelta(textdate, delta):\n dateparts = textdate.split('-') # YYYY-mm-dd\n dateparts = [int(d) for d in dateparts]\n tdate = datetime.date(dateparts[0],dateparts[1],dateparts[2]) + delta\n return tdate.strftime(\"%Y-%m-%d\")\n\n\n# Returns the date of the previous business day\ndef prevbizday(cdate):\n query = session.query(PriceData.id).filter((PriceData.dt_close == cdate) & (PriceData.volume != '0')).statement\n nkyou = len(query2pd(query))\n n = 0\n # For now let's just get the date of the previous trading day from the database\n # Avoids having to bother with ensuring that the database 
records are consistent with the holiday calendar\n while n < 0.9 * nkyou:\n query = session.query(PriceData.dt_close).filter((PriceData.dt_close < cdate) & (PriceData.volume != '0')).order_by(PriceData.dt_close.desc()).limit(1).statement\n pdate = query2pd(query)['dt_close'][0]\n query = session.query(PriceData.id).filter((PriceData.dt_close == pdate) & (PriceData.volume != '0')).statement\n n = len(query2pd(query))\n cdate = pdate\n\n return cdate\n\n\n# The ticker selected for calculation should have recent continuous data of at least a week\n# Returns None if the requirement is not met\n# This decorator could be an overkill. There's no need to check for every tenor point. Maybe convert it into a function and call only once instead.\n# def isrecent2(calcfunction):\n# @wraps(calcfunction)\n# def decorated_function(*args, **kwargs):\n# lastweek = rundate - datetime.timedelta(days=7)\n# query = session.query(PriceData.id).filter((PriceData.symbol == item) & (PriceData.dt_close >= lastweek)).count.().statement\n# if query2pd(query) > 5:\n# return calcfunction(*args, **kwargs)\n# return None\n\n# return decorated_function\n\n# The ticker selected for calculation should have recent continuous data of at least a week\ndef isrecent(item):\n lastweek = rundate - WEEK\n query = session.query(PriceData.id).filter((PriceData.symbol == item) & (PriceData.dt_close >= lastweek) & (PriceData.dt_close <= rundate)).statement\n if len(query2pd(query)) > 5:\n return True\n return False\n\n\n# Executes sqlalchemy statements into data frames\ndef query2pd(statement):\n return pd.read_sql(statement, session.bind)\n\n\n# First instance calculation of a moving average\ndef firstMA(item, duration, mode='px_close'):\n startdate = rundate - duration\n query = session.query(PriceData).filter((PriceData.symbol == item) & (PriceData.dt_close >= startdate) & (PriceData.dt_close <= rundate) & (PriceData.volume != '0')).statement\n dataset = query2pd(query)\n npoint = len(dataset)\n # Avoid the compromise of the integrity of MA computation\n if npoint < minpoints(duration):\n return (npoint, None)\n dataset[mode] = dataset[mode].apply(pd.to_numeric, errors='coerce')\n return (npoint, dataset[mode].mean())\n\n\n# Stocks with existing MA as of T-1\ndef existma():\n query = session.query(Analytics.symbol).statement\n return query2pd(query)\n\n\ndef colpos(duration):\n # Get the list index of duration\n idx = tenor.index(duration)\n # Translate to object index 1 -> symbol, 2,3 -> ma_one..\n return int((2 * tenor.index(duration)) + 2)\n\n\n# Algorithm to update each ma\ndef updatema(maobj, pxadd, pxminus, duration, mode):\n # 1. Check that the value is not None\n # 2. If value is None, check to see if min point requirement has been met. \n # a. If it is, invoke firstma function\n # b. If not, assign None\n # 3. 
If value is not None, update with the standard displacement method\n validx = colpos(duration)\n posidx = validx + 1\n ptval = maobj.iloc[0,validx] # MA value\n ptdenom = maobj.iloc[0,posidx] # number of data points\n\n if ptval is None and int(ptdenom) < minpoints(duration):\n return (ptdenom, None)\n\n if ptval is None and int(ptdenom) >= minpoints(duration):\n return firstma(item, duration, mode)\n \n return (ptdenom, str(float(ptval) + ((float(pxadd) + float(pxminus))/float(ptdenom))))\n\n\n# Update MA by bootstrapping\ndef extendma(item, mode='px_close'):\n mapts = []\n maval = []\n prevday = prevbizday(rundate)\n\n # Loop through the tenors here and calculate!\n # Include exit conditions if px_xpire is not found!\n for tn in tenor:\n startdate = prevbizday(rundate - tn)\n\n # Get the latest data point\n query = session.query(PriceData).filter((PriceData.symbol == item) & (PriceData.volume != '0') & (PriceData.dt_close <= rundate)).order_by(PriceData.dt_close.desc()).limit(1).statement\n currobj = query2pd(query)\n px_today = currobj[mode].iloc[0]\n\n # Get the expired data point\n query = session.query(PriceData).filter((PriceData.symbol == item) & (PriceData.volume != '0') & (PriceData.dt_close >= startdate)).order_by(PriceData.dt_close.asc()).limit(1).statement\n xprdobj = query2pd(query)\n px_xpire = xprdobj[mode].iloc[0]\n\n # Get all MA values as of the previous business day\n query = session.query(Analytics).filter((Analytics.symbol == item) & (Analytics.dt_close == prevday)).statement\n tckrma = query2pd(query)\n ret = updatema(tckrma, px_today, px_xpire, tn, mode)\n mapts.append(ret[0])\n maval.append(ret[1])\n\n analyticsMA = Analytics(symbol=item, ma_one=maval[0], ma_one_d=mapts[0], ma_three=maval[1], ma_three_d=mapts[1], ma_six=maval[2], ma_six_d=mapts[2], ma_twelve=maval[3], ma_twelve_d=mapts[3], ma_eighteen=maval[4], ma_eighteen_d=mapts[4], ma_twofour=maval[5], ma_twofour_d=mapts[5], dt_close=rundate)\n session.add(analyticsMA)\n return\n\n\ndef calculatema(item, mode='px_close'):\n mapts = []\n maval = []\n\n if isrecent(item) == False:\n # No data points in the most recent week. Skip for now\n return\n\n # Loop through the tenors here and calculate!\n for tn in tenor:\n ret = firstMA(item, tn)\n mapts.append(ret[0])\n maval.append(ret[1])\n # This line feels like it's speeding things up. 
May be just redundant >.<\n ret = tuple()\n try:\n amaobj = session.query(Analytics).filter_by(symbol=item).one()\n except:\n amaobj = None\n\n if amaobj is not None:\n amaobj.ma_one = maval[0]\n amaobj.ma_one_d = mapts[0]\n amaobj.ma_three = maval[1]\n amaobj.ma_three_d = mapts[1]\n amaobj.ma_six = maval[2]\n amaobj.ma_six_d = mapts[2]\n amaobj.ma_twelve = maval[3]\n amaobj.ma_twelve_d = mapts[3]\n amaobj.ma_eighteen = maval[4]\n amaobj.ma_eighteen_d = mapts[4]\n amaobj.ma_twofour = maval[5]\n amaobj.ma_twofour_d = mapts[5]\n amaobj.dt_close=rundate\n\n session.add(amaobj)\n else:\n analyticsMA = Analytics(symbol=item, ma_one=maval[0], ma_one_d=mapts[0], ma_three=maval[1], ma_three_d=mapts[1], ma_six=maval[2], ma_six_d=mapts[2], ma_twelve=maval[3], ma_twelve_d=mapts[3], ma_eighteen=maval[4], ma_eighteen_d=mapts[4], ma_twofour=maval[5], ma_twofour_d=mapts[5], dt_close=rundate)\n session.add(analyticsMA)\n\n return\n\n\nif __name__ == '__main__':\n # Create a new analytics table for each exchange board\n # Calculate the first MA and write to table for each security\n # Subsequently, update MA by taking the difference between the first and last\n # point in the MA time series and add that to the first MA\n\n # Get all tickers loaded today\n query = session.query(PriceData.symbol).filter(PriceData.dt_close == rundate).order_by(PriceData.symbol.asc()).statement\n tickertoday = query2pd(query)\n\n # Doesn't seem to be faster at all. Let's just query the entire series and compute the mean\n # # Get all tickers with existing ma\n # tickerMA = existma()\n\n # # Isolate tickertoday with existing ma\n # # Update the MA by 'bootstrapping'\n # todayMA = tickertoday[tickertoday['symbol'].isin(tickerMA['symbol'])]\n # j = 1\n # for tma in todayMA['symbol']:\n # print(\"Extending MA %s of %s\" % (j, len(todayMA) + 1))\n # extendma(tma)\n # j += 1\n # session.commit()\n\n # For tickers without an existing ma, check that it has sufficient points\n # If it does, perform full computation\n # todaynoMA = tickertoday[~tickertoday['symbol'].isin(todayMA['symbol'])]\n\n i = 1\n for nma in tickertoday['symbol'][:20]:\n print(\"Computing %s of %s\" % (i, len(tickertoday) + 1))\n calculatema(nma)\n i += 1\n # Persistence!\n session.commit()\n\n\n","sub_path":"calcengine.py","file_name":"calcengine.py","file_ext":"py","file_size_in_byte":9528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"374061039","text":"import os, re\nimport xml.etree.ElementTree as et\nimport regex\nfrom ptxprint.utils import allbooks, books, bookcodes\n\n\nclass ParatextSettings:\n def __init__(self, basedir, prjid):\n self.dict = {}\n self.ldml = None\n self.basedir = basedir\n self.prjid = prjid\n self.langid = None\n self.dir = \"left\"\n self.parse()\n\n def parse(self):\n path = os.path.join(self.basedir, self.prjid, \"Settings.xml\")\n pathmeta = os.path.join(self.basedir, self.prjid, \"metadata.xml\")\n for a in (\"Settings.xml\", \"ptxSettings.xml\"):\n path = os.path.join(self.basedir, self.prjid, a)\n if os.path.exists(path):\n doc = et.parse(path)\n for c in doc.getroot():\n self.dict[c.tag] = c.text\n self.read_ldml()\n break\n else:\n self.inferValues()\n return self\n\n def read_ldml(self):\n self.langid = re.sub('-(?=-|$)', '', self.get('LanguageIsoCode', \"unk\").replace(\":\", \"-\"))\n fname = os.path.join(self.basedir, self.prjid, self.langid+\".ldml\")\n silns = \"{urn://www.sil.org/ldml/0.1}\"\n if os.path.exists(fname):\n self.ldml = et.parse(fname)\n for k in 
['footnotes', 'crossrefs']:\n d = self.ldml.find('.//characters/special/{1}exemplarCharacters[@type=\"{0}\"]'.format(k, silns))\n if d is not None:\n self.dict[k] = \",\".join(re.sub(r'^\\[\\s*(.*?)\\s*\\]', r'\\1', d.text).split())\n # print(k, self.dict[k].encode(\"unicode_escape\"))\n fonts = self.ldml.findall('.//special/{0}external-resources/{0}font'.format(silns))\n for t in (None, \"default\"):\n for f in fonts:\n if f.get('type', None) == t:\n self.dict['DefaultFont'] = f.get('name', '')\n self.dict['DefaultFontSize'] = float(f.get('size', 1.0)) * 12\n d = self.ldml.find(\".//layout/orientation/characterOrder\")\n if d is not None:\n if d.text.lower() == \"right-to-left\":\n self.dir = \"right\"\n else:\n self.ldml = None\n\n def __getitem__(self, key):\n return self.dict[key]\n\n def get(self, key, default=None):\n res = self.dict.get(key, default)\n if res is None:\n return default\n return res\n\n def find_ldml(self, path):\n if self.ldml is None:\n return None\n return self.ldml.find(path)\n\n def inferValues(self):\n path = os.path.join(self.basedir, self.prjid)\n sfmfiles = [x for x in os.listdir(path) if x.lower().endswith(\"sfm\")]\n for f in sfmfiles:\n m = re.search(r\"(\\d{2})\", f)\n if not m:\n continue\n bk = allbooks[int(m.group(1))-1]\n bki = f.lower().find(bk.lower())\n if bki < 0:\n continue\n numi = m.start(1)\n s = min(bki, numi)\n e = max(bki+3, numi+2)\n (pre, main, post) = f[:s], f[s:e], f[e:]\n self.dict['FileNamePrePart'] = pre\n self.dict['FileNamePostPart'] = post\n main = main[:numi-s] + \"41\" + main[numi-s+2:]\n main = main[:bki-s] + \"MAT\" + main[bki-s+3:]\n self.dict['FileNameBookNameForm'] = main\n break\n\n #self.dict['FullName'] = \"\"\n #self.dict['Copyright'] = \"\"\n self.dict['DefaultFont'] = \"\"\n self.dict['Encoding'] = 65001\n \n fbkfm = self.dict['FileNameBookNameForm']\n bknamefmt = self.get('FileNamePrePart', \"\") + \\\n fbkfm.replace(\"MAT\",\"{bkid}\").replace(\"41\",\"{bkcode}\") + \\\n self.get('FileNamePostPart', \"\")\n bookspresent = [0] * len(allbooks)\n for k, v in books.items():\n if os.path.exists(os.path.join(path, bknamefmt.format(bkid=k, bkcode=v))):\n bookspresent[v-1] = 1\n self.dict['BooksPresent'] = \"\".join(str(x) for x in bookspresent)\n\n def getBookFilename(self, bk):\n fbkfm = self.get('FileNameBookNameForm', \"\")\n bknamefmt = self.get('FileNamePrePart', \"\") + \\\n fbkfm.replace(\"MAT\",\"{bkid}\").replace(\"41\",\"{bkcode}\") + \\\n self.get('FileNamePostPart', \"\")\n fname = bknamefmt.format(bkid=bk, bkcode=bookcodes.get(bk, 0))\n return fname\n\n def getArchiveFiles(self):\n res = {}\n path = os.path.join(self.basedir, self.prjid, \"Settings.xml\")\n if os.path.exists(path):\n res[path] = \"Settings.xml\"\n if self.langid is None:\n return res\n fname = os.path.join(self.basedir, self.prjid, self.langid+\".ldml\")\n if os.path.exists(fname):\n res[fname] = self.langid+\".ldml\"\n return res\n\n","sub_path":"python/lib/ptxprint/ptsettings.py","file_name":"ptsettings.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
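A quick illustration, with hypothetical values, of the book-filename pattern logic in inferValues/getBookFilename above: the inferred "41MAT" form becomes a per-book template once the sample book id and number are replaced by placeholders.

fbkfm = "41MAT"                                      # inferred sample form
bknamefmt = fbkfm.replace("MAT", "{bkid}").replace("41", "{bkcode}")
print(bknamefmt)                                     # -> {bkcode}{bkid}
print(bknamefmt.format(bkid="GEN", bkcode="01"))     # -> 01GEN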
+{"seq_id":"256887070","text":"import random\n\nclass SeedModifier():\n def getOptions():\n return [\n {\n \"name\": \"Max Logic Item Placement\",\n \"description\": \"Less restricted item placement. All checks still obtainable.\"\n },\n {\n \"name\": \"Reverse Rando\",\n \"description\": \"Use when generating a Reverse Rando seed to ensure softlock protection\"\n },\n {\n \"name\":\"Glass Cannon\",\n \"description\": \"No more pesky Defense Ups in the level up stats pool\"\n },\n {\n \"name\":\"Library of Assemblage\",\n \"description\": \"Start with all the hints\"\n },\n {\n \"name\": \"Schmovement\",\n \"description\": \"Start with level 1 of all growth abilities\"\n },\n {\n \"name\": \"Better Junk\",\n \"description\": \"No more synthesis materials in the junk item pool\"\n },\n {\n \"name\": \"Randomize Ability Pool\",\n \"description\": \"Pick Sora's action/support abilities at random (guaranteed to have 1 SC & 1 OM)\"\n },\n {\n \"name\": \"Start with No AP\",\n \"description\": \"Sora/Donald/Goofy start the game with 0 AP\"\n },\n {\n \"name\": \"Remove Damage Cap\",\n \"description\": \"Removes the damage cap for every enemy/boss in the game.\"\n }\n ]\n\n def randomAbilityPool(action, support):\n abilitylist = action + support\n abilitydict = {i.Name: i for i in abilitylist}\n possibleabilities = list(set([i.Name for i in abilitylist if i.Name not in [\"Second Chance\", \"Once More\"]]))\n possibleabilities.sort()\n randomabilitypool = []\n for _ in range(len(abilitylist)-2):\n choice = random.choice(possibleabilities)\n randomabilitypool.append(abilitydict[choice])\n # Limit only 1 of each action ability in the pool, to make it more interesting\n if choice in [i.Name for i in action]:\n possibleabilities.remove(choice)\n\n # Make sure there is one OM and one SC so the tracker behaves\n randomabilitypool.append(abilitydict[\"Second Chance\"])\n randomabilitypool.append(abilitydict[\"Once More\"])\n return randomabilitypool\n\n def glassCannon(enabled):\n if enabled:\n return [{\"Stat\":\"Str\",\"Value\": 2},{\"Stat\":\"Mag\", \"Value\": 2},{\"Stat\": \"Ap\", \"Value\": 2}]\n return None\n\n def library(enabled):\n if enabled:\n return [\n \"226\",\n \"227\",\n \"228\",\n \"229\",\n \"230\",\n \"231\",\n \"232\",\n \"233\",\n \"234\",\n \"235\",\n \"236\",\n \"237\",\n \"238\"\n ]\n return []\n\n def schmovement(enabled):\n if enabled:\n return [\n \"94\",\n \"98\",\n \"102\",\n \"106\",\n \"564\"\n ]\n return []","sub_path":"Module/modifier.py","file_name":"modifier.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"223368447","text":"import argparse\nimport cv2\nimport numpy as np\nimport os\nimport math\nimport configparser\n\ndef run(video_file, output_dir):\n \"\"\"Transform a video file into a MOT-format directory.\n\tDirectory : img1 -> all video frames (000001 to maxframe)\n\t\t\t\tseqinfo.ini -> video information\n \n\tParameters\n ----------\n video_file : str\n Path to the video file.\n output_dir : str\n Path to the MOT format directory.\n\n \"\"\"\n \n # Get the directory name\n list_output_dir = output_dir.split('\\\\')\n name = list_output_dir[len(list_output_dir)-1]\n # Open the video\n video = cv2.VideoCapture(video_file)\n # Get the fps\n fps = video.get(cv2.CAP_PROP_FPS)\n print(video.get(cv2.CAP_PROP_FPS))\n # width of the video frames\n width_video = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n # height of the video frames\n height_video = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n # Get the number of frames\n length_video = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n print (\"Number of frames: \", length_video)\n count = 0\n print (\"Converting video..\\n\")\n # Start converting the video\n while video.isOpened():\n # Extract the frames\n ret, frame = video.read()\n # Write the result to the output.\n if not os.path.exists(output_dir + \"/img1/\"):\n os.makedirs(output_dir + \"/img1/\")\n cv2.imwrite(output_dir + \"/img1/%d.jpg\" % (count+1), frame)\n count = count + 1\n # If there are no more frames\n if (count > (length_video-1)):\n # Release the video\n video.release()\n break\n \n # Write the seqinfo.ini file\n cfg = configparser.ConfigParser()\n \n S = 'Sequence'\n cfg.add_section(S)\n \n cfg.set(S,'name',name)\n cfg.set(S,'imDir','img1')\n cfg.set(S,'frameRate',str(int(fps)))\n cfg.set(S,'seqLength',str(length_video))\n cfg.set(S,'imWidth',str(width_video))\n cfg.set(S,'imHeight',str(height_video))\n cfg.set(S,'imExt','.jpg')\n \n cfg.write(open(output_dir+\"/seqinfo.ini\",'w'))\n\n\t\ndef parse_args():\n \"\"\" Parse command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description = 'Generate MOTFormat file from video')\n parser.add_argument(\n \"--video_file\", help=\"Path to video file\",\n default=None, required=True)\n parser.add_argument(\n \"--output_dir\", help=\"Path to MOT format file output\",\n default=None, required=True)\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = parse_args()\n run(args.video_file, args.output_dir)","sub_path":"Developpement/proposition1/generate_MOTFormat_File_test.py","file_name":"generate_MOTFormat_File_test.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"183764661","text":"irregular_nouns = {'child': 'children', \n 'goose': 'geese', \n 'man': 'men',\n 'woman': 'women',\n 'superman': 'supermen',\n 'superwoman': 'superwomen',\n 'tooth': 'teeth',\n 'foot': 'feet',\n 'mouse': 'mice',\n 'person': 'people',\n 'ox': 'oxen',\n 'louse': 'lice',\n 'alumna': 'alumnae',\n 'charisma': 'charismata'}\n\n# https://dictionary.cambridge.org/grammar/british-grammar/nouns-singular-and-plural?q=Singular+and+plural+nouns\n# https://usefulenglish.ru/writing/irregular-plural-nouns\nnouns_in_plurals = ['sheep', 'series', 'species', 'deer',\n 'bison', 'moose', 'swine', 'fish', \n 'dozen', 'aircraft', 'means', 'corps', \n 'headquarters', 'crossroads', 'alms', \n 'gallows', 'barracks', 'classics', 'economics', 'maths', \n 'linguistics', 'mathematics', 'physics', 'aerobics', 'phonetics',\n 'politics', 'statistics', 'acoustics', \n 'acrobatics', 'gymnastics', 'measles', 'mumps',\n 'news', 'binoculars', 'headphones', 'sunglasses',\n 'glasses', 'scissors', 'tweezers', 'pantyhose',\n 'jeans', 'pyjamas', 'tights', 'knickers',\n 'shorts', 'trousers', 'pants', 'belongings', 'slacks',\n 'breeches', 'pajamas', 'clothes', 'congratulations',\n 'briefs', 'panties', 'pliers', 'earnings', 'goods',\n 'tongs', 'pincers', 'forceps', 'spectacles', \n 'likes', 'dislikes', 'outskirts', 'premises',\n 'buildings', 'savings', 'money', 'stairs',\n 'surroundings', 'thanks', 'audience', 'public',\n 'arms', 'troops', 'weapons', 'remains',\n 'billiards', 'chess', 'checkers', 'cards', 'dominoes',]","sub_path":"5_plural_nouns/irregular_nouns_dict.py","file_name":"irregular_nouns_dict.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
# Create an instance of the Mailchimp class with our API key\ndef get_mailchimp_api():\n    return mailchimp.Mailchimp(MAILCHIMP_API_KEY)\n\n# Parse uploaded files and generate a unique (UUID) filename\ndef get_file_path(instance, filename):\n    ext = filename.split('.')[-1]\n    filename = \"%s.%s\" % (uuid.uuid4(), ext)\n    return os.path.join(filename)\n\n# Location utility function to pass geocoded results into the DB\n# Used in the RequestViewSet, UserViewSet, and ItineraryViewSet API Views\ndef build_geocode_location(data):\n    if 'sublocality_level_1' in data:\n        check_loc = Location.objects.all().filter(country_id=data['country_id'],\n                                                  locality=data['locality'],\n                                                  sublocality_level_1=data['sublocality_level_1'])\n\n        if check_loc:\n            location = check_loc[0]\n        else:\n            location = Location.objects.create(**data)\n\n    else:\n        check_loc = Location.objects.all().filter(country_id=data['country_id'],\n                                                  locality=data['locality'],\n                                                  sublocality_level_1__isnull=True)\n\n        if check_loc:\n            location = check_loc[0]\n        else:\n            location = Location.objects.create(**data)\n\n    return location","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"430099882","text":"import glob\nimport csv\nimport json\nimport os\nimport time\nimport argparse\nimport torch\nimport yaml\nimport sys\nimport numpy as np\n\n\nclass LogWriter(object):\n    # kind of inspired from openai.baselines.bench.monitor\n    # We can add here an optional Tensorboard logger as well\n    def __init__(self, path, keys, header=\"\", name=\"monitor.csv\"):\n        self.keys = tuple(keys) + (\"t\",)\n        assert path is not None\n\n        os.makedirs(path, exist_ok=True)\n        filename = os.path.join(path, name)\n        if os.path.exists(filename):\n            os.remove(filename)\n\n        print(\"Writing logs to \", filename)\n\n        self.f = open(filename, \"wt\")\n        if isinstance(header, dict):\n            header = \"# {} \\n\".format(json.dumps(header))\n        self.f.write(header)\n        self.logger = csv.DictWriter(self.f, fieldnames=self.keys)\n        self.logger.writeheader()\n        self.f.flush()\n        self.tstart = time.time()\n\n    def write_row(self, epinfo):\n        if self.logger:\n            t = time.time() - self.tstart\n            epinfo[\"t\"] = t\n            self.logger.writerow(epinfo)\n            self.f.flush()\n\n\nclass LoadFromFile(argparse.Action):\n    # parser.add_argument('--file', type=open, action=LoadFromFile)\n    def __call__(self, parser, namespace, values, option_string=None):\n        if values.name.endswith(\"yaml\") or values.name.endswith(\"yml\"):\n            with values as f:\n                namespace.__dict__.update(yaml.load(f, Loader=yaml.FullLoader))\n            return\n\n        with values as f:\n            input = f.read()\n        input = input.rstrip()\n        for lines in input.split(\"\\n\"):\n            k, v = lines.split(\"=\")\n            typ = type(namespace.__dict__[k])\n            v = typ(v) if typ is not None else v\n            namespace.__dict__[k] = v\n\n\ndef save_argparse(args, filename, exclude=None):\n    if filename.endswith(\"yaml\") or filename.endswith(\"yml\"):\n        if isinstance(exclude, str):\n            exclude = [\n                exclude,\n            ]\n        args = args.__dict__.copy()\n        for exl in exclude:\n            del args[exl]\n        yaml.dump(args, open(filename, \"w\"))\n    else:\n        with open(filename, \"w\") as f:\n            for k, v in args.__dict__.items():\n                if k == exclude:\n                    continue\n                f.write(f\"{k}={v}\\n\")\n","sub_path":"torchmd/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"617393881","text":"\"\"\"\nClone of 2048 
game.\n\"\"\"\n#http://www.codeskulptor.org/#user42_kuOnoiAOQs_43.py\n# import poc_2048_gui\nimport random\n\n# Directions, DO NOT MODIFY\nUP = 1\nDOWN = 2\nLEFT = 3\nRIGHT = 4\n\n# Offsets for computing tile indices in each direction.\n# DO NOT MODIFY this dictionary.\nOFFSETS = {UP: (1, 0),\n DOWN: (-1, 0),\n LEFT: (0, 1),\n RIGHT: (0, -1)}\n\ndef merge(line):\n \"\"\"\n Helper function that merges a single row or column in 2048\n \"\"\"\n #online file: http://www.codeskulptor.org/#user42_BSAk2FXqfxnrGlr.py\n #list1: Iterates over the input and creates an output list that has all of the non-zero tiles slid \n # over to the beginning of the list with the appropriate number of zeroes at the end of the list.\n list1=[]\n list3=[]\n length=len(line)\n zer1=0\n zer2=0\n for count in range(length):\n if line[count]==0:\n zer1=zer1+1\n else:\n list1.append(line[count])\n for count in range(zer1):\n list1.append(0)\n #list2: Iterates over the list created in the previous step and create another new list in which pairs\n # of tiles in the first list are replaced with a tile of twice the value and a zero tile.\n list2=list1[:] \n for count in range(length-zer1-1):\n if list2[count]==list2[count+1]:\n list2[count]=list2[count]+list2[count+1]\n list2[count+1]=0\n #list3: Repeats step one using the list created in step two to slide the tiles to the beginning of the list again.\n for count in range(length):\n if list2[count]==0:\n zer2=zer2+1\n else:\n list3.append(list2[count])\n for count in range(zer2):\n list3.append(0)\n return list3\n\nclass TwentyFortyEight:\n \"\"\"\n Class to run the game logic.\n \"\"\"\n def __init__(self, grid_height, grid_width):\n self._height=grid_height\n self._width=grid_width\n \n init_up=[(0,count) for count in range(self._width)]\n init_down=[(self._height-1,count) for count in range(self._width)]\n init_left=[(count,0) for count in range(self._height)]\n init_right=[(count,self._width-1) for count in range(self._height)]\n self._dir_dic={UP:init_up, DOWN:init_down, LEFT:init_left, RIGHT:init_right}\n \n self.reset()\n \n def reset(self):\n \"\"\"\n Reset the game so the grid is empty except for two\n initial tiles.\n \"\"\"\n self._initial_grid = [[0 for dummy_1 in range(self._width)]\n for dummy_2 in range(self._height)]\n# use the new_tile method to add two initial tiles \n self.new_tile()\n self.new_tile()\n\n def __str__(self):\n \"\"\"\n Return a string representation of the grid for debugging.\n \"\"\"\n return '[' +'\\n '.join(str(self._initial_grid[count]) for count in range(self._height))+']' \n \n def get_grid_height(self):\n \"\"\"\n Get the height of the board.\n \"\"\"\n return self._height\n\n def get_grid_width(self):\n \"\"\"\n Get the width of the board.\n \"\"\"\n return self._width\n\n def move(self, direction):\n \"\"\"\n Move all tiles in the given direction and add\n a new tile if any tiles moved.\n \"\"\"\n falses=0\n start_ind=self._dir_dic[direction] #list of start indices\n offset=OFFSETS[direction] #tuple of direction from OFFSETS\n if direction==UP or direction==DOWN:\n step1=self._height\n step2=self._width\n else:\n step1=self._width\n step2=self._height\n \n for count1 in range(step2):\n temp_ind=[]\n temp_val=[]\n temp_ind.append(start_ind[count1])\n for count2 in range(1,step1): \n temp_ind.append((temp_ind[count2-1][0]+offset[0],temp_ind[count2-1][1]+offset[1])) \n for count3 in range (step1):\n temp_val.append(self.get_tile(temp_ind[count3][0],temp_ind[count3][1]))\n temp_merg_val=merge(temp_val)\n if not temp_merg_val==temp_val: 
\n                falses=falses+1\n            for count4 in range (step1):\n                self.set_tile(temp_ind[count4][0], temp_ind[count4][1], temp_merg_val[count4])\n\n        if not falses==0:\n            self.new_tile()\n\n    def new_tile(self):\n        \"\"\"\n        Create a new tile in a randomly selected empty\n        square. The tile should be 2 90% of the time and\n        4 10% of the time.\n        \"\"\"\n        #new_tile will randomly have 2 or 4 with 90% and 10% probability\n        prob=random.randint(1,100)\n        if prob <=90:\n            new_tile=2\n        else:\n            new_tile=4\n        #Save tuple indices of non-zero elements in a list\n        list_z=[]\n        for count1 in range (self._height):\n            for count2 in range (self._width):\n                if self.get_tile(count1,count2)==0:\n                    list_z.append((count1,count2))\n        if list_z==[]:\n            print (\"Game Over!\")\n        else:\n            new=random.randint(0,len(list_z)-1)\n            pick1=list_z[new][0]\n            pick2=list_z[new][1]\n\n            self.set_tile(pick1,pick2,new_tile)\n\n    def set_tile(self, row, col, value):\n        \"\"\"\n        Set the tile at position row, col to have the given value.\n        \"\"\"\n        self._initial_grid[row][col]=value\n\n    def get_tile(self, row, col):\n        \"\"\"\n        Return the value of the tile at position row, col.\n        \"\"\"\n        return self._initial_grid[row][col]\n\n# poc_2048_gui.run_gui(TwentyFortyEight(4, 4))\n","sub_path":"source/Courses/PC 1/2016-2017/MiniProj2.py","file_name":"MiniProj2.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"452851824","text":"from tkinter.filedialog import *  # for the application window and Tk()\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\r\nimport matplotlib.pyplot as plt  # for the animation\r\nimport matplotlib.animation as animation  # for the animation\r\nimport numpy as np\r\n\r\n# create the application window\r\nroot = Tk()\r\nroot.minsize(width=800, height=400)\r\nroot.title('ПТИ')\r\n\r\n# create plots 1 and 2\r\nfig, ax = plt.subplots() #\r\nline, = ax.plot([], [], color='blue', ls='solid', lw=1.5)\r\nline_2, = ax.plot([], [], color='red', ls='solid', lw=1.5)  # style, type and colour of the plot; lw is the line width\r\nax.grid()  # draw the grid\r\nax.set_title('Tk embedding')  # plot title\r\nax.set_xlabel('время')  # axis label\r\nax.set_ylabel('температура, влажность')\r\nxdata, ydata = [], []  # data arrays for X and Y of plot 1\r\nxdata_2, ydata_2 = [], []  # data arrays for X and Y of plot 2\r\n\r\n# draw the axes and their labels\r\ncanvas = FigureCanvasTkAgg(fig, master=root)\r\ncanvas.show()\r\ncanvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)  # placement of the axes window\r\n\r\n# draw the toolbar for working with the plot\r\ntoolbar = NavigationToolbar2TkAgg(canvas, root)\r\ntoolbar.update()\r\ncanvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)\r\n\r\n# application shutdown function\r\ndef quit():\r\n    root.quit()     # stops mainloop\r\n    root.destroy()  # this is necessary on Windows to prevent\r\n                    # Fatal Python Error: PyEval_RestoreThread: NULL tstate\r\n\r\n# data generator for the plot\r\ndef data_gen(t=0):\r\n    cnt = 0\r\n    while cnt < 1000:\r\n        cnt += 1\r\n        cnt = np.random.randint(0, 30)\r\n        cnt_2 = np.random.randint(20, 80)\r\n        t += 0.5\r\n        # yield t, np.sin(2*np.pi*t) * np.exp(-t/10.)\r\n        yield t, (cnt), (cnt_2),\r\n\r\n# initial state of the plot\r\ndef init():\r\n    ax.set_ylim(0, 100)  # Y-axis limits\r\n    ax.set_xlim(0, 60)  # X-axis limits\r\n    del xdata[:]  # clear the source data array for X of plot 1\r\n    del ydata[:]  # clear the source data array for Y of plot 1\r\n    del xdata_2[:]  # clear the source data array for X of plot 2\r\n    del ydata_2[:]  # clear the source data array for Y of plot 2\r\n    line.set_data(xdata, ydata)  # plot 1 is driven by xdata and ydata\r\n    line_2.set_data(xdata_2, ydata_2)  # plot 2 is driven by xdata_2 and ydata_2\r\n    return line, line_2,\r\n\r\n# update function for the plot\r\ndef run(data):\r\n    # update the data\r\n    t, y, y_2 = data\r\n    xdata.append(t)\r\n    ydata.append(y)\r\n    xdata_2.append(t)\r\n    ydata_2.append(y_2)\r\n    xmin, xmax = ax.get_xlim()  # rescale the X axis over time\r\n    if t >= xmax:\r\n        ax.set_xlim(xmin+60, xmax+60)\r\n        ax.figure.canvas.draw()\r\n    line.set_data(xdata, ydata)\r\n    line_2.set_data(xdata_2, ydata_2)\r\n    return line, line_2,\r\n\r\nani = animation.FuncAnimation(fig, run, data_gen, blit=False, interval=100,\r\n                              repeat=False, init_func=init)\r\n\r\n# add the Close button to the application window\r\nB1 = Button(master=root, text='Закрыть', command=quit)\r\nB1.place(x=150, y=120)\r\n\r\nroot.mainloop()","sub_path":"plot_2.py","file_name":"plot_2.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"128863378","text":"import random\nimport copy\nimport math\nfrom collections import defaultdict\nimport numpy as np\nimport scipy as sp\nimport sqlite3 as lite\n\nfrom cyclus.agents import Institution, Agent, Facility\nfrom cyclus import lib\nimport cyclus.typesystem as ts\n\n\nclass udb_reactor(Facility):\n\n    reactor_id = ts.Int(\n        doc=\"This variable lists the reactor id of the reactors in the database \",\n        tooltip=\"Reactor Id in database\",\n        uilabel=\"Reactor ID\"\n    )\n\n    outcommod = ts.String(\n        doc=\"The commodity this institution will output\",\n        tooltip=\"Output commodity\",\n        uilabel=\"Output Commodity\"\n    )\n\n    inventory = ts.ResBufMaterialInv()\n\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def write(self, string):\n        with open('log.txt', 'a') as f:\n            f.write(string + '\n')\n\n    def tock(self):\n        self.write('what')\n\n        # Example dummy code\n        composition = {922350000: 5,\n                       922380000: 95}\n        material = ts.Material.create(self, 100, composition)\n\n        self.inventory.push(material)\n        self.write(str(self.inventory.quantity))\n\n\n        # time = self.context.time\n        # get rows that match with current time\n\n        # for information in rows:\n        # Create material given by recipe and quantity\n        # composition = {ZZAAA0000: massfrac,\n        #                ZZAAA0000: massfrac}\n        # recipe = self.context.get_recipe()\n        # material = ts.Material.create(self, quantity, recipe)\n\n        # Push material to out buffer\n        # self.out.push(material)\n\n\n    def get_material_bids(self, requests):\n        if self.outcommod not in requests:\n            return\n        reqs = requests[self.outcommod]\n        bids = [reqs]\n        ports = [{\"bids\": bids, \"constraints\": self.inventory.quantity}]\n        return ports\n\n\n    def get_material_trades(self, trades):\n        responses = {}\n        for trade in trades:\n            print(trade)\n            mat = self.inventory.pop()\n            responses[trade] = mat\n        return responses","sub_path":"udb_reactor/data_reactor.py","file_name":"data_reactor.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"256526542","text":"import tensorflow as tf\nfrom PIL import Image\nimport numpy as np\nimport threading\n\nSMALL_PATH = \"smallObjImage\\\\\"\n\nf = open(\"label_obj.txt", 'r')\nTRAINING_DATA_RATE = 0.9\n
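# Hyperparameters for the VGG-style classifier built below: 224x224 RGB inputs,\n# 6 output classes, 80 training epochs, and the data split into 100 mini-batches.\n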
IMAGE_SIZE = 224\nIMAGE_LAYER = 3\nTRAINING_EPOCHS = 80\nBATCH_NUM = 100\nNB_CLASSES = 6\nlearning_rate = 0.00005\n\nuser_input = [None]\ndef get_user_input(user_input_ref):\n    user_input_ref[0] = input(\"Give me some Information: \")\n\ndef oneHotEncoding(imageLabelList):\n    return_values = np.eye(NB_CLASSES)[np.array(imageLabelList).reshape(-1)]\n    return return_values\n\ndef getImageDatasByName(imageNameList):\n    imageList = []\n    for name in imageNameList:\n        imageFile = Image.open(SMALL_PATH + name)\n        imageRGBArray = np.array(imageFile)\n        imageRGBList = imageRGBArray.tolist()\n        imageList.append(imageRGBList)\n    return imageList\n\ndef getImageNameList():\n    datasetNameArray = []\n    datasetScoreArray = []\n    i = 0\n    while True:\n\n        line = f.readline()\n        if line == \"\":\n            break\n        line = line.split(\" \")\n        datasetNameArray.append(line[0])\n        datasetScoreArray.append([int(line[1][:-1])])\n        i += 1\n    return datasetNameArray, datasetScoreArray\n\ndatasetNameArray, datasetScoreArray = getImageNameList()\n\ntrainingDatasetLen = int(len(datasetNameArray) * TRAINING_DATA_RATE)\ntestDatasetLen = len(datasetNameArray) - trainingDatasetLen\n\ntrainingDatasetNameArray = datasetNameArray[:trainingDatasetLen]\ntrainingDatasetScoreArray = datasetScoreArray[:trainingDatasetLen]\ntestDatasetNameArray = datasetNameArray[trainingDatasetLen:]\ntestDatasetScoreArray = datasetScoreArray[trainingDatasetLen:]\n\nprint(str(len(trainingDatasetNameArray)) + \" \" + str(len(testDatasetNameArray)))\n\n##################################################################################################\n\ndef conv2d(x, W, b, strid=1, padd='SAME'):\n    x = tf.nn.conv2d(x, W, strides=[1, strid, strid, 1], padding=padd)\n    x = tf.nn.bias_add(x, b)\n    return tf.nn.relu(x)\n\ndef maxpool2d(x, s=2, k=2):\n    return tf.nn.max_pool(x, ksize=[1, s, s, 1], strides=[1, k, k, 1], padding='SAME')\n\ndef conv_net(x, weights, biases, dropout):\n    conv1_1 = conv2d(x, weights['wc1_1'], biases['bc1_1'])\n    conv1_2 = conv2d(conv1_1, weights['wc1_2'], biases['bc1_2'])\n    pool1 = maxpool2d(conv1_2)\n\n    conv2_1 = conv2d(pool1, weights['wc2_1'], biases['bc2_1'])\n    conv2_2 = conv2d(conv2_1, weights['wc2_2'], biases['bc2_2'])\n    pool2 = maxpool2d(conv2_2)\n\n    conv3_1 = conv2d(pool2, weights['wc3_1'], biases['bc3_1'])\n    conv3_2 = conv2d(conv3_1, weights['wc3_2'], biases['bc3_2'])\n    conv3_3 = conv2d(conv3_2, weights['wc3_3'], biases['bc3_3'])\n    pool3 = maxpool2d(conv3_3)\n\n    conv4_1 = conv2d(pool3, weights['wc4_1'], biases['bc4_1'])\n    conv4_2 = conv2d(conv4_1, weights['wc4_2'], biases['bc4_2'])\n    conv4_3 = conv2d(conv4_2, weights['wc4_3'], biases['bc4_3'])\n    pool4 = maxpool2d(conv4_3)\n\n    conv5_1 = conv2d(pool4, weights['wc5_1'], biases['bc5_1'])\n    conv5_2 = conv2d(conv5_1, weights['wc5_2'], biases['bc5_2'])\n    conv5_3 = conv2d(conv5_2, weights['wc5_3'], biases['bc5_3'], strid=2)\n    pool5 = maxpool2d(conv5_3)\n\n    fc1 = tf.reshape(pool5, [-1, 8192])\n    fc1 = tf.nn.relu(tf.matmul(fc1, weights['wc6']) + biases['bc6'])\n    fc1 = tf.nn.dropout(fc1, keep_prob=dropout)\n\n    fc2 = tf.nn.relu(tf.matmul(fc1, weights['wc7']) + biases['bc7'])\n    fc2 = tf.nn.dropout(fc2, keep_prob=dropout)\n\n    fc3 = tf.nn.relu(tf.matmul(fc2, weights['wc8']) + biases['bc8'])\n    fc3 = tf.nn.dropout(fc3, keep_prob=dropout)\n\n    fc4 = tf.add(tf.matmul(fc3, weights['wc9']), biases['bc9'])\n    return fc4\n\nweights = {\n    'wc1_1': tf.Variable(tf.random_normal([3, 3, 3, 64], stddev=0.01)),\n    'wc1_2': tf.Variable(tf.random_normal([3, 3, 64, 64], stddev=0.01)),  # filter height, filter width, input channels, number of filters\n    'wc2_1': tf.Variable(tf.random_normal([3, 
3, 64, 128], stddev=0.01)),\n 'wc2_2': tf.Variable(tf.random_normal([3, 3, 128, 128], stddev=0.01)),\n 'wc3_1': tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.01)),\n 'wc3_2': tf.Variable(tf.random_normal([3, 3, 256, 256], stddev=0.01)),\n 'wc3_3': tf.Variable(tf.random_normal([3, 3, 256, 256], stddev=0.01)),\n 'wc4_1': tf.Variable(tf.random_normal([3, 3, 256, 512], stddev=0.01)),\n 'wc4_2': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),\n 'wc4_3': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),\n 'wc5_1': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),\n 'wc5_2': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),\n 'wc5_3': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),\n 'wc6': tf.get_variable(\"W6\", shape=[8192, 1500], initializer=tf.contrib.layers.xavier_initializer()),\n 'wc7': tf.get_variable(\"W7\", shape=[1500, 250], initializer=tf.contrib.layers.xavier_initializer()),\n 'wc8': tf.get_variable(\"W8\", shape=[250, 40], initializer=tf.contrib.layers.xavier_initializer()),\n 'wc9': tf.get_variable(\"W9\", shape=[40, 6], initializer=tf.contrib.layers.xavier_initializer())\n}\n\nbiases = {\n 'bc1_1': tf.Variable(tf.random_normal([64])),\n 'bc1_2': tf.Variable(tf.random_normal([64])),\n 'bc2_1': tf.Variable(tf.random_normal([128])),\n 'bc2_2': tf.Variable(tf.random_normal([128])),\n 'bc3_1': tf.Variable(tf.random_normal([256])),\n 'bc3_2': tf.Variable(tf.random_normal([256])),\n 'bc3_3': tf.Variable(tf.random_normal([256])),\n 'bc4_1': tf.Variable(tf.random_normal([512])),\n 'bc4_2': tf.Variable(tf.random_normal([512])),\n 'bc4_3': tf.Variable(tf.random_normal([512])),\n 'bc5_1': tf.Variable(tf.random_normal([512])),\n 'bc5_2': tf.Variable(tf.random_normal([512])),\n 'bc5_3': tf.Variable(tf.random_normal([512])),\n 'bc6': tf.Variable(tf.random_normal([1500])),\n 'bc7': tf.Variable(tf.random_normal([250])),\n 'bc8': tf.Variable(tf.random_normal([40])),\n 'bc9': tf.Variable(tf.random_normal([6]))\n}\n\nX = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, IMAGE_LAYER])\nY = tf.placeholder(tf.float32, [None, NB_CLASSES])\n# X_img = tf.reshape(X, [-1, IMAGE_SIZE, IMAGE_SIZE, IMAGE_LAYER])\nkeep_prop = tf.placeholder(tf.float32)\n\npred = conv_net(X, weights, biases, keep_prop)\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain = optimizer.minimize(cost)\n\ncost_sum = tf.summary.scalar(\"cost\", cost)\n\n# W8_hist = tf.summary.histogram(\"weights8\", weights['wc8'])\n# cost_sum = tf.summary.scalar(\"cost\", cost)\n\n# is_correct = tf.equal(tf.arg_max(pred, 1), tf.arg_max(Y, 1))\n# accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n\n###########################################################################################\n\nmythread = threading.Thread(target=get_user_input, args=(user_input,))\nmythread.daemon = True\nmythread.start()\n\nwith tf.Session() as sess:\n\n summary = tf.summary.merge_all()\n\n writer = tf.summary.FileWriter('./logs/rate00005_small_obj_80_VGG')\n writer.add_graph(sess.graph)\n\n sess.run(tf.global_variables_initializer())\n\n oneBatchNum = int(trainingDatasetLen / BATCH_NUM)\n for epoch in range(TRAINING_EPOCHS):\n avg_cost = 0\n\n if user_input[0] is not None:\n print(\"input is exists\")\n break\n\n for i in range(BATCH_NUM):\n batch_ys = oneHotEncoding(trainingDatasetScoreArray[oneBatchNum * i:oneBatchNum * (i + 1)])\n batch_xs = 
getImageDatasByName(trainingDatasetNameArray[oneBatchNum * i:oneBatchNum * (i + 1)])\n\n # if i == 29 or i == 48 or i == 106 or i == 135 or i == 144 or i == 169 or i == 201 or i == 224 or i == 243:\n # continue\n\n print(i)\n c, h, s, _ = sess.run([cost, pred, summary, train], feed_dict={X: batch_xs, Y: batch_ys, keep_prop: 0.7})\n avg_cost += c / BATCH_NUM\n\n if i % 20 == 0:\n print(epoch, i, \"Cost: \", c, \" Prediction:\")\n\n writer.add_summary(s, global_step=i + epoch * BATCH_NUM)\n\n for j in range(len(h)):\n print(h[j], batch_ys[j])\n\n print('Epoch:', \"%04d\" % (epoch + 1), 'cost = ', '{:.9f}', format(avg_cost))\n\n WC1_1 = weights['wc1_1'].eval(sess)\n WC1_2 = weights['wc1_2'].eval(sess)\n WC2_1 = weights['wc2_1'].eval(sess)\n WC2_2 = weights['wc2_2'].eval(sess)\n WC3_1 = weights['wc3_1'].eval(sess)\n WC3_2 = weights['wc3_2'].eval(sess)\n WC3_3 = weights['wc3_3'].eval(sess)\n WC4_1 = weights['wc4_1'].eval(sess)\n WC4_2 = weights['wc4_2'].eval(sess)\n WC4_3 = weights['wc4_3'].eval(sess)\n WC5_1 = weights['wc5_1'].eval(sess)\n WC5_2 = weights['wc5_2'].eval(sess)\n WC5_3 = weights['wc5_3'].eval(sess)\n WC6 = weights['wc6'].eval(sess)\n WC7 = weights['wc7'].eval(sess)\n WC8 = weights['wc8'].eval(sess)\n WC9 = weights['wc9'].eval(sess)\n\n BC1_1 = biases['bc1_1'].eval(sess)\n BC1_2 = biases['bc1_2'].eval(sess)\n BC2_1 = biases['bc2_1'].eval(sess)\n BC2_2 = biases['bc2_2'].eval(sess)\n BC3_1 = biases['bc3_1'].eval(sess)\n BC3_2 = biases['bc3_2'].eval(sess)\n BC3_3 = biases['bc3_3'].eval(sess)\n BC4_1 = biases['bc4_1'].eval(sess)\n BC4_2 = biases['bc4_2'].eval(sess)\n BC4_3 = biases['bc4_3'].eval(sess)\n BC5_1 = biases['bc5_1'].eval(sess)\n BC5_2 = biases['bc5_2'].eval(sess)\n BC5_3 = biases['bc5_3'].eval(sess)\n BC6 = biases['bc6'].eval(sess)\n BC7 = biases['bc7'].eval(sess)\n BC8 = biases['bc8'].eval(sess)\n BC9 = biases['bc9'].eval(sess)\n\ng = tf.Graph()\nwith g.as_default():\n X_2 = tf.placeholder(\"float\", [None, IMAGE_SIZE, IMAGE_SIZE, IMAGE_LAYER], name=\"input\")\n x_image = tf.reshape(X_2, [-1, IMAGE_SIZE, IMAGE_SIZE, IMAGE_LAYER])\n\n WC1_1 = tf.constant(WC1_1, name=\"WC1_1\")\n WC1_2 = tf.constant(WC1_2, name=\"WC1_2\")\n WC2_1 = tf.constant(WC2_1, name=\"WC2_1\")\n WC2_2 = tf.constant(WC2_2, name=\"WC2_2\")\n WC3_1 = tf.constant(WC3_1, name=\"WC3_1\")\n WC3_2 = tf.constant(WC3_2, name=\"WC3_2\")\n WC3_3 = tf.constant(WC3_3, name=\"WC3_3\")\n WC4_1 = tf.constant(WC4_1, name=\"WC4_1\")\n WC4_2 = tf.constant(WC4_2, name=\"WC4_2\")\n WC4_3 = tf.constant(WC4_3, name=\"WC4_3\")\n WC5_1 = tf.constant(WC5_1, name=\"WC5_1\")\n WC5_2 = tf.constant(WC5_2, name=\"WC5_2\")\n WC5_3 = tf.constant(WC5_3, name=\"WC5_3\")\n WC6 = tf.constant(WC6, name=\"WC6\")\n WC7 = tf.constant(WC7, name=\"WC7\")\n WC8 = tf.constant(WC8, name=\"WC8\")\n WC9 = tf.constant(WC9, name=\"WC9\")\n BC1_1 = tf.constant(BC1_1, name=\"BC1_1\")\n BC1_2 = tf.constant(BC1_2, name=\"BC1_2\")\n BC2_1 = tf.constant(BC2_1, name=\"BC2_1\")\n BC2_2 = tf.constant(BC2_2, name=\"BC2_2\")\n BC3_1 = tf.constant(BC3_1, name=\"BC3_1\")\n BC3_2 = tf.constant(BC3_2, name=\"BC3_2\")\n BC3_3 = tf.constant(BC3_3, name=\"BC3_3\")\n BC4_1 = tf.constant(BC4_1, name=\"BC4_1\")\n BC4_2 = tf.constant(BC4_2, name=\"BC4_2\")\n BC4_3 = tf.constant(BC4_3, name=\"BC4_3\")\n BC5_1 = tf.constant(BC5_1, name=\"BC5_1\")\n BC5_2 = tf.constant(BC5_2, name=\"BC5_2\")\n BC5_3 = tf.constant(BC5_3, name=\"BC5_3\")\n BC6 = tf.constant(BC6, name=\"BC6\")\n BC7 = tf.constant(BC7, name=\"BC7\")\n BC8 = tf.constant(BC8, name=\"BC8\")\n BC9 = tf.constant(BC9, 
name=\"BC9\")\n\n CONV1_1 = conv2d(x_image, WC1_1, BC1_1)\n CONV1_2 = conv2d(CONV1_1, WC1_2, BC1_2)\n POOL1 = maxpool2d(CONV1_2)\n\n CONV2_1 = conv2d(POOL1, WC2_1, BC2_1)\n CONV2_2 = conv2d(CONV2_1, WC2_2, BC2_2)\n POOL2 = maxpool2d(CONV2_2)\n\n CONV3_1 = conv2d(POOL2, WC3_1, BC3_1)\n CONV3_2 = conv2d(CONV3_1, WC3_2, BC3_2)\n CONV3_3 = conv2d(CONV3_2, WC3_3, BC3_3)\n POOL3 = maxpool2d(CONV3_3)\n\n CONV4_1 = conv2d(POOL3, WC4_1, BC4_1)\n CONV4_2 = conv2d(CONV4_1, WC4_2, BC4_2)\n CONV4_3 = conv2d(CONV4_2, WC4_3, BC4_3)\n POOL4 = maxpool2d(CONV4_3)\n\n CONV5_1 = conv2d(POOL4, WC5_1, BC5_1)\n CONV5_2 = conv2d(CONV5_1, WC5_2, BC5_2)\n CONV5_3 = conv2d(CONV5_2, WC5_3, BC5_3, strid=2)\n POOL5 = maxpool2d(CONV5_3)\n\n FC1 = tf.reshape(POOL5, [-1, 8192])\n FC1 = tf.nn.relu(tf.matmul(FC1, WC6) + BC6)\n\n FC2 = tf.nn.relu(tf.matmul(FC1, WC7) + BC7)\n\n FC3 = tf.nn.relu(tf.matmul(FC2, WC8) + BC8)\n\n FC4 = tf.add(tf.matmul(FC3, WC9), BC9)\n\n softMaxTest = tf.nn.softmax(FC4, name=\"output\")\n\n sess = tf.Session()\n init = tf.global_variables_initializer()\n sess.run(init)\n\n y_train = tf.placeholder(\"float\", [None, NB_CLASSES])\n correct_prediction = tf.equal(tf.argmax(FC4, 1), tf.argmax(y_train, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n tf.train.write_graph(g.as_graph_def(), 'models/', 'small_obj_00005_200_VGG_2.pb', as_text=False)\n tf.train.write_graph(g.as_graph_def(), 'models/', 'small_obj_00005_200_text_VGG_2.pb', as_text=True)\n\n i = 0\n\n outputResultFile = open(\"OBJ_OUTPUT_RESULT_4.txt\", 'w')\n\n while True:\n if len(testDatasetNameArray) < (i + 1) * 20:\n break\n input_y = oneHotEncoding(testDatasetScoreArray[i * 20:(i + 1) * 20])\n outputResultFile.write(\"check accuracy %g\" % accuracy.eval(\n {X_2: getImageDatasByName(testDatasetNameArray[i * 20:(i + 1) * 20]), y_train: input_y}, sess))\n\n outputResultFile.write(\"\\n\")\n resultSoftMaxList = softMaxTest.eval({X_2: getImageDatasByName(testDatasetNameArray[i * 20:(i + 1) * 20])}, sess)\n\n for j in range(len(resultSoftMaxList)):\n outputResultFile.write(str(resultSoftMaxList[j]) + \"\\n\" + str(input_y[j]) + \"\\n\")\n\n i += 1\n\n # test_batch_xs = getImageDatasByName(testDatasetNameArray)\n # test_batch_ys = getImageUpDownByScore(testDatasetScoreArray)\n # print(\"Accuracy: \", accuracy.eval(session=sess, feed_dict={X: test_batch_xs, Y: test_batch_ys, keep_prop: 1}))\n","sub_path":"TensorFlow/GraduateThesis/main_for_obj.py","file_name":"main_for_obj.py","file_ext":"py","file_size_in_byte":13717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"40343980","text":"import vk_api\nfrom vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType\nimport random\nimport json\nfrom bot.settings import TOKEN, COM_ID, NEWS_COUNT\nfrom requests import get\n\nWEEKDAYS = {\"monday\": \"понедельник\", \"tuesday\": \"вторник\", \"wednesday\": \"среда\", \"thursday\": \"четверг\",\n \"friday\": \"пятница\", \"saturday\": \"суббота\"}\nvk_session = vk_api.VkApi(\n token=TOKEN)\nlongpoll = VkBotLongPoll(vk_session, COM_ID)\nvk = vk_session.get_api()\n# словарь из текущих запросов пользователя ( {user_id: {\"action\": \"action_name\",\"grade\":\"grade_name\"}} )\ncur_requests = {}\n\n\ndef send_weekdays():\n \"\"\"\n Функция отправки меню с днями недели для пользователя ( in-line keyboard)\n \"\"\"\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=\"Выберите день недели для просмотра расписания\",\n random_id=random.randint(0, 2 ** 64),\n 
keyboard=json.dumps({\"one_time\": True, \"buttons\": [\n [{\"action\": {\"type\": \"text\", \"label\": \"Понедельник\",\n 'payload': '{\"command\":\"week_day\"}'\n },\n \"color\": \"negative\"},\n {\"action\": {\"type\": \"text\", \"label\": \"Вторник\",\n 'payload': '{\"command\":\"week_day\"}'\n },\n \"color\": \"negative\"},\n {\"action\": {\"type\": \"text\", \"label\": \"Среда\", 'payload': '{\"command\":\"week_day\"}'\n },\n \"color\": \"negative\"}], [\n {\"action\": {\"type\": \"text\", \"label\": \"Четверг\",\n 'payload': '{\"command\":\"week_day\"}'\n },\n \"color\": \"negative\"},\n {\"action\": {\"type\": \"text\", \"label\": \"Пятница\",\n 'payload': '{\"command\":\"week_day\"}'\n },\n \"color\": \"negative\"}\n ], [{\"action\": {\"type\": \"text\", \"label\": \"Вернуться в меню\",\n 'payload': '{\"command\":\"menu\"}'\n },\n \"color\": \"positive\"}]]}))\n\n\ndef send_menu():\n \"\"\"\n Функция отправки основного меню для пользователя ( in-line keyboard)\n \"\"\"\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=\"Выберите один из предложенных вариантов дальнейшей работы\",\n random_id=random.randint(0, 2 ** 64),\n keyboard=json.dumps({\"one_time\": True, \"buttons\": [\n [{\"action\": {\"type\": \"text\", \"label\": \"Расписание\",\n 'payload': '{\"command\":\"schedule\"}'},\n \"color\": \"negative\"}, {\"action\": {\"type\": \"text\", \"label\": \"Расписание звонков\",\n 'payload': '{\"command\":\"schedule_calls\"}'},\n \"color\": \"negative\"}],\n [{\"action\": {\"type\": \"text\", \"label\": \"Новости\", 'payload': '{\"command\":\"news\"}'},\n \"color\": \"negative\"}\n ]]}))\n\n\nfor event in longpoll.listen():\n if event.type == VkBotEventType.MESSAGE_NEW:\n # Если сообщение отправлено с кнопки клавиатуры бота, т.е существует ключ \"payload\",\n # то обрабатываем запрос с помощью него\n if \"payload\" in event.object[\"message\"]:\n if eval(event.object[\"message\"][\"payload\"])[\"command\"] == \"start\":\n send_menu()\n elif eval(event.object[\"message\"][\"payload\"])[\"command\"] == \"schedule\":\n # Начало обработки запроса на получение расписания уроков\n send_weekdays()\n cur_requests[event.obj.message['from_id']] = {\"action\": \"schedule\"}\n elif eval(event.object[\"message\"][\"payload\"])[\"command\"] == \"schedule_calls\":\n # Начало обработки запроса на получение расписания звонков\n send_weekdays()\n cur_requests[event.obj.message['from_id']] = {\"action\": \"schedule_calls\"}\n elif eval(event.object[\"message\"][\"payload\"])[\"command\"] == \"week_day\":\n # если после отправки дня недели выбрано действие расписания звонков, то класс запрашивать не нужно и\n # отправка расписания происходит сразу после получения сообщения о дне недели\n if cur_requests[event.obj.message['from_id']][\"action\"] == \"schedule_calls\":\n cur_requests[event.obj.message['from_id']][\"weekday\"] = event.object[\"message\"][\"text\"].strip(\n \"\\n\").strip().split()\n try:\n all_schedules = get('http://127.0.0.1:5000/api/get/schedule_calls').json()[\"schedule calls\"]\n needed = list(\n filter(\n lambda schedule: schedule[\"weekday\"] ==\n cur_requests[event.obj.message['from_id']][\n \"weekday\"][0].lower(),\n all_schedules))[0]\n message = \"\"\n for i in range(1, len(needed[\"schedule\"].split(\"\\n\")) + 1):\n message += \"{}. 
{}\\n\".format(i, needed['schedule'].split('\\n')[i - 1])\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=message,\n random_id=random.randint(0, 2 ** 64))\n cur_requests[event.obj.message['from_id']] = {}\n send_menu()\n except Exception as x:\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=f\"Ошибка {x}\\nКласс не найден или расписания не существует.\",\n random_id=random.randint(0, 2 ** 64))\n cur_requests[event.obj.message['from_id']] = {}\n send_menu()\n else:\n # Если же запрос не расписания звонков, то происходит запрос класса у пользователя и день недели\n # записывается в словарь с ключем weekday\n\n if event.obj.message['from_id'] not in cur_requests:\n cur_requests[event.obj.message['from_id']] = {}\n cur_requests[event.obj.message['from_id']][\"weekday\"] = event.object[\"message\"][\"text\"].strip(\n \"\\n\").strip().split()\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=\"Введите свой класс c буквой через пробел (Например: 7 А)\",\n random_id=random.randint(0, 2 ** 64))\n\n elif eval(event.object[\"message\"][\"payload\"])[\"command\"] == \"news\":\n try:\n news = get('http://127.0.0.1:5000/api/get/news').json()[\"news\"]\n if news:\n count = min(NEWS_COUNT, len(news))\n for i in range(count, 0, 1):\n message = \"\"\n message += f'{news[i][\"title\"]}\\n'\n message += news[i][\"data\"] + \"\\n\"\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=message,\n random_id=random.randint(0, 2 ** 64))\n else:\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=\"К сожалению, список новостей пуст\",\n random_id=random.randint(0, 2 ** 64))\n send_menu()\n\n except Exception as x:\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=f\"Ошибка {x}\\n\",\n random_id=random.randint(0, 2 ** 64))\n send_menu()\n elif eval(event.object[\"message\"][\"payload\"])[\"command\"] == \"menu\":\n send_menu()\n\n else:\n message = event.object[\"message\"][\"text\"].strip(\"\\n\").strip().split()\n if event.obj.message['from_id'] in cur_requests:\n if \"action\" not in cur_requests[event.obj.message['from_id']]:\n cur_requests[event.obj.message['from_id']][\"action\"] = None\n if cur_requests[event.obj.message['from_id']][\"action\"] == \"schedule\":\n # Если действие - получение расписания (schedule) то происходит запрос к api и обрабатываются все\n # полученные расписания, которые удовлетворяют запросу ( день недели и выбранный класс)\n if \"grade\" not in cur_requests[event.obj.message['from_id']]:\n cur_requests[event.obj.message['from_id']][\"grade\"] = \"\".join(message).lower()\n try:\n all_schedules = get('http://127.0.0.1:5000/api/get/schedule').json()[\"schedule\"]\n if cur_requests[event.obj.message['from_id']][\"grade\"].lower() in [item[\"grade\"].lower() for\n item in all_schedules]:\n needed = list(\n filter(\n lambda schedule: schedule[\"weekday\"].lower() ==\n cur_requests[event.obj.message['from_id']][\n \"weekday\"][0].lower() and schedule[\n \"grade\"] == cur_requests[event.obj.message['from_id']][\n \"grade\"].lower(),\n all_schedules))[0]\n message = \"\"\n for i in range(1, len(needed[\"schedule\"].split(\"\\n\")) + 1):\n message += \"{}. 
{}\\n\".format(i, needed['schedule'].split('\\n')[i - 1])\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=message,\n random_id=random.randint(0, 2 ** 64))\n send_menu()\n else:\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=\"Класс не найден или расписания не существует.\",\n random_id=random.randint(0, 2 ** 64))\n cur_requests[event.obj.message['from_id']] = {}\n send_menu()\n except Exception as x:\n vk.messages.send(user_id=event.obj.message['from_id'],\n message=f\"Ошибка {x}\\nКласс не найден или расписания не существует.\",\n random_id=random.randint(0, 2 ** 64))\n cur_requests[event.obj.message['from_id']] = {}\n send_menu()\n","sub_path":"bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"426837583","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom scrapy.conf import settings\nfrom twisted.enterprise import adbapi\nimport pymysql\nfrom pymysql import cursors\n\n\nclass BoleSpiderPipeline(object):\n def process_item(self, item, spider):\n return item\n\n# class MysqlPipeline(object):\n# def __init__(self):\n# self.conn = pymysql.Connect(\n# host='127.0.0.1',\n# port=3306,\n# user='root',\n# passwd='123456',\n# db='jobbole_crawlspider',\n# charset='utf8',\n# )\n# self.cur = self.conn.cursor()\n#\n# def process_item(self, item, spider):\n# item_data=(item['title'],item['article_type'],item['url'],item['like_num'],item['collect'],item['comment'],)\n# sql = \"insert into jobbole(title,article_type,url,like_num,collect,comment)VALUES ('%s','%s','%s','%s','%s','%s')\" % (item_data)\n# self.cur.execute(sql)\n# self.conn.commit()\n# return item\n\nclass MysqlTwistedPipeline(object):\n def __init__(self):\n dbparms={\n 'host': settings['MYSQL_HOST'],\n 'port': settings['MYSQL_PORT'],\n 'user' :settings['MYSQL_USER'],\n 'password' : settings['MYSQL_PASSWORD'],\n 'database' : settings['MYSQL_DBNAME'],\n 'charset' : 'utf8',\n 'cursorclass':cursors.DictCursor\n }\n self.dbpool=adbapi.ConnectionPool('pymysql',**dbparms)\n\n def process_item(self,item,spider):\n query=self.dbpool.runInteraction(self.do_insert,item)\n query.addErrback(self.handle_error,spider)\n\n def handle_error(self,failure,spider):\n print(failure)\n\n def do_insert(self,cursor,item):\n item_data = (item['title'], item['article_type'], item['url'], item['like_num'], item['collect'], item['comment'])\n sql = \"insert into jobbole(title,article_type,url,like_num,collect,comment)VALUES (%s,%s,%s,%s,%s,%s)\"\n cursor.execute(sql,item_data)\n\n\n","sub_path":"bole_spider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"302213608","text":"import time\n\nimport digitalio\nimport analogio\nimport board\nimport busio\nimport supervisor\n\nimport capablerobot_usbhub\nimport capablerobot_tlc59116\nimport capablerobot_eeprom\nimport capablerobot_ucs2113\n\nboot_time = time.monotonic()\n\nled1 = digitalio.DigitalInOut(board.LED1)\nled1.switch_to_output(value=False)\n\nled2 = digitalio.DigitalInOut(board.LED2)\nled2.switch_to_output(value=True)\n\nled3 = digitalio.DigitalInOut(board.LED3)\nled3.switch_to_output(value=True)\n\nBEAT = 0.05\nLED_BRIGHT = 80\n\ndef stdout(*args):\n if 
def stdout(*args):\n    if supervisor.runtime.serial_connected:\n        print(*args)\n\nstdout(\"... booted ...\")\n\ni2c1 = busio.I2C(board.SCL, board.SDA)\ni2c2 = busio.I2C(board.SCL2, board.SDA2)\n\nstdout(\"... configuring hub ...\")\nusb = capablerobot_usbhub.USBHub(i2c1, i2c2)\nucs = capablerobot_ucs2113.Ports(i2c1)\n\nstdout(\"... configuring leds ...\")\nBRIGHT = 20\nled_pwr = capablerobot_tlc59116.TLC59116(i2c1, 0x61, pwm=BRIGHT)\nled_data = capablerobot_tlc59116.TLC59116(i2c1, 0x62, pwm=BRIGHT)\n\neeprom = capablerobot_eeprom.EEPROM(i2c1, '24AA025E48')\nstdout()\nstdout(\"Unit SKU : %s\" % eeprom.sku)\nstdout(\"  Serial : %s\" % eeprom.serial)\nstdout()\n\nexternal_heartbeat = False\n\n## Seconds that upstream link can be down before resetting the hub\nupstream_timeout = 30\nupstream_state = 'reset'\nupstream_last_time = boot_time\n\nwhile True:\n    time.sleep(0.1)\n\n    ## Internal heartbeat LED\n    led3.value = not led3.value\n\n    if external_heartbeat:\n        if led3.value:\n            led_data.aux(0)\n        else:\n            led_data.aux(250)\n\n    data_state = usb.data_state()\n\n    ## Set the data LEDs based on the detected per-port speeds\n    for idx, speed in enumerate(usb.speeds):\n        color = (0,0,0)\n\n        if idx > 0 and data_state[idx-1] == False:\n            ## If port data is disabled, light the LED orange\n            color = (LED_BRIGHT,int(LED_BRIGHT/2),0)\n        elif speed == 0b01:\n            color = (0,0,LED_BRIGHT)\n        elif speed == 0b10:\n            color = (0,LED_BRIGHT,0)\n        elif speed == 0b11:\n            color = (LED_BRIGHT,LED_BRIGHT,LED_BRIGHT)\n\n        if idx == 0:\n            if speed == 0b00:\n                ## If the upstream port is disconnected, light the \n                ## LED red and record that the link is down\n                color = (LED_BRIGHT,0,0)\n                upstream_state = 'down'\n            else:\n                upstream_last_time = time.monotonic()\n                upstream_state = 'up'\n\n        led_data.rgb(idx, color, update=False)\n\n    led_data.update()\n\n\n    power_state = usb.power_state()\n\n    ## Set the power LEDs based on the measured per-port current\n    for idx, current in enumerate(ucs.currents(raw=True, rescale=2)):\n\n        ## With rescaling, raw reading may be above 255 (max value for LED), so cap it\n        if current > 255:\n            current = 255\n\n        if idx == 0:\n            color = (0,0,int(current/4))\n        else:\n            if power_state[idx-1] == False:\n                ## If port power is disabled, light the LED orange\n                color = (LED_BRIGHT,int(LED_BRIGHT/2),0)\n            else:\n                ## Otherwise, light blue with intensity based on measured power draw\n                color = (0,0,current)\n\n        led_pwr.rgb(idx, color, update=False)\n\n    led_pwr.update()\n\n    if upstream_state == 'down' and time.monotonic() - upstream_last_time > upstream_timeout:\n        stdout(\"--- RESET DUE TO LINK LOSS ---\")\n        \n        usb.reset()\n        usb.configure()\n        usb.set_mcp_config()\n\n        ## Light the host data LED orange to show the reset is occurring\n        led_data.rgb(0, (LED_BRIGHT,int(LED_BRIGHT/2),0), update=True)\n        time.sleep(0.5)\n        \n        ## Reset the upstream timeout to ensure that the next\n        ## reset can only occur after the specified timeout\n        upstream_state = 'reset'\n        upstream_last_time = time.monotonic()\n\n\n\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"466006774","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-n_sfyb/Django/django/contrib/flatpages/templatetags/flatpages.py\n# Compiled at: 2019-02-14 00:35:16\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.flatpages.models import FlatPage\nfrom django.contrib.sites.shortcuts import get_current_site\nregister = template.Library()\n\n
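# Template node that resolves the optional URL prefix and user at render time,\n# then stores the matching flatpages under the requested context variable.\n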
class FlatpageNode(template.Node):\n\n    def __init__(self, context_name, starts_with=None, user=None):\n        self.context_name = context_name\n        if starts_with:\n            self.starts_with = template.Variable(starts_with)\n        else:\n            self.starts_with = None\n        if user:\n            self.user = template.Variable(user)\n        else:\n            self.user = None\n        return\n\n    def render(self, context):\n        if 'request' in context:\n            site_pk = get_current_site(context['request']).pk\n        else:\n            site_pk = settings.SITE_ID\n        flatpages = FlatPage.objects.filter(sites__id=site_pk)\n        if self.starts_with:\n            flatpages = flatpages.filter(url__startswith=self.starts_with.resolve(context))\n        if self.user:\n            user = self.user.resolve(context)\n            if not user.is_authenticated:\n                flatpages = flatpages.filter(registration_required=False)\n        else:\n            flatpages = flatpages.filter(registration_required=False)\n        context[self.context_name] = flatpages\n        return ''\n\n\n@register.tag\ndef get_flatpages(parser, token):\n    \"\"\"\n    Retrieves all flatpage objects available for the current site and\n    visible to the specific user (or visible to all users if no user is\n    specified). Populates the template context with them in a variable\n    whose name is defined by the ``as`` clause.\n\n    An optional ``for`` clause can be used to control the user whose\n    permissions are to be used in determining which flatpages are visible.\n\n    An optional argument, ``starts_with``, can be applied to limit the\n    returned flatpages to those beginning with a particular base URL.\n    This argument can be passed as a variable or a string, as it resolves\n    from the template context.\n\n    Syntax::\n\n        {% get_flatpages ['url_starts_with'] [for user] as context_name %}\n\n    Example usage::\n\n        {% get_flatpages as flatpages %}\n        {% get_flatpages for someuser as flatpages %}\n        {% get_flatpages '/about/' as about_pages %}\n        {% get_flatpages prefix as about_pages %}\n        {% get_flatpages '/about/' for someuser as about_pages %}\n    \"\"\"\n    bits = token.split_contents()\n    syntax_message = \"%(tag_name)s expects a syntax of %(tag_name)s ['url_starts_with'] [for user] as context_name\" % dict(tag_name=bits[0])\n    if len(bits) >= 3 and len(bits) <= 6:\n        if len(bits) % 2 == 0:\n            prefix = bits[1]\n        else:\n            prefix = None\n        if bits[(-2)] != 'as':\n            raise template.TemplateSyntaxError(syntax_message)\n        context_name = bits[(-1)]\n        if len(bits) >= 5:\n            if bits[(-4)] != 'for':\n                raise template.TemplateSyntaxError(syntax_message)\n            user = bits[(-3)]\n        else:\n            user = None\n        return FlatpageNode(context_name, starts_with=prefix, user=user)\n    else:\n        raise template.TemplateSyntaxError(syntax_message)","sub_path":"pycfiles/djx-0.0.4-py2-none-any/flatpages.py","file_name":"flatpages.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"202119934","text":"'''You are given two data sets of distance-versus-time data, one with very small velocity and one with large velocity. Notice that both may have the same standard error of slope, but different R-squared for the model overall, depending on the size of the slope (\"effect size\") as compared to the standard error (\"uncertainty\").\n\nIf we plot both data sets as scatter plots on the same axes, the contrast is clear. Variation due to the slope is different from variation due to the random scatter about the trend line. With the same residual scatter, a steeper slope explains a larger share of the total variation, so R-squared rises even though the slope's standard error stays the same.
In this exercise, your goal is to compute the standard error and R-squared for two data sets and compare.'''\n#TASK\n# Build and fit() an ols() model, for both data sets distances1 and distances2\n# Use the .bse of resulting models model_1 and model_2, and the 'times' key to extract the standard error values for the slope from each model.\n# Use the .rsquared attribute to extract the R-squared value from each model.\n# Print the resulting se_1, rsquared_1, se_2, rsquared_2, and visually compare.\n\n# Build and fit two models, for columns distances1 and distances2 in df\nmodel_1 = ols(formula=\"____ ~ times\", data=df).____()\nmodel_2 = ols(formula=\"____ ~ times\", data=df).____()\n\n# Extract R-squared for each model, and the standard error for each slope\nse_1 = model_1.____['times']\nse_2 = model_2.____['times']\nrsquared_1 = model_1.____\nrsquared_2 = model_2.____\n\n# Print the results\nprint('Model 1: SE = {:0.3f}, R-squared = {:0.3f}'.format(____, ____))\nprint('Model 2: SE = {:0.3f}, R-squared = {:0.3f}'.format(____, ____))\n\n\n\n\n\n\n\n#SOLUTION\n# Build and fit two models, for columns distances1 and distances2 in df\nmodel_1 = ols(formula=\"distances1 ~ times\", data=df).fit()\nmodel_2 = ols(formula=\"distances2 ~ times\", data=df).fit()\n\n# Extract R-squared for each model, and the standard error for each slope\nse_1 = model_1.bse['times']\nse_2 = model_2.bse['times']\nrsquared_1 = model_1.rsquared\nrsquared_2 = model_2.rsquared\n\n# Print the results\nprint('Model 1: SE = {:0.3f}, R-squared = {:0.3f}'.format(se_1, rsquared_1))\nprint('Model 2: SE = {:0.3f}, R-squared = {:0.3f}'.format(se_2, rsquared_2))","sub_path":"DataCamp_Introduction_to_Linear_Modeling_in_Python/3.4.2.Variation_in_Two_Parts.py","file_name":"3.4.2.Variation_in_Two_Parts.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"271079256","text":"\nimport imageio\n\nPATH = 'vortex/'\nimages = []\n\nfor i in range(15):\n images.append(imageio.imread(PATH + 'vortex_0.png'))\n\nfor i in range(0, 361, 3):\n filename = 'vortex_{}.png'.format(i)\n images.append(imageio.imread(PATH + filename))\n\nimageio.mimsave('vortex.gif', images, fps=20)\n","sub_path":"numpy/make_gif.py","file_name":"make_gif.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"366945678","text":"from torchtext.utils import unicode_csv_reader\nfrom torchtext.data.datasets_utils import RawTextIterableDataset\nfrom torchtext.data.datasets_utils import wrap_split_argument\nfrom torchtext.data.datasets_utils import add_docstring_header\nfrom torchtext.data.datasets_utils import download_extract_validate\nimport io\nimport os\nimport logging\n\nURL = 'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM'\n\nMD5 = 'fe39f8b653cada45afd5792e0f0e8f9b'\n\nNUM_LINES = {\n 'train': 3600000,\n 'test': 400000,\n}\n\n_PATH = 'amazon_review_polarity_csv.tar.gz'\n\n_EXTRACTED_FILES = {\n 'train': 'amazon_review_polarity_csv/train.csv',\n 'test': 'amazon_review_polarity_csv/test.csv'\n}\n\n_EXTRACTED_FILES_MD5 = {\n 'train': \"520937107c39a2d1d1f66cd410e9ed9e\",\n 'test': \"f4c8bded2ecbde5f996b675db6228f16\"\n}\n\n\n@add_docstring_header(num_lines=NUM_LINES)\n@wrap_split_argument(('train', 'test'))\ndef AmazonReviewPolarity(root, split):\n def _create_data_from_csv(data_path):\n with io.open(data_path, encoding=\"utf8\") as f:\n reader = 
unicode_csv_reader(f)\n for row in reader:\n yield int(row[0]), ' '.join(row[1:])\n path = download_extract_validate(root, URL, MD5, os.path.join(root, _PATH), os.path.join(root, _EXTRACTED_FILES[split]),\n _EXTRACTED_FILES_MD5[split], hash_type=\"md5\")\n logging.info('Creating {} data'.format(split))\n return RawTextIterableDataset(\"AmazonReviewPolarity\", NUM_LINES[split],\n _create_data_from_csv(path))\n","sub_path":"torchtext/datasets/amazonreviewpolarity.py","file_name":"amazonreviewpolarity.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"92218177","text":"import json\nimport logging\nfrom typing import Tuple\nfrom urllib import parse as urlparse\n\nimport feedparser\nimport requests\nfrom bs4 import BeautifulSoup\nfrom marshmallow import Schema, fields, post_load\n\nlogger = logging.getLogger('feedfinder4')\n\n\nclass FeedInfoSchema(Schema):\n url = fields.Url()\n site_url = fields.Url(allow_none=True)\n title = fields.String(allow_none=True)\n description = fields.String(allow_none=True)\n site_name = fields.String(allow_none=True)\n site_icon_url = fields.Url(allow_none=True)\n subscribed = fields.Boolean(allow_none=True)\n hub = fields.Url(allow_none=True)\n\n @post_load\n def make_feed_info(self, data):\n return FeedInfo(**data)\n\n\nclass FeedInfo:\n def __init__(self,\n url: str = None,\n site_url: str = None,\n title: str = None,\n description: str = None,\n site_name: str = None,\n site_icon_url: str = None,\n hub: str = None,\n subscribed: bool = False,\n is_push: bool = False) -> None:\n self.url = url\n self.site_url = site_url\n self.title = title\n self.description = description\n self.site_name = site_name\n self.site_icon_url = site_icon_url\n self.hub = hub\n self.subscribed = subscribed\n self.is_push = is_push\n\n def __repr__(self):\n return 'FeedInfo: {0}'.format(self.url)\n\n def __eq__(self, other):\n return self.url == other.url\n\n def __hash__(self):\n return hash(self.url)\n\n def get_info(self, text: str=None, soup=None, finder=None):\n if finder:\n self.finder = finder\n\n if text:\n parsed = self.parse_feed(text)\n self.title = self.feed_title(parsed)\n self.description = self.feed_description(parsed)\n self.hub, self_url = self.pubsubhubbub_links(parsed)\n if self.hub and self_url:\n self.is_push = True\n\n if self_url and self.finder is not None:\n if self_url != self.url:\n text = self.finder.is_feed(self_url)\n if text:\n self.url = self_url\n return self.get_info(text, soup)\n\n if soup:\n self.site_name = self.find_site_name(soup)\n self.site_url = self.find_site_url(soup, self.site_url)\n domain = self.domain(self.site_url)\n self.site_icon_url = self.find_site_icon_url(soup, domain)\n\n @staticmethod\n def parse_feed(text: str) -> dict:\n return feedparser.parse(text)\n\n @staticmethod\n def feed_title(parsed: dict)-> str:\n feed = parsed.get('feed', None)\n title = feed.get('title', None)\n if not title:\n return ''\n return FeedInfo.clean_title(title)\n\n @staticmethod\n def clean_title(title: str) -> str:\n try:\n title = BeautifulSoup(title, 'html.parser').get_text()\n if len(title) > 1024:\n title = title[:1020] + u'...'\n return title\n except Exception as e:\n logging.exception(u'Failed to clean title: {0}'.format(e))\n return ''\n\n @staticmethod\n def feed_description(parsed: dict) -> str:\n feed = parsed.get('feed', None)\n subtitle = feed.get('subtitle', None)\n if subtitle:\n return subtitle\n else:\n return feed.get('description', 
None)\n\n    @staticmethod\n    def find_site_name(soup) -> str:\n        site_name_meta = [\n            'og:site_name',\n            'og:title',\n            'application:name',\n            'twitter:app:name:iphone'\n        ]\n\n        for p in site_name_meta:\n            try:\n                name = soup.find(name='meta', property=p).get('content')\n                if name:\n                    return name\n            except AttributeError:\n                pass\n        return ''\n\n    @staticmethod\n    def find_site_url(soup, url: str) -> str:\n        canonical = soup.find(name='link', rel='canonical')\n        try:\n            site = canonical.get('href')\n            if site:\n                return site\n        except AttributeError:\n            pass\n\n        meta = soup.find(name='meta', property='og:url')\n        try:\n            site = meta.get('content')\n            if site:\n                return site\n        except AttributeError:\n            return url\n\n    def find_site_icon_url(self, soup, url) -> str:\n        icon_rel = ['apple-touch-icon', 'shortcut icon', 'icon']\n\n        icon = ''\n        for r in icon_rel:\n            rel = soup.find(name='link', rel=r)\n            if rel:\n                icon = rel.get('href', None)\n                if icon and icon[0] == '/':\n                    icon = '{0}{1}'.format(url, icon)\n                if icon == 'favicon.ico':\n                    icon = '{0}/{1}'.format(url, icon)\n        if not icon:\n            send_url = url + '/favicon.ico'\n            print('Trying url {0} for favicon'.format(send_url))\n            r = self.finder.get_url(send_url)\n            if r:\n                print('Received url {0}'.format(r.url))\n                if r.status_code == requests.codes.ok:\n                    icon = r.url\n        return icon\n\n    @staticmethod\n    def domain(url: str) -> str:\n        parsed = urlparse.urlparse(url)\n        domain = '{uri.scheme}://{uri.netloc}'.format(uri=parsed)\n        return domain\n\n    @staticmethod\n    def pubsubhubbub_links(parsed: dict) -> Tuple[str, str]:\n        \"\"\"\n        Returns a tuple containing the hub url and the self url for\n        a parsed feed.\n\n        :param parsed: An RSS feed parsed by feedparser\n        :type parsed: dict\n        :return: tuple\n        \"\"\"\n\n        hub_url = None\n        self_url = None\n        autodiscovery_url = None\n\n        feed = parsed.get('feed', None)\n        links = feed.get('links', None)\n        if links is None:\n            logger.warning(u'No feed links found')\n            return '', ''\n\n        try:\n            for link in links:\n                if link['rel'] == 'hub':\n                    hub_url = link['href']\n                if link['rel'] == 'self':\n                    self_url = link['href']\n                if link.get('id', None) == 'auto-discovery':\n                    autodiscovery_url = link['href']\n        except AttributeError as e:\n            logger.warning(u'Attribute Error getting feed links: {0}'\n                           .format(e))\n            return '', ''\n\n        if not hub_url and autodiscovery_url:\n            return FeedInfo.pubsubhubbub_links(autodiscovery_url)\n\n        return hub_url, self_url\n\n    def serialize(self):\n        return json.dumps(\n            self, default=lambda o: o.__dict__, sort_keys=True, indent=4)","sub_path":"feedrsub/feeds/feedfinder/feedinfo.py","file_name":"feedinfo.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"259741194","text":"import os\n\nimport boto3\nimport json\n\nimport botocore\nfrom responses import logger\n\n# Expiry time for the presigned URLs\nEXPIRES_IN = os.environ[\"EXPIRES_IN\"]\n# QR code image file name\nQRIMG_JPG = os.environ[\"QRIMG_JPG\"]\n# Coupon image file name\nIMG_PNG = os.environ[\"IMG_PNG\"]\n# Name of the S3 bucket that stores the images\nBUCKET_NAME = os.environ[\"BUCKET_NAME\"]\n# Name of the DynamoDB table that stores the coupon data\nTABLE_NAME = os.environ[\"TABLE_NAME\"]\n\nNO_DATA = \"no_data\"\nITEM = \"Item\"\n\ndynamodb = boto3.resource('dynamodb')\ns3 = boto3.client('s3')\n\n\ndef lambda_handler(event, context):\n    try:\n        # Get the ID from the URL\n        item_key = event['pathParameters']['id']\n        # Fetch the record from DynamoDB\n        item = get_coupon_data(item_key, TABLE_NAME)\n        if item.get(\"error_message\") is None:\n            # Get the coupon image URL from S3\n            coupon_img_url = create_presigned_url(BUCKET_NAME, item_key + \"/\" + IMG_PNG)\n            # Get the QR code image URL from S3\n            qr_img_url = create_presigned_url(BUCKET_NAME, item_key + \"/\" + QRIMG_JPG)\n\n            body = {\n                \"Item\": item,\n                \"coupon_img_url\": coupon_img_url,\n                \"qr_img_url\": qr_img_url,\n            }\n\n        else:\n            body = {\"Item\": item}\n\n    except Exception as e:\n        logger.error(e)\n        raise e\n\n    ret = {\n        \"statusCode\": 200,\n        \"body\": json.dumps(body),\n    }\n    print(ret)\n    return ret\n\n\ndef get_coupon_data(key, table_name):\n    \"\"\"\n    Fetch coupon data from DynamoDB\n    :param key: ID\n    :param table_name: table name\n    :return: the Item as JSON\n    \"\"\"\n    table = dynamodb.Table(table_name)\n    coupon_data = table.get_item(\n        Key={\n            \"id\": key\n        }\n    )\n    if coupon_data.get(ITEM) is None:\n        return {\"error_message\": NO_DATA}\n    else:\n        return coupon_data.get(ITEM)\n\n\ndef create_presigned_url(bucket_name, key):\n    \"\"\"\n    Get a time-limited URL from S3\n    :param bucket_name: bucket name\n    :param key: file path\n    :return: presigned URL with an expiry, or None if the object does not exist\n    \"\"\"\n    try:\n        s3.get_object(\n            Bucket=bucket_name,\n            Key=key\n        )\n        return s3.generate_presigned_url(\n            ClientMethod='get_object',\n            Params={'Bucket': bucket_name, 'Key': key},\n            ExpiresIn=EXPIRES_IN,\n            HttpMethod='GET')\n    except botocore.exceptions.ClientError as e:\n        logger.warning(e)\n        return None\n","sub_path":"functions/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"584011786","text":"# The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3.\n\n# Find the sum of the only eleven primes that are both truncatable from left to right and right to left.\n\n# NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.\n\n# Answer: 748317\n# Elapsed time: 267.5355 seconds\n\nfrom euler import is_prime, load_known_primes\nfrom timer import timer\n\n\ndef _is_prime(number, primes=[]):\n    if not len(primes):\n        return is_prime(number)\n\n    largest_prime = max(primes)\n    if number <= largest_prime:\n        return number in primes\n    else:\n        return is_prime(number)\n\n\ndef pop_string(number, primes):\n    string = str(number)\n    length = len(string)\n    i = 0\n    while i < length:\n        popped = string[i:]\n        popped_int = int(popped)\n        if not _is_prime(popped_int, primes):\n            return False\n        i += 1\n    return True\n\n\ndef is_truncatable_prime(number, primes):\n    string = str(number)\n    length = len(string)\n    if length < 2:\n        return False\n    i = 1\n    while i < length:\n        popped = string[i:]\n        try:\n            popped_int = int(popped)\n            if not _is_prime(popped_int, primes):\n                return False\n        except ValueError as e:\n            print(e)\n            print(f\"number: {number}, length: {length}, i: {i}, popped: {popped}\")\n        finally:\n            i += 1\n\n    i = 1\n    while i < length:\n        popped = string[:-i]\n        try:\n            popped_int = int(popped)\n            if not _is_prime(popped_int, primes):\n                return False\n        except ValueError as e:\n            print(e)\n            print(f\"number: {number}, length: {length}, i: {i}, popped: {popped}\")\n        finally:\n            i += 1\n\n    return True\n\n\n
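# Walk the preloaded primes in ascending order, collecting truncatable ones;\n# the problem statement guarantees exactly eleven, so stop once all are found.\n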
Scripts/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"479357541","text":"from multiprocessing import Pool\nfrom time import sleep,ctime\ndef fun(mes):\n    sleep(2)\n    print(ctime(),mes)\n    return 8888\npool = Pool(4)\n# add tasks to the process pool\nfor i in range(10):\n    mes = 'neba%d'%i\n    # r is the async-result handle for the submitted task\n    r = pool.apply_async(func=fun,args=(mes,))\n# close the pool: no more tasks may be submitted\npool.close()\n# join: the parent process waits for the worker processes to finish\npool.join()\n# fetch the task's return value (note: r only refers to the last task submitted)\nprint(r.get())","sub_path":"month02/day07/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"518203233","text":"# https://atcoder.jp/contests/abc146/tasks/abc146_a\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda :sys.stdin.readline().rstrip()\ndef resolve():\n    S=input()\n    if(S==\"SUN\"):\n        print(7)\n    elif(S==\"MON\"):\n        print(6)\n    elif(S==\"TUE\"):\n        print(5)\n    elif(S==\"WED\"):\n        print(4)\n    elif(S==\"THU\"):\n        print(3)\n    elif(S==\"FRI\"):\n        print(2)\n    else:\n        print(1)\nresolve()\n","sub_path":"ABC146/a_can't_wait_for_holiday.py","file_name":"a_can't_wait_for_holiday.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"429792541","text":"import pygame\r\n\r\n\r\ndef draw_board():\r\n    for r in range(squares):\r\n        for c in range(squares):\r\n            color = colors[(r + c) % 2]\r\n            screen.blit(color, pygame.Rect(r * square_size, c * square_size, square_size, square_size))\r\n\r\n\r\ndef draw_pieces():\r\n    first = ''\r\n    second = ''\r\n    for r in range(squares):\r\n        if r in [0, 1]:\r\n            first = 'w'\r\n        elif r in [6, 7]:\r\n            first = 'b'\r\n        for c in range(squares):\r\n            pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    pygame.init()\r\n    width = height = 800\r\n    screen = pygame.display.set_mode((width, height))\r\n    screen.fill(pygame.Color(\"white\"))\r\n    colors = [pygame.image.load(\"Images/White.png\"), pygame.image.load(\"Images/Black.png\")]\r\n    squares = 8\r\n    square_size = height // squares\r\n    for item in colors:\r\n        pygame.transform.scale(item, (square_size, square_size))\r\n    pieces = ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R']\r\n    draw_board()\r\n    while True:\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                exit()\r\n        pygame.display.flip()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"223751645","text":"##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##\n## ##\n## This file forms part of the Badlands surface processes modelling application. ##\n## ##\n## For full license and copyright information, please refer to the LICENSE.md file ##\n## located at the project root, or contact the authors. 
##\n## ##\n##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##\n\"\"\"\nThis module defines several functions used to define carbonate evolution in Badlands\nsimulation based on 3 forcing parameters: depth, wave and sedimentation rate.\n\"\"\"\nimport os\nimport time\nimport numpy\nimport pandas\nimport triangle\nimport mpi4py.MPI as mpi\nfrom pyBadlands.libUtils import ORmodel\nfrom scipy.ndimage.filters import gaussian_filter\nfrom scipy import interpolate\nfrom scipy.spatial import cKDTree\nfrom collections import OrderedDict\nfrom matplotlib import contour as cntr\n\nclass carbGrowth:\n \"\"\"\n This class defines external carbonate growth parameters.\n \"\"\"\n def __init__(self, input=None, regX=None, regY=None, boundsPt=None):\n\n self.regX = regX\n self.regY = regY\n self.boundsPt = boundsPt\n self.tXY = None\n\n self.growth = input.carbGrowth\n self.depthfile = input.carbDepth\n self.sedfile = input.carbSed\n self.wavefile = input.carbWave\n\n self.growth2 = input.carbGrowth2\n self.depthfile2 = input.carbDepth2\n self.sedfile2 = input.carbSed2\n self.wavefile2 = input.carbWave2\n\n self.depthval = None\n self.depthfct = None\n self.depthFunc = None\n self.sedval = None\n self.sedfct = None\n self.sedFunc = None\n self.waveval = None\n self.wavefct = None\n self.waveFunc = None\n\n self.depthval2 = None\n self.depthfct2 = None\n self.depthFunc2 = None\n self.sedval2 = None\n self.sedfct2 = None\n self.sedFunc2 = None\n self.waveval2 = None\n self.wavefct2 = None\n self.waveFunc2 = None\n\n self.sedgrowth = None\n self.depthgrowth = None\n self.wavegrowth = None\n\n self.sedgrowth2 = None\n self.depthgrowth2 = None\n self.wavegrowth2 = None\n\n self.mlen = input.islandPerim\n self.mdist = input.coastdist\n\n self.mlen2 = input.islandPerim2\n self.mdist2 = input.coastdist2\n\n self.Afactor = input.Afactor\n self.tree = None\n self.dx = None\n self.nx = None\n self.ny = None\n self.xi = None\n self.yi = None\n self.distances = None\n self.indices = None\n\n self.tinBase1 = None\n self.baseMap = input.baseMap\n\n self.tinBase2 = None\n self.baseMap2 = input.baseMap2\n\n if self.depthfile != None:\n self._build_depth_function(1)\n if self.sedfile != None:\n self._build_sed_function(1)\n if self.wavefile != None:\n self._build_wave_function(1)\n\n if self.depthfile2 != None:\n self._build_depth_function(2)\n if self.sedfile2 != None:\n self._build_sed_function(2)\n if self.wavefile2 != None:\n self._build_wave_function(2)\n\n def build_basement(self,tXY,id):\n \"\"\"\n Using Pandas library to read the basement map file and define consolidated and\n soft sediment region.\n \"\"\"\n self.tXY = tXY\n\n # Read basement file\n if id == 1:\n self.tinBase1 = numpy.ones(len(tXY))\n Bmap = pandas.read_csv(str(self.baseMap), sep=r'\\s+', engine='c',\n header=None, na_filter=False, dtype=numpy.float, low_memory=False)\n\n rectBase = numpy.reshape(Bmap.values,(len(self.regX), len(self.regY)),order='F')\n self.tinBase1[self.boundsPt:] = interpolate.interpn( (self.regX, self.regY), rectBase,\n tXY[self.boundsPt:,:], method='linear')\n elif id == 2:\n self.tinBase2 = numpy.ones(len(tXY))\n Bmap = pandas.read_csv(str(self.baseMap2), sep=r'\\s+', engine='c',\n header=None, na_filter=False, dtype=numpy.float, low_memory=False)\n\n rectBase = numpy.reshape(Bmap.values,(len(self.regX), len(self.regY)),order='F')\n self.tinBase2[self.boundsPt:] = interpolate.interpn( (self.regX, self.regY), rectBase,\n tXY[self.boundsPt:,:], method='linear')\n\n return\n\n def 
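_inserted_note(self):\n        \"\"\"Inserted annotation, not part of the original Badlands source: each\n        control file read below holds two columns (x, f(x)); the _build_*_function\n        methods pad the range with sentinel points at +/-1e7 so the resulting\n        interp1d clamps to the first/last tabulated value instead of raising\n        outside the data range.\"\"\"\n        pass\n\n    def 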
_build_depth_function(self,id):\n        \"\"\"\n        Use the Pandas library to read the depth control file and build a depth\n        interpolation function based on a SciPy 1D linear interpolant.\n        \"\"\"\n\n        # Read depth control file\n        if id == 1:\n            depthdata = pandas.read_csv(self.depthfile, sep=r'\\s+', engine='c',\n                                        header=None, na_filter=False,\n                                        dtype=numpy.float, low_memory=False)\n\n            self.depthval = numpy.zeros(len(depthdata.values[:,0])+2)\n            self.depthfct = numpy.zeros(len(self.depthval))\n\n            self.depthval[1:-1] = depthdata.values[:,0]\n            self.depthfct[1:-1] = depthdata.values[:,1]\n            self.depthval[0] = -1.0e7\n            self.depthfct[0] = self.depthfct[1]\n            self.depthval[-1] = 1.e7\n            self.depthfct[-1] = self.depthfct[-2]\n\n            self.depthFunc = interpolate.interp1d(self.depthval, self.depthfct, kind='linear')\n\n        if id == 2:\n            depthdata = pandas.read_csv(self.depthfile2, sep=r'\\s+', engine='c',\n                                        header=None, na_filter=False,\n                                        dtype=numpy.float, low_memory=False)\n\n            self.depthval2 = numpy.zeros(len(depthdata.values[:,0])+2)\n            self.depthfct2 = numpy.zeros(len(self.depthval2))\n\n            self.depthval2[1:-1] = depthdata.values[:,0]\n            self.depthfct2[1:-1] = depthdata.values[:,1]\n            self.depthval2[0] = -1.0e7\n            self.depthfct2[0] = self.depthfct2[1]\n            self.depthval2[-1] = 1.e7\n            self.depthfct2[-1] = self.depthfct2[-2]\n\n            self.depthFunc2 = interpolate.interp1d(self.depthval2, self.depthfct2, kind='linear')\n\n    def _build_sed_function(self,id):\n        \"\"\"\n        Use the Pandas library to read the sedimentation control file and build a\n        sedimentation interpolation function based on a SciPy 1D linear interpolant.\n        \"\"\"\n\n        # Read sedimentation rate file\n        if id == 1:\n            seddata = pandas.read_csv(self.sedfile, sep=r'\\s+', engine='c',\n                                      header=None, na_filter=False,\n                                      dtype=numpy.float, low_memory=False)\n\n            self.sedval = numpy.zeros(len(seddata.values[:,0])+2)\n            self.sedfct = numpy.zeros(len(self.sedval))\n\n            self.sedval[1:-1] = seddata.values[:,0]\n            self.sedfct[1:-1] = seddata.values[:,1]\n            self.sedval[0] = -1.0e7\n            self.sedfct[0] = self.sedfct[1]\n            self.sedval[-1] = 1.e7\n            self.sedfct[-1] = self.sedfct[-2]\n\n            self.sedFunc = interpolate.interp1d(self.sedval, self.sedfct, kind='linear')\n\n        if id == 2:\n            seddata = pandas.read_csv(self.sedfile2, sep=r'\\s+', engine='c',\n                                      header=None, na_filter=False,\n                                      dtype=numpy.float, low_memory=False)\n\n            self.sedval2 = numpy.zeros(len(seddata.values[:,0])+2)\n            self.sedfct2 = numpy.zeros(len(self.sedval2))\n\n            self.sedval2[1:-1] = seddata.values[:,0]\n            self.sedfct2[1:-1] = seddata.values[:,1]\n            self.sedval2[0] = -1.0e7\n            self.sedfct2[0] = self.sedfct2[1]\n            self.sedval2[-1] = 1.e7\n            self.sedfct2[-1] = self.sedfct2[-2]\n\n            self.sedFunc2 = interpolate.interp1d(self.sedval2, self.sedfct2, kind='linear')\n\n    def _build_wave_function(self, id):\n        \"\"\"\n        Use the Pandas library to read the wave control file and build a wave\n        interpolation function based on a SciPy 1D linear interpolant.\n        \"\"\"\n\n        # Read wave control file\n        if id == 1:\n            wavedata = pandas.read_csv(self.wavefile, sep=r'\\s+', engine='c',\n                                       header=None, na_filter=False,\n                                       dtype=numpy.float, low_memory=False)\n\n            self.waveval = numpy.zeros(len(wavedata.values[:,0])+2)\n            self.wavefct = numpy.zeros(len(self.waveval))\n\n            self.waveval[1:-1] = wavedata.values[:,0]\n            self.wavefct[1:-1] = wavedata.values[:,1]\n            self.waveval[0] = -1.0e7\n            self.wavefct[0] = self.wavefct[1]\n            self.waveval[-1] = 1.e7\n            self.wavefct[-1] = self.wavefct[-2]\n\n            self.waveFunc = interpolate.interp1d(self.waveval, self.wavefct, kind='linear')\n\n        if id == 2:\n            wavedata = 
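None\n            # --- inserted sketch (not in the original file): the three _build_*_function\n            # methods repeat one pattern; a generic helper could look like this:\n            #   def _build_1d_interp(self, path):\n            #       data = pandas.read_csv(path, sep=r'\\s+', engine='c', header=None,\n            #                              na_filter=False, dtype=numpy.float, low_memory=False)\n            #       xv = numpy.zeros(len(data.values[:,0]) + 2)\n            #       fv = numpy.zeros(len(xv))\n            #       xv[1:-1], fv[1:-1] = data.values[:,0], data.values[:,1]\n            #       xv[0], xv[-1] = -1.0e7, 1.e7\n            #       fv[0], fv[-1] = fv[1], fv[-2]\n            #       return interpolate.interp1d(xv, fv, kind='linear')\n            # --- end sketch ---\n            wavedata = 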
pandas.read_csv(self.wavefile2, sep=r'\\s+', engine='c',\n                                       header=None, na_filter=False,\n                                       dtype=numpy.float, low_memory=False)\n\n            self.waveval2 = numpy.zeros(len(wavedata.values[:,0])+2)\n            self.wavefct2 = numpy.zeros(len(self.waveval2))\n\n            self.waveval2[1:-1] = wavedata.values[:,0]\n            self.wavefct2[1:-1] = wavedata.values[:,1]\n            self.waveval2[0] = -1.0e7\n            self.wavefct2[0] = self.wavefct2[1]\n            self.waveval2[-1] = 1.e7\n            self.wavefct2[-1] = self.wavefct2[-2]\n\n            self.waveFunc2 = interpolate.interp1d(self.waveval2, self.wavefct2, kind='linear')\n\n    def _getWaveFct(self, wavefield, id):\n        \"\"\"\n        Compute the wave-dependent carbonate growth function for a given wave field.\n\n        Parameters\n        ----------\n        wavefield : numpy array containing wave height.\n        \"\"\"\n\n        if id == 1:\n            if self.wavefile == None:\n                self.wavegrowth = numpy.ones(len(wavefield))\n            else:\n                self.wavegrowth = self.waveFunc(wavefield)\n\n        if id == 2:\n            if self.wavefile2 == None:\n                self.wavegrowth2 = numpy.ones(len(wavefield))\n            else:\n                self.wavegrowth2 = self.waveFunc2(wavefield)\n\n        return\n\n    def _getSedFct(self, sedfield, id):\n        \"\"\"\n        Compute the sedimentation-rate-dependent carbonate growth function.\n\n        Parameters\n        ----------\n        sedfield : numpy array containing sedimentation rate.\n        \"\"\"\n\n        if id == 1:\n            if self.sedfile == None:\n                self.sedgrowth = numpy.ones(len(sedfield))\n            else:\n                self.sedgrowth = self.sedFunc(sedfield)\n\n        if id == 2:\n            if self.sedfile2 == None:\n                self.sedgrowth2 = numpy.ones(len(sedfield))\n            else:\n                self.sedgrowth2 = self.sedFunc2(sedfield)\n\n        return\n\n    def _getDepthFct(self, depthfield, id):\n        \"\"\"\n        Compute the depth-dependent carbonate growth function for a given depth field.\n\n        Parameters\n        ----------\n        depthfield : numpy array containing depth.\n        \"\"\"\n\n        if id == 1:\n            if self.depthfile == None:\n                self.depthgrowth = numpy.ones(len(depthfield))\n            else:\n                self.depthgrowth = self.depthFunc(-depthfield)\n\n        if id == 2:\n            if self.depthfile2 == None:\n                self.depthgrowth2 = numpy.ones(len(depthfield))\n            else:\n                self.depthgrowth2 = self.depthFunc2(-depthfield)\n\n        return\n\n    def computeShoreline(self,z,lvl=0.):\n        \"\"\"\n        This function computes the shoreline position for a given sea-level.\n        Parameters\n        ----------\n        variable: z\n            Mesh relative elevation to sea-level.\n        variable: lvl\n            Water level defined in the input.\n        \"\"\"\n\n        c = cntr.Cntr(self.xi, self.yi, z)\n        contour = c.trace(lvl)\n\n        nseg = len(contour) // 2\n        contours, codes = contour[:nseg], contour[nseg:]\n        contourList = []\n        start = True\n\n        # Loop through each contour\n        for c in range(len(contours)):\n            tmpts = contours[c]\n            closed = False\n            if tmpts[0,0] == tmpts[-1,0] and tmpts[0,1] == tmpts[-1,1]:\n                closed = True\n\n            # Remove duplicate points\n            unique = OrderedDict()\n            for p in zip(tmpts[:,0], tmpts[:,1]):\n                unique.setdefault(p[:2], p)\n            pts = numpy.asarray(unique.values())\n\n            if closed:\n                cpts = numpy.zeros((len(pts)+1,2), order='F')\n                cpts[0:len(pts),0:2] = pts\n                cpts[-1,0:2] = pts[0,0:2]\n\n                # Get contour length\n                arr = cpts\n                val = (arr[:-1,:] - arr[1:,:]).ravel()\n                dist = val.reshape((arr.shape[0]-1,2))\n                lgth = numpy.sum(numpy.sqrt(numpy.sum(dist**2, axis=1)))\n            else:\n                lgth = 1.e8\n                cpts = pts\n\n            if len(cpts) > 2 and lgth > self.mlen:\n                contourList.append(cpts)\n                if start:\n                    contourPts = cpts\n                    start = False\n                else:\n                    contourPts = numpy.concatenate((contourPts,cpts))\n\n        return contourPts\n\n    def oceanIDs(self,xy,depthfield):\n\n        tree = cKDTree(xy)\n        distances, indices = tree.query(self.tXY, k=1)\n        seaIDs = 
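None\n        # inserted annotation (illustrative, not in the original): cKDTree.query\n        # returns nearest-neighbour distances and indices, e.g.\n        #   tree = cKDTree([[0., 0.], [10., 0.]])\n        #   d, i = tree.query([[1., 0.]], k=1)   # d -> [1.], i -> [0]\n        # the expression below keeps submerged nodes at least self.mdist from shore.\n        seaIDs = 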
numpy.where(numpy.logical_and(distances[:]>=self.mdist,depthfield<=0.))[0]\n\n        return seaIDs\n\n    def buildReg(self,tXY):\n        \"\"\"\n        Build regular grid for shoreline contour calculation.\n        \"\"\"\n\n        self.tXY = tXY\n        self.tree = cKDTree(self.tXY)\n        self.dx = (self.tXY[1,0] - self.tXY[0,0])*self.Afactor\n\n        if self.nx is None:\n            self.nx = int((self.tXY[:,0].max() - self.tXY[:,0].min())/self.dx+1)\n            self.ny = int((self.tXY[:,1].max() - self.tXY[:,1].min())/self.dx+1)\n            xi = numpy.linspace(self.tXY[:,0].min(), self.tXY[:,0].max(), self.nx)\n            yi = numpy.linspace(self.tXY[:,1].min(), self.tXY[:,1].max(), self.ny)\n            self.xi, self.yi = numpy.meshgrid(xi, yi)\n            xyi = numpy.dstack([self.xi.flatten(), self.yi.flatten()])[0]\n\n            self.distances, self.indices = self.tree.query(xyi, k=3)\n\n        return\n\n    def getDistanceShore(self,depthfield):\n        \"\"\"\n        Computes IDs of nodes at a given distance from shoreline.\n        \"\"\"\n\n        if len(depthfield[self.indices].shape) == 3:\n            z_vals = depthfield[self.indices][:,:,0]\n        else:\n            z_vals = depthfield[self.indices]\n\n        zi = numpy.average(z_vals,weights=(1./self.distances), axis=1)\n        onIDs = numpy.where(self.distances[:,0] == 0)[0]\n        if len(onIDs) > 0:\n            zi[onIDs] = depthfield[self.indices[onIDs,0]]\n\n        z = numpy.reshape(zi,(self.ny, self.nx))\n\n        xy = self.computeShoreline(z)\n\n        seaIDs = self.oceanIDs(xy, depthfield)\n\n        return seaIDs\n\n    def computeCarbonate(self, wavefield, sedfield, depthfield, dt):\n        \"\"\"\n        Computes carbonate growth.\n        \"\"\"\n\n        if self.mdist == 0.:\n            if self.baseMap is not None:\n                tmpids = numpy.where(depthfield<0.)[0]\n                seaIds = numpy.where(numpy.logical_and(self.tinBase1==0,depthfield<0.))[0]\n            else:\n                seaIds = numpy.where(depthfield<0.)[0]\n\n            if self.growth2 > 0.:\n                if self.baseMap2 is not None:\n                    tmpids = numpy.where(depthfield<0.)[0]\n                    seaIds2 = numpy.where(numpy.logical_and(self.tinBase2==0,depthfield<0.))[0]\n                else:\n                    seaIds2 = numpy.where(depthfield<0.)[0]\n        else:\n            seaIds = self.getDistanceShore(depthfield)\n            seaIds2 = self.getDistanceShore(depthfield)\n\n        growth = numpy.zeros(len(depthfield))\n        growth.fill(1.1e6)\n\n        if self.growth2 > 0.:\n            growth2 = numpy.zeros(len(depthfield))\n            growth2.fill(1.1e6)\n\n        # Get each controlling function values\n        if self.depthfile != None:\n            self._getDepthFct(depthfield,1)\n            growth[seaIds] = numpy.minimum(growth[seaIds],self.depthgrowth[seaIds])\n        if self.sedfile != None:\n            self._getSedFct(sedfield,1)\n            growth[seaIds] = numpy.minimum(growth[seaIds],self.sedgrowth[seaIds])\n        if self.wavefile != None:\n            self._getWaveFct(wavefield,1)\n            growth[seaIds] = numpy.minimum(growth[seaIds],self.wavegrowth[seaIds])\n        growth[growth>1.e6] = 0.\n\n        if self.growth2 > 0.:\n            if self.depthfile2 != None:\n                self._getDepthFct(depthfield,2)\n                growth2[seaIds2] = numpy.minimum(growth2[seaIds2],self.depthgrowth2[seaIds2])\n            if self.sedfile2 != None:\n                self._getSedFct(sedfield,2)\n                growth2[seaIds2] = numpy.minimum(growth2[seaIds2],self.sedgrowth2[seaIds2])\n            if self.wavefile2 != None:\n                self._getWaveFct(wavefield,2)\n                growth2[seaIds2] = numpy.minimum(growth2[seaIds2],self.wavegrowth2[seaIds2])\n            growth2[growth2>1.e6] = 0.\n\n        # Average growth function limitation\n        val = self.growth*growth*dt\n        val[val<0.] = 0.\n        val[seaIds] = numpy.minimum(val[seaIds],-depthfield[seaIds]*0.98)\n        tmpid = numpy.where(numpy.logical_and(val==val.max(),val>0))[0]\n        if self.growth2 > 0.:\n            val2 = self.growth2*growth2*dt\n            val2[val2<0.] 
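= 0.\n            # inserted annotation (not in the original): growth is capped at 98% of\n            # the local water depth so carbonate never outgrows sea level; e.g. for\n            # a depth of -10 m and growth*dt of 20 m, min(20, 9.8) = 9.8 m is added.\n            val2[val2<0.] 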
= 0.\n val2[seaIds2] = numpy.minimum(val2[seaIds2],-depthfield[seaIds2]*0.98)\n else:\n val2 = None\n\n return val, val2\n","sub_path":"pyBadlands/forcing/carbGrowth.py","file_name":"carbGrowth.py","file_ext":"py","file_size_in_byte":18171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"278101820","text":"#!/usr/bin/env python3\n\"\"\"Scrub needless files to save space\"\"\"\n\nfrom pathlib import Path\nimport shutil\nimport subprocess\n\ntotal_saved = 0\n\ndef command_output(cmd, harden=False):\n if harden:\n cmd = f\"{cmd} 2>/dev/null || true\"\n try:\n return subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n except Exception as exc:\n print(exc)\n\ndef path_for(dirname):\n return Path(dirname).expanduser()\n\ndef get_size(dirpath):\n if dirpath.exists():\n # -H: Symbolic links on the command line are followed, symbolic links in file hierarchies are not followed.\n # -s: Display an entry for each specified file.\n # -k: Display block counts in 1024-byte (1 kiB) blocks.\n output = command_output(f\"du -Hsk {dirpath}\", harden=True)\n size = int(output.split(\"\\t\")[0])\n else:\n size = 0\n return size * 1024\n\ndef clean(dirname, cmd=None):\n print(f\"---- {dirname}: {cmd.__doc__.strip()}\")\n dirpath = path_for(dirname)\n if not dirpath.exists():\n print(\"Doesn't exist\")\n return\n before = get_size(dirpath)\n print(f\"before: {before:15,d}\")\n output = cmd(str(dirpath)) or \"\"\n if output.strip():\n print(output.rstrip())\n after = get_size(dirpath)\n print(f\"after: {after:15,d}\")\n print(f\"saved: {before-after:15,d}\")\n global total_saved\n total_saved += before - after\n\n\ndef rmrf(dirname):\n \"\"\"Remove completely\"\"\"\n shutil.rmtree(dirname, ignore_errors=True)\n\ndef cmd(template, doc=None, harden=False):\n def doit(dirname):\n print(command_output(template.format(dirname=dirname), harden=harden))\n doit.__doc__ = doc or template\n return doit\n\nrm_pyc = cmd(r\"find {dirname} -regex '.*\\.py[cow]' -delete\", \"Delete .pyc etc files\")\nrm_tox = cmd(r\"find {dirname} -name '.tox' -exec rm -rf {{}} \\; -prune\", \"Delete .tox directories\", harden=True)\n\nclean(command_output(\"brew --cache\").strip(), rmrf)\nclean(\"/usr/local/pyenv/pyenv/cache\", rmrf)\nclean(\"~/Documents/Zoom\", rmrf)\nclean(\"~/Library/Caches/com.spotify.client\", rmrf)\nclean(\"~/Library/Caches/pip\", rmrf)\nclean(\"~/Library/Caches/pipenv\", rmrf)\nclean(\"~/Library/Caches/pip-tools\", rmrf)\nclean(\"~/Library/Caches/yarn\", rmrf)\nclean(\"~\", rm_tox)\nclean(\"/src\", rm_tox)\nclean(\"/usr/local/virtualenvs\", rm_pyc)\nclean(\"/usr/local/pyenv\", rm_pyc)\nclean(\"/usr/local/pypy\", rm_pyc)\nclean(\"/tmp/nedbatchelder-pyc\", rmrf)\nclean(\"~/log/irc\", cmd(\"afsctool -cvv -9 {dirname}\"))\n\nprint(f\"----\\nTOTAL: {total_saved:15,d}\")\n","sub_path":"bin/clean-disk.py","file_name":"clean-disk.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"92358139","text":"# importing the required modules\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium import webdriver\nimport time\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"headless\")\n\npd.options.display.max_columns = 999\npd.options.display.max_rows = 999\ndf = pd.read_csv('New_profile_links.csv')\npl = list(df[\"0\"])\n\nbasic_data = []\nmain_data = []\nproduct_review = []\nprofile_all = []\n#all_project = []\nreview_all = 
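[]\n# inserted annotation (hedged sketch, not in the original script): the fixed\n# time.sleep(5) waits below are fragile; selenium's explicit waits are an\n# alternative, e.g.:\n#   from selenium.webdriver.support.ui import WebDriverWait\n#   WebDriverWait(driver, 10).until(\n#       lambda d: d.execute_script('return document.readyState') == 'complete')\nreview_all = 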
[]\nmain_category = []\nurls = []\nproject_data = []\n\nlength = len(pl)\nfor i in range(length):\n \n try:\n one_user = []\n print(\"Serial :\",i)\n url = pl[i]\n print(url)\n urls.append(url)\n\n driver = webdriver.Chrome(r'C:\\Users\\David\\chromedriver_win32\\chromedriver.exe', chrome_options=options)\n driver.get(url)\n time.sleep(5)\n\n ######################################### basic data section ########################################\n #basic informations\n basic = []\n source_code = driver.page_source\n soup = BeautifulSoup(source_code, 'html.parser')\n except:\n print(\"Error in webdriver\")\n continue\n try:\n #name\n name = soup.find_all('strong', class_ = 'userName--1ZA07')\n for n in name:\n basic.append(n.text) \n\n #category\n category = soup.find_all('strong', class_ = 'introCategory--F81Ky')\n for e in category:\n basic.append(e.text)\n\n #specialty \n ba = []\n sp = soup.find_all('div', class_ = 'categoryName--1zWtA')\n for m in sp:\n ba.append(m.text)\n basic.append(ba)\n\n #rating\n rating = soup.find_all('div', class_ = 'itemRating--360UA itemRating--2-rFv typeLarge--1cEMN')\n for k in rating:\n km = k.text\n basic.append(km.replace(\"평균 평점\", \"\"))\n\n #Reviews and consultations\n reviews = soup.find_all('span', class_ = 'statsNum--32OX2')\n for kk in reviews:\n basic.append(kk.text)\n\n #appending basic data of all user\n basic_data.append(basic)\n\n\n ######################################### main ########################################\n ### main info data for one user\n maininfo = []\n uh = [\"대표자\",\"상호명\",\"사업자등록번호\",\"통신판매업번호\", \"사업장 주소\", \"고객센터\",'메일']\n\n #main section info\n nn = []\n infos = soup.find_all('ul', class_ = 'productInfoList--1-H-D')\n for f in infos:\n li = f.find_all('li')\n #each list item\n for i in range(len(li)):\n ii = li[i]\n val = uh[i]\n head = ii.find_all(\"span\", class_ = \"title--2YCH3\")\n maini = ii.find_all(\"span\", class_ = \"text--1z2Eb\")\n for h in head:\n if h.text != val:\n if [k, \" \"] not in nn:\n nn.append(\"NA\")\n else:\n for j in maini:\n if j.text not in nn:\n if j.text == None or j.text == \"\" or j.text == \" \":\n nn.append(\"NA\")\n else: \n nn.append(j.text)\n main_data.append(nn)\n\n\n ######################################### count product section ########################################\n #count product and review section\n products = []\n tt = soup.find_all('div', class_ = \"list--e6w5E\")\n for t in tt: \n cc = t.find_all('a', class_='item--3Oz2i')\n for cd in cc:\n ce = cd.find_all('div', class_ = \"count--2w5o6\")\n for i in ce:\n products.append(i.text)\n product_review.append(products)\n\n\n ######################################### Profile data section ########################################\n #profile informations\n profile_heading = []\n profile_text = []\n firm_name = []\n firm_text = []\n\n div = soup.find_all('div', class_ = 'sectionIntroduce--3_qQB')\n for heading in div: \n indiv = heading.find_all('div', class_ = 'introduceMain--g3aND')\n for i in indiv:\n head = i.find_all('strong', class_ = 'introduceMainTitle--2MZc-')\n for h in head:\n profile_heading.append(h.text)\n\n text = i.find_all('p', class_ = 'introduceText--2R5pY')\n for ii in text:\n profile_text.append(ii.text)\n\n careerdiv = soup.find_all('div', class_ = ['profileCareer--3_uFh','isExpert--2GkDA'])\n for i in careerdiv:\n cd = i.find_all('div', class_ = 'profileBox--1jlog')\n for j in cd:\n cd = j.find_all('div', class_ = 'careerJob--2-hX4')\n for c in cd:\n firm_name.append(c.text)\n\n cui = j.find_all('ul', class_ = 
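'careerList--2dpZg')\n                # inserted annotation (not in the original): BeautifulSoup accepts a\n                # list for class_, matching elements that carry ANY of the listed\n                # classes, e.g. soup.find_all('div', class_=['a', 'b']) matches\n                # <div class=\"a\"> as well as <div class=\"b\">.\n                cui = j.find_all('ul', class_ = 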
'careerList--2dpZg')\n for cc in cui:\n firm_text.append(cc.text)\n\n profile_all.append([profile_heading, profile_text, firm_name, firm_text]) \n\n\n\n ######################################### Project data section ########################################\n ### Project data for one user\n projects = soup.find_all('div', class_ = 'listArea--peDdh')\n #projects and consultations \n all_project = []\n for y in projects:\n one = []\n yy = y.find_all('div', class_ = 'item--1ZJSx')\n for t in yy:\n project_item = []\n tdiv = t.find_all('div', class_ =['itemTitle--2vWBq','elip2--nFWXY'])\n for td in tdiv:\n project_title = td.text\n project_item.append(project_title)\n\n ratdiv = t.find_all('div', class_ =['itemGroup--2RnIL','ItemGroup_itemGroup--1f-on'])\n for rd in ratdiv:\n ratscore = rd.find_all(\"div\", class_ = \"itemRating--360UA\")\n for r in ratscore:\n b = r.text\n if \"평균 평점\" in b:\n b = b.replace(\"평균 평점\", \" \")\n project_item.append(b)\n\n ratreview = rd.find_all(\"div\", class_ = \"itemCount--2HsJv\")\n for rr in ratreview:\n c = rr.text\n if \"후기\" in c:\n c = c.replace(\"후기\", \" \")\n project_item.append(c)\n\n feediv = t.find_all('span', class_ =['priceInner--1HE2v'])\n for fd in feediv:\n fee = fd.find_all(\"span\", class_=[\"priceNum--1rXJI\",\"ItemPrice_priceNum--2OFHI\"])\n for f in fee:\n project_item.append(f.text)\n\n discount = fd.find_all(\"em\", class_=\"discountPercent--3n0bl\")\n for dis in discount:\n project_item.append(dis.text)\n\n actualPrize = fd.find_all(\"span\", class_=\"beforeDiscount--W1C4G\")\n for fp in actualPrize:\n project_item.append(fp.text)\n\n one.append([*project_item]) \n all_project.append([*one])\n\n proj = []\n for i in range(len(all_project)):\n data = all_project[i]\n for j in range(len(data)):\n dj = data[j]\n for k in range(len(dj)):\n bb = dj[k]\n proj.append(bb) \n\n lis = [\"평균 평점\",\"후기\",\"판매가\",\"원할인률\",\"할인 전 가격\", \"할인률\"]\n for i in range(len(proj)):\n for j in range(len(lis)):\n if lis[j] in proj[i]:\n proj[i] = proj[i].replace(lis[j], \" \") \n\n\n project_data.append(proj) \n\n\n\n\n ########################################## review section ########################################\n #review object\n review_obj = []\n reviews_user = []\n reviews_rating = []\n reviews_heading = []\n reviews_text = []\n\n rdiv = soup.find_all('div', class_ = \"listSection--kViCl\")\n for eachr in rdiv:\n ee = eachr.find_all('div', class_ = \"reviewItem--1OwNO\")\n\n for each in ee:\n name = each.find_all('span', class_ = [\"item--3sQA9 \",\"nickname--2OOe6\"])\n for nm in name:\n reviews_user.append(nm.text)\n\n rating = each.find_all('div', class_ = [\"expertPoint--2Zrvr\",\"expertPoint--13H3V\"])\n for r in rating:\n reviews_rating.append(r.text)\n\n head = each.find_all('div', class_ = \"reviewTitle--qv3Pk\")\n for r in head:\n reviews_heading.append(r.text)\n\n commentdiv = each.find_all('p', class_ = \"reviewText--28mzN\")\n for ecom in commentdiv:\n reviews_text.append(ecom.text)\n\n for i in range(len(reviews_user)):\n try:\n review_obj.append(reviews_user[i])\n if \"평점\" in reviews_rating[i]:\n rating = reviews_rating[i].replace(\"평점\", \"\")\n review_obj.append(rating)\n else:\n review_obj.append(reviews_rating)\n review_obj.append(reviews_heading[i])\n review_obj.append(reviews_text[i])\n except:\n continue\n\n review_all.append(review_obj)\n\n ######################################### driver close section ########################################\n\n driver.quit()\n ######################################### Final dataframe section 
########################################\n\n except:\n continue\n\ntry:\n #Url dataframe section\n urldf = pd.DataFrame(urls)\n urldf.columns = [\"Url\"]\n\n #basic dataframe section\n basicdf = pd.DataFrame(basic_data)\n basicdf.columns = [\"Name\",\"subcategory\",\"Specialty\",\"Review_score\",\"Review_count\",\"Consultations\"]\n\n #main dataframe section\n maindf = pd.DataFrame(main_data)\n maindf.columns =[\"Representative\", \"Company_name\", \"Business_registration_number\", \"Mail_order_number\", \"Business_address\", \"Customer_Center\",'Mail']\n\n #product review dataframe section\n prdf = pd.DataFrame(product_review)\n prdf.columns =[\"Class_Count\", \"Total_User_Reviews\"]\n\n # # profile dataframe section\n profiledf = pd.DataFrame(profile_all)\n profiledf.columns =[\"Profile\", \"Details\", \"Firm\", \"Education/Career\"]\n\n #projects dataframe section\n projdf = pd.DataFrame(project_data)\n\n reviewdf = pd.DataFrame(review_all)\n rr = []\n\n for i in range(len(reviews_user)):\n rr.append(\"Reviewer_name_\"+ str(i))\n rr.append(\"Reviewer_rating_\"+ str(i))\n rr.append(\"Review_heading_\"+ str(i))\n rr.append(\"Review_text_\"+ str(i)) \n reviewdf.columns = rr\n\n\n for i in range(len(basicdf[\"subcategory\"])):\n if basicdf[\"subcategory\"][i] in ['법률','노동/노무','지식재산/특허',\"등기/공탁/법무\",'민원/행정']:\n main_category.append([\"법률\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['세금/세무','회계/감사','통관/관세','온라인 마케팅','온라인쇼핑몰','엑스퍼트 사업','경영/기술컨설팅','유통관리','가맹점창업','건축','번역/통역','날씨컨설팅','원가 분석']:\n main_category.append([\"비즈니스\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['자산컨설팅','부동산 상담','손해사정','신용상담','감정평가', '경제동향/전망']:\n main_category.append([\"금융/재테크\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['심리상담','영양/다이어트','MBTI ']:\n main_category.append([\"건강\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['운세/사주','타로카드','작명','꿈해몽','관상','풍수']:\n main_category.append([\"운세\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['펫 관리','연애','육아','명상','패션/스타일','뷰티','요리/홈쿠킹','커피/주류','인테리어','청소/세탁','교통사고 분석','자동차수리']:\n main_category.append([\"생활\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['음악/악기','미술/디자인','공예/공방','무용/ 발레','방송/영화','사진','실용/방송댄스','뮤지컬/공연','낚시','원예/홈가드닝','여행','글쓰기/논술']:\n main_category.append([\"취미\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['입시/진학','해외유학','대학교학습','고등학교학습','중학교학습','초등학교학습']:\n main_category.append([\"교육/학습\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['영어','중국어','일본어','프랑스어', '러시아어']:\n main_category.append([\"외국어학습\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['피트니스','골프','필라테스','요가','생활스포츠','자전거','수상 스포츠','동계 스포츠','유아체육']:\n main_category.append([\"운동/스포츠\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['게임하우투','IT노하우','코딩','오피스문서','동영상 제작']:\n main_category.append([\"IT/컨텐츠\", basicdf[\"subcategory\"][i]])\n\n elif basicdf[\"subcategory\"][i] in ['라이프 코칭','취업','자기PR','공무원시험 ','자격증시험']:\n main_category.append([\"자기계발\", basicdf[\"subcategory\"][i]])\n\n else:\n main_category.append([\"네이버고객센터\", basicdf[\"subcategory\"][i]])\n categorydf = pd.DataFrame(main_category)\n categorydf.columns = [\"Main_category\", \"Sub_category\"]\n\n basicdf = basicdf.drop(['subcategory'], axis = 1) \n\n #merging the dataframes\n frames = [categorydf, urldf, basicdf, maindf, prdf, profiledf, reviewdf, projdf]\n df = 
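None\n    # inserted annotation (illustrative, not in the original): pd.concat with\n    # axis=1 joins the frames column-wise, aligned on the row index, e.g.\n    #   pd.concat([pd.DataFrame({'a': [1]}), pd.DataFrame({'b': [2]})], axis=1)\n    # yields a single row with columns a and b.\n    df = 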
pd.concat(frames, axis = 1)\n\n df.to_csv(\"DATASET.csv\")\nexcept:\n print(\"Error during storing data\")\n","sub_path":"Project01/Final/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":14484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"59546835","text":"\"\"\"\r\nGreengrass related custom resource event handler\r\n\r\nWritten by Nathan Slawson, 2020\r\n\"\"\"\r\nimport json\r\nimport logging\r\nimport sys\r\n\r\nfrom cfnResponse import CfnResponse\r\nfrom manageGreengrass import ManageGreengrass\r\nfrom createIoTThing import CreateIoTThing\r\n\r\nlog = logging.getLogger('cfnGreengrass')\r\nlog.setLevel(logging.INFO)\r\n\r\n# consoleHandler = logging.StreamHandler(sys.stdout)\r\n# consoleHandler.setLevel(logging.INFO)\r\n# formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')\r\n# consoleHandler.setFormatter(formatter)\r\n# log.addHandler(consoleHandler)\r\n\r\n\r\ndef handler(event, context):\r\n \"\"\"\r\n Out event handler. This decides what type of event type it is, and calls the appropriate event in the related class.\r\n :param event: Lambda event\r\n :param context: Lambda context\r\n :return:\r\n \"\"\"\r\n try:\r\n log.info(json.dumps(event, indent=4))\r\n resourceProps = event['ResourceProperties']\r\n\r\n responseData = {}\r\n\r\n stackName = resourceProps['StackName']\r\n eventType = resourceProps['EventType']\r\n\r\n if eventType == 'CreateIoTThing':\r\n thingName = event['ResourceProperties']['ThingName']\r\n gatewayID = event['ResourceProperties']['GatewayID']\r\n responseData = CreateIoTThing(stackName=stackName, thingName=thingName, gatewayID=gatewayID).handleEvent(event)\r\n\r\n elif eventType == 'ManageGreengrass':\r\n responseData = ManageGreengrass(stackName=stackName).handleEvent(event)\r\n\r\n CfnResponse().send(event, context, responseData=responseData)\r\n\r\n return\r\n\r\n except Exception as genErr:\r\n log.exception(genErr)\r\n\r\n CfnResponse().error(event, context)\r\n","sub_path":"functions/source/GreengrassLambda/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"450797234","text":"from fromxty import crop_scale, getIgnore, save_anotation\nimport os\nimport sys\nimport json\nimport glob\nimport math\n\ndef main(labelsFolder, destFolder, fileList):\n if not labelsFolder.endswith('/'):\n labelsFolder += '/'\n\n if not destFolder.endswith('/'):\n destFolder += '/'\n\n with open(fileList, 'r') as file: \n for line in file.readlines():\n props = line.split(' ')\n\n file = 'tr_kitti_' + props[0].split('.')[0] + '.txt'\n width = int(props[1])\n height = int(props[2])\n\n print(file + ' ' + str(width) + ' ' + str(height)) \n \n if (os.path.isfile(labelsFolder + file) and os.path.getsize(labelsFolder + file) > 0):\n with open(labelsFolder + file, 'r') as f, open(destFolder + file, 'w') as g:\n for l in f.readlines():\n ss = l.split(' ')\n label = ss[0].lower()\n\n if not (label in getIgnore()):\n _x1 = int(ss[4].replace(',', '.').split('.')[0]) # achei alguns arquivos com , no lugar de .\n _y1 = int(ss[5].replace(',', '.').split('.')[0])\n _x2 = int(ss[6].replace(',', '.').split('.')[0])\n _y2 = int(ss[7].replace(',', '.').split('.')[0])\n\n _x = min(_x1, _x2)\n _y = min(_y1, _y2)\n\n _w = max(_x1, _x2) - _x\n _h = max(_y1, _y2) - _y\n\n x = _x\n y = _y\n w = _w\n h = _h\n\n print(label + ' ' + str(x) + ' ' + str(y) + ' ' + str(w) + ' ' + ' 
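' + str(h))\n                            # inserted annotation (not in the original): KITTI label columns\n                            # 4..7 hold the 2D bounding box as x1 (left), y1 (top), x2 (right),\n                            # y2 (bottom) in pixels; the min/max above re-orders them defensively.\n                            _ = (label + ' ' + str(x) + ' ' + str(y) + ' ' + str(w) + ' ' + ' 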
' + str(h))\n                            \n                            x, y, w, h = crop_scale(_x, _y, _w, _h, width, height)\n\n                            if 0 < w and 0 < h:\n                                save_anotation(g, label, x, y, w, h)\n                            \n\nif __name__ == \"__main__\":\n    if 3 < len(sys.argv):\n        main(sys.argv[1], sys.argv[2], sys.argv[3].lower())\n    else:\n        print(\"usage: python3 source/folder/path/ destination/folder/path/ fileImageList\\n\")\n","sub_path":"fix_kitti_smaller.py","file_name":"fix_kitti_smaller.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"451682183","text":"#!/usr/bin/env python\n\n# encoding: utf-8\n\n\"\"\"\n@author: swensun\n\n@github:https://github.com/yunshuipiao\n\n@software: python\n\n@file: longest_non_repeat.py\n\n@desc: longest non-repeating substring (a substring is contiguous; a subsequence need not be)\n\n@hint:\n\"\"\"\n\n\ndef longest_non_repeat(string):\n    if string is None:\n        return 0\n    temp = []\n    max_len = 0\n    for i in string:\n        if i in temp:\n            temp = temp[temp.index(i) + 1:]  # drop everything up to the first repeat\n        temp.append(i)\n        max_len = max(max_len, len(temp))\n    return max_len\n\ndef longest_non_repeat_two(string):\n    if string is None:\n        return 0\n    start, max_len = 0, 0\n    used_char = {}\n    for index, char in enumerate(string):\n        if char in used_char and start <= used_char[char]:\n            start = used_char[char] + 1\n        else:\n            max_len = max(max_len, index - start + 1)\n        used_char[char] = index\n    return max_len\n\nif __name__ == '__main__':\n    a = \"abcabcdefbb\"\n    print(a)\n    print(longest_non_repeat(a))\n    print(longest_non_repeat_two(a))\n","sub_path":"pythonic/array_questions/longest_non_repeat.py","file_name":"longest_non_repeat.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"568775512","text":"__author__ = 'Jwely'\n\nimport pip\nimport os\n\n\ndef main():\n    # just change this directory to the local filepath of the resource folder\n    wheel_dir = r\"C:\\Users\\Jeff\\Downloads\\python27_64bit_resources\"\n\n    # install microsoft visual studio. 
You may hit cancel without error if its already installed.\n os.system(os.path.join(wheel_dir, r\"VCForPython27.msi\"))\n\n # pip installation thingies\n pip.main([\"install\", \"--upgrade\", \"pip\"])\n pip.main([\"install\", os.path.join(wheel_dir, \"numpy-1.9.3+mkl-cp27-none-win_amd64.whl\")])\n pip.main([\"install\", \"pandas\"])\n pip.main([\"install\", \"matplotlib\"])\n #pip.main([\"install\", \"openpiv\"]) didnt end up using\n #pip.main([\"install\", \"plotly\"]) didn't end up using\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"py/utils/check_dependencies.py","file_name":"check_dependencies.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"302097067","text":"# -*- coding: utf-8 -*-\n\nfrom sqlalchemy.orm import joinedload\n\nfrom base import authenticated\nfrom base_note import BaseNoteHandler\nfrom orgtag import BaseOrgtagHandler\nfrom org import BaseOrgHandler\nfrom model import Note\n\n\n\nclass NoteListHandler(BaseNoteHandler):\n def get(self):\n note_list = self.orm.query(Note)\n\n note_list = self.filter_visibility(\n note_list, Note, self.parameters[\"visibility\"])\n\n note_search = self.get_argument(\"note_search\", None)\n note_order = self.get_argument_order(\"note_order\", None)\n note_list = self._filter_search(note_list, note_search, note_order)\n\n note_list = [note.obj(public=bool(self.current_user)) \\\n for note in note_list.limit(20)]\n\n self.render(\n 'note_list.html',\n note_list=note_list,\n note_search=note_search,\n note_order=note_order,\n )\n\n def post(self):\n text, source, public = BaseNoteHandler._get_arguments(self)\n note = Note(text, source,\n moderation_user=self.current_user,\n public=public,\n )\n self.orm.add(note)\n self.orm.commit()\n self.redirect(self.next or note.url)\n\n\n\nclass NoteLinkHandler(BaseNoteHandler, BaseOrgHandler, BaseOrgtagHandler):\n @authenticated\n def get(self, note_id_string):\n orgtag_search = self.get_argument(\"orgtag_search\", None)\n orgtag_list = self._get_orgtag_list_search(search=orgtag_search)\n\n org_search = self.get_argument(\"org_search\", None)\n org_list, org_count, geobox, latlon = self._get_org_list_search(name_search=org_search)\n\n note = self._get_note(note_id_string)\n self.render(\n 'note_link.html',\n note=note,\n orgtag_search=orgtag_search,\n orgtag_list=orgtag_list,\n org_search=org_search,\n org_list=org_list,\n org_count=org_count,\n )\n\n\n\nclass NoteNewHandler(BaseNoteHandler):\n def get(self):\n self.render('note.html')\n\n\n\nclass NoteHandler(BaseNoteHandler):\n def get(self, note_id_string):\n public = bool(self.current_user)\n\n if self.deep_visible():\n options = (\n joinedload(\"address_list\"),\n joinedload(\"orgtag_list\"),\n joinedload(\"org_list\"),\n )\n else:\n options = (\n joinedload(\"address_list_public\"),\n joinedload(\"orgtag_list_public\"),\n joinedload(\"org_list_public\"),\n )\n\n note = self._get_note(note_id_string, options=options)\n\n if self.deep_visible():\n address_list=note.address_list\n orgtag_list=note.orgtag_list\n org_list=note.org_list\n else:\n address_list=note.address_list_public\n orgtag_list=note.orgtag_list_public\n org_list=note.org_list_public\n\n address_list = [address.obj(public=public) for address in address_list]\n orgtag_list = [orgtag.obj(public=public) for orgtag in orgtag_list]\n org_list = [org.obj(public=public) for org in org_list]\n\n obj = note.obj(\n public=public,\n address_obj_list=address_list,\n 
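# inserted annotation (not in the original handler): the joinedload() options\n            # above eager-load these relationships in the same query, so building the\n            # *_obj_list values here avoids N+1 lazy loads.\n            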
orgtag_obj_list=orgtag_list,\n org_obj_list=org_list,\n )\n\n if self.accept_type(\"json\"):\n self.write_json(obj)\n else:\n self.render(\n 'note.html',\n obj=obj\n )\n\n @authenticated\n def delete(self, note_id_string):\n note = self._get_note(note_id_string)\n self.orm.delete(note)\n self.orm.commit()\n self.redirect(self.next or \"/note\")\n \n @authenticated\n def put(self, note_id_string):\n note = self._get_note(note_id_string)\n\n text, source, public = BaseNoteHandler._get_arguments(self)\n\n if note.text == text and \\\n note.public == public and \\\n note.source == source:\n self.redirect(self.next or note.url)\n return\n\n note.text = text\n note.source = source\n note.public = public\n note.moderation_user = self.current_user\n self.orm.commit()\n self.redirect(self.next or note.url)\n","sub_path":"handle/note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"228427338","text":"\"\"\"DelayQueue class.\n\nDelay load requests a configurable amount of time before submitting them.\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport threading\nimport time\nfrom typing import TYPE_CHECKING, Callable, List, NamedTuple, Optional\n\nfrom napari.utils.perf import add_counter_event\n\nLOGGER = logging.getLogger(\"napari.loader\")\n\nif TYPE_CHECKING:\n from napari.components.experimental.chunk._request import ChunkRequest\n\n\nclass QueueEntry(NamedTuple):\n \"\"\"The request we are doing to submit and when to submit it.\n\n Parameters\n ----------\n request : ChunkRequest\n The request to submit.\n submit_time : float\n The time to submit the request in time.time() seconds.\n \"\"\"\n\n request: ChunkRequest\n submit_time: float\n\n\nclass DelayQueue(threading.Thread):\n \"\"\"A threaded queue that delays request submission.\n\n The DelayQueue exists so we can avoid spamming the ChunkLoader loader\n pools with requests for chunks that are potentially going to be out of\n view before they are loaded.\n\n For example, when rapidly scrolling through slices, it would be\n pointless to submit requests for every slice we pass through. Instead,\n by using a small delay, we hold back submitting our requests until the\n user has settled on a specific slice.\n\n Similarly with the Octree, when panning and zooming rapidly we might\n choose to delay loads so that we don't waste time loading chunks that\n will quickly be out of view.\n\n With the Octree however we do want to show something as you pan and\n zoom around. For this reason, ChunkLoader can have multiple loader\n pools each with different delays. Typically we we delay the \"ideal\n level\" chunks the most, but we load coarser levels sooner. 
We want to\n show the user something quickly, but we only want to load the full set\n of ideal chunks when the camera movement has settled down.\n\n Parameters\n ----------\n delay_queue_ms : float\n Delay the request for this many milliseconds.\n submit_func\n Call this function to submit the request.\n\n Attributes\n ----------\n delay_seconds : float\n Delay each request by this many seconds.\n _submit_func : Callable[[ChunkRequest], None]\n Call this function to submit the request.\n _entries : List[QueueEntry]\n The entries in the queue.\n _lock : threading.Lock\n Lock access to the self.entires queue.\n _event : threading.Event\n Event we signal to wake up the worker.\n \"\"\"\n\n def __init__(\n self,\n delay_queue_ms: float,\n submit_func: Callable[[ChunkRequest], None],\n ) -> None:\n super().__init__(daemon=True)\n self._shutdown = False\n self.delay_seconds: float = delay_queue_ms / 1000\n self._submit_func = submit_func\n\n self._entries: List[QueueEntry] = []\n self._lock = threading.Lock()\n self._wakeup = threading.Event()\n self._exit = threading.Event()\n\n self.start()\n\n def add(self, request) -> None:\n \"\"\"Insert the request into the queue.\n\n Parameters\n ----------\n request : ChunkRequest\n Insert this request into the queue.\n \"\"\"\n if self.delay_seconds == 0:\n self._submit_func(request) # Submit with no delay.\n return\n\n LOGGER.info(\"DelayQueue.add: %s\", request.location)\n\n # Create entry with the time to submit it.\n submit_time = time.time() + self.delay_seconds\n entry = QueueEntry(request, submit_time)\n\n with self._lock:\n self._entries.append(entry)\n num_entries = len(self._entries)\n\n add_counter_event(\"delay_queue\", entries=num_entries)\n\n if num_entries == 1:\n self._wakeup.set() # The list was empty so wake up the worker.\n\n def cancel_requests(\n self, should_cancel: Callable[[ChunkRequest], bool]\n ) -> List[ChunkRequest]:\n \"\"\"Cancel pending requests based on the given filter.\n\n Parameters\n ----------\n should_cancel : Callable[[ChunkRequest], bool]\n Cancel the request if this returns True.\n\n Returns\n -------\n List[ChunkRequests]\n The requests that were cancelled, if any.\n \"\"\"\n keep = []\n cancel = []\n with self._lock:\n for entry in self._entries:\n if should_cancel(entry.request):\n cancel.append(entry.request)\n else:\n keep.append(entry)\n self._entries = keep\n\n return cancel\n\n def submit(self, entry: QueueEntry, now: float) -> bool:\n \"\"\"Submit and return True if entry is ready to be submitted.\n\n Parameters\n ----------\n entry : QueueEntry\n The entry to potentially submit.\n now : float\n Current time in seconds.\n\n Returns\n -------\n bool\n True if the entry was submitted.\n \"\"\"\n # If entry is due to be submitted.\n if entry.submit_time < now:\n LOGGER.info(\"DelayQueue.submit: %s\", entry.request.location)\n self._submit_func(entry.request)\n return True # We submitted this request.\n return False\n\n def run(self):\n \"\"\"The DelayQueue thread's main method.\n\n Submit all due entires, then sleep or wait on self._wakeup\n for new entries.\n \"\"\"\n while self._shutdown is False:\n now = time.time()\n\n with self._lock:\n seconds = self._submit_due_entries(now)\n num_entries = len(self._entries)\n\n add_counter_event(\"delay_queue\", entries=num_entries)\n\n if seconds is None:\n # There were no entries left, so wait until there is one.\n self._wakeup.wait()\n self._wakeup.clear()\n else:\n # Sleep until the next entry is due. 
This will tend to\n # oversleep by a few milliseconds, but close enough for our\n # purposes. Once we wake up we'll submit all due entries.\n # So we won't miss any.\n time.sleep(seconds)\n\n self._exit.set() # We are exiting now.\n\n def shutdown(self) -> None:\n \"\"\"Shutdown the DelayQueue's thread.\"\"\"\n self._shutdown = True\n self._wakeup.set()\n self._exit.wait()\n\n def _submit_due_entries(self, now: float) -> Optional[float]:\n \"\"\"Submit all due entries, oldest to newest.\n\n Parameters\n ----------\n now : float\n Current time in seconds.\n\n Returns\n -------\n Optional[float]\n Seconds until next entry is due, or None if no next entry.\n \"\"\"\n while self._entries:\n # Submit the oldest entry if it's due.\n if self.submit(self._entries[0], now):\n self._entries.pop(0) # Remove the one we just submitted.\n else:\n # Oldest entry is not due, return time until it is.\n return self._entries[0].submit_time - now\n\n return None # There are no more entries.\n\n def flush(self):\n \"\"\"Submit all entries right now.\"\"\"\n with self._lock:\n for entry in self._entries:\n self._submit_func(entry.request)\n self._entries = []\n","sub_path":"napari/components/experimental/chunk/_delay_queue.py","file_name":"_delay_queue.py","file_ext":"py","file_size_in_byte":7400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"304491382","text":"from loader import *\nfrom processData import *\nfrom detect_head_shoulder import *\nfrom display_patterns import *\nfrom detect_double_top_bot import *\n\nimport numpy as np\nimport os.path\nfrom tqdm import tqdm\nimport talib\n\nload_from = '/home/mve/storage/data/Google_jan_mar_2017-8'\n\n\nif __name__ == '__main__':\n print('Gathering data')\n data = loadData(load_from + '.csv')\n print('Creating training dataset')\n X, Y = createX_Y(data)\n\n\n len = len(X)\n X = np.transpose(X).reshape((5,len))\n WINDOW=25\n STEP=1\n Y = []\n counter = 0\n index = 0\n with tqdm(total=X.shape[1]-WINDOW) as pbar:\n for start in range(0, X.shape[1]-WINDOW, STEP):\n pbar.update(STEP)\n\n X2 = X[:, start:start+WINDOW]\n\n res = talib.CDLIDENTICAL3CROWS(X2[0], X2[1], X2[2], X2[3])\n\n if not res.any() == 0:\n #print([i for i, j in enumerate(res) if not j == 0])\n #display_OHLC(X2)\n counter+=1\n '''trios = detect_double_top(X2)\n if trios:\n counter+=1\n #display_double_top_bot(X2, trios, True)'''\n print(counter)\n","sub_path":"look_for_pattern_talib.py","file_name":"look_for_pattern_talib.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"593797324","text":"import csv\nimport json\nimport time\n\nimport h5py\nimport numpy as np\nfrom keras.layers.advanced_activations import LeakyReLU,PReLU\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping\nfrom keras.models import Model, Sequential, load_model\nfrom keras.regularizers import l1_l2, l2\nfrom keras import initializers\nfrom keras import callbacks\n\nfrom keras.layers import Input, Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, TimeDistributed, LSTM\nfrom keras.optimizers import adagrad, SGD, Adam\n\nimport ThermographicDataHandler\nimport DoubleExponentialHeatingModel\nimport IrrigationDetector\n\nimport matplotlib.pyplot as plt\nimport VAE2\n\nfrom keras.layers.convolutional import Conv1D,MaxPooling1D\nfrom 
keras.layers.normalization import BatchNormalization\n\npModel = \"../models/model_medium_new_relu.h5\"\npModelTrain = \"../models/model_medium_new_relu_beta.h5\"\npModelAE_vae = \"../models/model_ae_vae_medium_new.h5\"\npModelAE_encoder = \"../models/model_ae_encoder_medium_new.h5\"\npDPACNNCP = \"dpa_CNN.{epoch:02d}-{val_acc:.2f}-{val_loss:.2f}.hdf5.h5\"\n\nrndSeed = 23\nnoEpochs = 500\nbatchSz = int(2*4096)\n#batchSz = 2048\nregulPen = l2(0.001)\n\n\ndef scaleParameters(param):\n\t#param[:,4] -= 32\n\t#scalingFactor = np.array([-0.5, -1, -6, -6, 6]) # used to be 32\n\t#param /= scalingFactor\n\treturn param\n\n\ndef unscaleParameters(param):\n\t#scalingFactor = np.array([-0.5, -1, -6, -6, 6]) # used to be 32\n\t#param *= scalingFactor\n\t#param[:,4] += 32\n\treturn param\n\n\ndef scaleData(ts):\n\t#ts2 = ts / 36\n\t#ts2 = ts - 28\n\t#ts2 = ts2 / 5\n\treturn ts\n\n\ndef unscaleData(ts):\n\tts2 = ts * 5\n\tts2 = ts2 + 28\n\treturn ts\n\n\ndef buildMICCAIModel(inputSz):\n\tregulPen = l2(0.001)\n\t#regulPen = l1_l2(l1=0.01, l2=0.01) # used to 0.01/0.01 22.5.2017\n\n\t## Architecture\n\tmodel = Sequential()\n\t\t\n\tmodel.add(Dense(50, input_dim=inputSz,kernel_initializer=initializers.he_normal(seed=None), W_regularizer=regulPen)) # , init='he_uniform' % 5 # sigmoid\n ##model.add(LeakyReLU(alpha=0.3))\n\t#model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None))\n\t#model.add(Dropout(0.5))\n\t\n\t\n\t#model.add(Dense(50,kernel_initializer=initializers.he_normal(seed=None), W_regularizer=regulPen)) # tanh # linear\n\t##model.add(LeakyReLU(alpha=0.3))\n\t#model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None))\n\t#model.add(Dropout(0.5))\n\t\n\t\n\t#model.add(Dense(50, kernel_initializer=initializers.he_normal(seed=None), W_regularizer=regulPen)) # tanh\n\t##model.add(LeakyReLU(alpha=0.3))\n\t#model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None))\n\t#model.add(Dropout(0.5))\n\n\t\n\t\n\t#model.add(Dense(50, kernel_initializer=initializers.he_normal(seed=None), W_regularizer=regulPen)) # tanh\n\tmodel.add(Activation('relu'))\n\tmodel.add(Dense(50, kernel_initializer=initializers.he_normal(seed=None), W_regularizer=regulPen)) # tanh\n\tmodel.add(Activation('relu'))\n\tmodel.add(Dense(50, kernel_initializer=initializers.he_normal(seed=None), W_regularizer=regulPen)) # tanh\n\tmodel.add(Activation('relu'))\n\t\n\tmodel.add(Dense(5, kernel_initializer=initializers.he_normal(seed=None), W_regularizer=regulPen)) # % 4 # linear\n\tada = adagrad(lr=0.001)\n\tsgd = SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=False)\n\t#earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')\n\t\n\tmodel.compile(loss='mse', optimizer=ada)\n\treturn model\n\ndef build1DCNNModel(inputSz):\n\tfilter_length = 32\n\tnb_filter = 32\n\t\n\t# Input Layer\n\tinp = Input(shape=(inputSz,1))\n\t\n\t# Conv Layer\n\tconv1 = Conv1D(filters=nb_filter,\n\t\t\t\tkernel_size=filter_length,\n\t\t\t\tpadding=\"valid\",\n\t\t\t\tactivation=\"relu\")(inp)\n\tmp1 = MaxPooling1D(pool_size=2)(conv1)\n\tbn1 = BatchNormalization()(mp1)\n\t\n\t# Conv Layer 2\n\tnb_filter = int(2 * nb_filter)\n\tfilter_length = int(filter_length / 2)\n\tconv2 = Conv1D(filters=nb_filter,\n\t\t\t\tkernel_size=filter_length,\n\t\t\t\tpadding=\"valid\",\n\t\t\t\tactivation=\"relu\")(bn1)\n\tmp2 = MaxPooling1D(pool_size=2)(conv2)\n\tbn2 = BatchNormalization()(mp2)\n\t\n\t# 
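\n\t# --- inserted annotation (illustrative sketch, not in the original) ---\n\t# each block repeats conv -> pool -> batchnorm, doubling nb_filter and\n\t# halving filter_length; an equivalent loop over the five blocks:\n\t#   x = inp\n\t#   for _ in range(5):\n\t#       x = Conv1D(filters=nb_filter, kernel_size=filter_length,\n\t#                  padding=\"valid\", activation=\"relu\")(x)\n\t#       x = MaxPooling1D(pool_size=2)(x)\n\t#       x = BatchNormalization()(x)\n\t#       nb_filter *= 2\n\t#       filter_length //= 2\n\t# --- end annotation ---\n\t# 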
Conv Layer 3\n\tnb_filter = int(2 * nb_filter)\n\tfilter_length = int(filter_length / 2)\n\tconv3 = Conv1D(filters=nb_filter,\n\t\t\t\tkernel_size=filter_length,\n\t\t\t\tpadding=\"valid\",\n\t\t\t\tactivation=\"relu\")(bn2)\n\tmp3 = MaxPooling1D(pool_size=2)(conv3)\n\tbn3 = BatchNormalization()(mp3)\n\t\n\t# Conv Layer 4\n\tnb_filter = int(2 * nb_filter)\n\tfilter_length = int(filter_length / 2)\n\tconv4 = Conv1D(filters=nb_filter,\n\t\t\t\tkernel_size=filter_length,\n\t\t\t\tpadding=\"valid\",\n\t\t\t\tactivation=\"relu\")(bn3)\n\tmp4 = MaxPooling1D(pool_size=2)(conv4)\n\tbn4 = BatchNormalization()(mp4)\n\t\n\t# Conv Layer 5\n\tnb_filter = int(2 * nb_filter)\n\tfilter_length = int(filter_length / 2)\n\tconv5 = Conv1D(filters=nb_filter,\n\t\t\t\tkernel_size=filter_length,\n\t\t\t\tpadding=\"valid\",\n\t\t\t\tactivation=\"relu\")(bn4)\n\tmp5 = MaxPooling1D(pool_size=2)(conv5)\n\tbn5 = BatchNormalization()(mp5)\n\t\n\t# Regression Layers\n\t#d1 = Dense(1000, activation='relu')(bn4)\n\td2 = Dense(1000, activation='relu')(bn5)\n\tfl = Flatten()(d2)\n\tout = Dense(5, activation='linear')(fl)\n\n\tmodel = Model(inputs=inp, outputs=out)\n\toptimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n\tmodel.compile(optimizer=optimizer, loss='mae', metrics=['accuracy'])\n\tmodel.summary()\n\n\treturn model\n\n\ndef train(pTrainingData,windowSz,doLoadModel):\n\t#############\n\t### CONSTANTS\n\t#############\n\t#regulPen = l1_l2(l1=0.01, l2=0.01) # used to 0.01/0.01 22.5.2017\n\n\n\t###################\n\t### Train regressor (MICCAI'17 architecture)\n\t###################\n\tprint(\"loading training data \" + pTrainingData)\n\twith h5py.File(pTrainingData, 'r') as hf:\n\t\tx_train = np.array(hf.get('x_train'))\n\t\tx_test = np.array(hf.get('x_test'))\n\t\ty_train = np.array(hf.get('y_train'))\n\t\ty_test = np.array(hf.get('y_test'))\n\t\n\tx_train = x_train[1:50000,:]\n\ty_train = y_train[1:50000,:]\n\t\n\tx_train = scaleData(x_train)\n\tx_test = scaleData(x_test)\n\ty_train = scaleParameters(y_train)\n\ty_test = scaleParameters(y_test)\n\n#\tmodel = buildMICCAIModel(300)\n#\tmodel = buildMICCAIModel(VAE2.n_z);\n#\tmodel = build1DCNNModel(VAE2.n_z)\n\tmodel = build1DCNNModel(300)\n#\tif(doLoadModel==1):\n#\t\t#model = load_model(pModel)\n#\t\t#vae = load_model(pModelAE_vae)\n#\t\tencoder = load_model(pModelAE_encoder)\n#\telse:\n#\t\tprint('train VAE')\n#\t\tvae, encoder = VAE2.train(x_train, noEpochsVAE)\n#\tprint('predicting latent states')\n#\tx_train = encoder.predict(x_train)\n#\tx_test = encoder.predict(x_test)\n#\tprint(str(x_test.shape))\n\tx_train = x_train[..., np.newaxis]\n\tx_test = x_test[..., np.newaxis]\n\t\n\tcheckpoint = ModelCheckpoint(pDPACNNCP, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n\thistLogger = CSVLogger(\"epochLog.csv\", separator=',', append=False)\n\treduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=5, cooldown=0, verbose=1)\n\t#earlyStop = EarlyStopping(monitor='val_loss', patience=20, verbose=0),\n\tcallbacks_list = [checkpoint, reduce_lr, histLogger]\n\t\n\tmodel.fit(x_train, y_train, batch_size=batchSz, epochs=noEpochs, shuffle=True, validation_split=0.1, verbose=2, callbacks=callbacks_list)\n\tscore = model.evaluate(x_test, y_test, batch_size=batchSz)\n\tprint(\"\\n\\nscore \" + str(score))\n\ty_ai = model.predict(x_test)\n\tprint('ai' + str(y_ai[0,:]) + '\\ngt' + str(y_test[0,:]) + '\\n')\n\tprint('ai' + str(y_ai[1,:]) + '\\ngt' + str(y_test[1,:]) + '\\n')\n\tprint('ai' + str(y_ai[2,:]) + '\\ngt' + 
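str(y_test[2,:]) + '\\n')\n\t# inserted annotation (not in the original): scaleData/scaleParameters and their\n\t# unscale counterparts are identity passthroughs in this revision (their scaling\n\t# bodies are commented out above), so the network currently trains on raw values.\n\t_ = ('ai' + str(y_ai[2,:]) + '\\ngt' + 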
str(y_test[2,:]) + '\\n')\n\n\t### store model\n\tmodel.save(pModelTrain)\n\t\n\tpTrainingData=\"../Data/irr_dr_training_new.mat\"\n\tf = h5py.File(pTrainingData, \"r\")\n\tbg = f[\"seqBackground\"]\n\tfg = f[\"seqSpuelung\"]\n\tx_fg = ThermographicDataHandler.slidingWindowPartitioning(np.array(fg.value).T, windowSz, 150)\n\tx_bg = ThermographicDataHandler.slidingWindowPartitioning(np.array(bg.value).T, windowSz, 150)\n\t\n#\tx_ai = model.predict(encoder.predict(scaleData(x_fg[:,0,:])))\n#\tx_ai_bg = model.predict(encoder.predict(scaleData(x_bg[:,0,:])))\n\tx_fg = x_fg[..., np.newaxis]\n\tx_bg = x_bg[..., np.newaxis]\n\tx_ai = model.predict((scaleData(x_fg[:,0,:,:])))\n\tx_ai_bg = model.predict((scaleData(x_bg[:,0,:,:])))\n\tprint(\"fg: \" + str(x_ai[0,]))\n\tprint(\"bg_1: \" + str(x_ai_bg[0,]))\n\tprint(\"bg_2: \" + str(x_ai_bg[1,]))\n\n\ndef testModel():\n\twindowSz = 300\n\t\n\tmodel = load_model('dpa_beta_0.38.h5')\n\tpTrainingData = \"trainDoubleExpHeating_60Hz.h5\"\n\tprint(\"loading synthetic data \" + pTrainingData)\n\twith h5py.File(pTrainingData, 'r') as hf:\n\t\tx_test = np.array(hf.get('x_test'))\n\t\ty_test = np.array(hf.get('y_test'))\n\tx_test = x_test[..., np.newaxis]\n\ty_ai = model.predict(x_test)\n\tprint('ai' + str(y_ai[0,:]) + '\\ngt' + str(y_test[0,:]) + '\\n')\n\tprint('ai' + str(y_ai[1,:]) + '\\ngt' + str(y_test[1,:]) + '\\n')\n\tprint('ai' + str(y_ai[2,:]) + '\\ngt' + str(y_test[2,:]) + '\\n')\n\t\n\tprint(\"loading io. data \")\n\tpTrainingData=\"../Data/irr_dr_training_new.mat\"\n\tf = h5py.File(pTrainingData, \"r\")\n\tbg = f[\"seqBackground\"]\n\tfg = f[\"seqSpuelung\"]\n\tx_fg = ThermographicDataHandler.slidingWindowPartitioning(np.array(fg.value).T, windowSz, 150)\n\tx_bg = ThermographicDataHandler.slidingWindowPartitioning(np.array(bg.value).T, windowSz, 150)\n\t\n#\tx_ai = model.predict(encoder.predict(scaleData(x_fg[:,0,:])))\n#\tx_ai_bg = model.predict(encoder.predict(scaleData(x_bg[:,0,:])))\n\tx_fg = x_fg[..., np.newaxis]\n\tx_bg = x_bg[..., np.newaxis]\n\tx_ai = model.predict((scaleData(x_fg[:,0,:,:])))\n\tx_ai_bg = model.predict((scaleData(x_bg[:,0,:,:])))\n\tprint(\"fg: \" + str(x_ai[0,]))\n\tprint(\"bg_1: \" + str(x_ai_bg[0,]))\n\tprint(\"bg_2: \" + str(x_ai_bg[1,]))\n\n\ndef predict(X):\n dpa = load_model(pModel)\n x_ai = dpa.predict(scaleData(X))\n return x_ai\n\n\ndef evalPerformance(noSamples, windowSz):\n\tnp.random.seed(2342)\n\tdata = 0 * np.ones((noSamples, 16))\n\tfor k in range(0, noSamples):\n\t\t(y, param) = DoubleExponentialHeatingModel.generateTrainingSampleHeating(1, 300)\n\t\tyraw = y\n\t\tprint(\"x_ast: \" + str(param))\n\t\t#print(\"\\n\\n*** Starting optimization scheme with near optimal initialization\")\n\t\tx = param + 0.1 * np.random.rand(1, 5)\n\t\tt = np.linspace(0, 4, windowSz)\n #print(\"x_ast: \" + str(param))\n\t\t#print(\"x_no: \" + str(x))\n\t\t# (i,x,f) = gnFit(y,x,t)\n\t\tt1 = time.time()\n\t\t(i, x, f) = DoubleExponentialHeatingModel.lmFit(y, x, t)\n\t\tt2 = time.time() - t1\n\t\tgn_no_noIterations = i\n\t\tgn_no_ss = np.linalg.norm(f)\n\t\tgn_no_epsParam = np.linalg.norm(x[0:1] - param[0, 0:1])\n\t\tgn_no_eps = np.linalg.norm(x.transpose() - param[0,])\n\t\tdata[k, 0] = gn_no_noIterations\n\t\tdata[k, 1] = t2\n\t\tdata[k, 2] = gn_no_epsParam\n\t\tdata[k, 3] = gn_no_eps\n\n\t\t#print(\"\\n\\n*** Starting optimization scheme with AI initialization\")\n\t\t#pModelWeights = \"model_weights_2017_25\"\n\t\t#pModelConfig = \"model_config_2017_25\"\n\t\theatingParamAI = IrrigationDetector.loadGenericModel(pModelConfig, 
pWeights)\n\t\tt = np.linspace(0, 4, windowSz)\n\t\tt1 = time.time()\n\t\tx = heatingParamAI.predict(scaleData(y))\n\t\tt2 = time.time() - t1\n\t\tx = unscaleParameters(x)\n\t\tprint(\"x_ai: \" + str(x))\n\t\tyh = DoubleExponentialHeatingModel.doubleExponentialFunction(t, x[0,])\n\t\tf = (y) - (yh)\n\t\tai_noIterations = 1\n\t\tai_ss = np.linalg.norm(f)\n\t\tai_epsParam = np.linalg.norm(x[0, 0:1] - param[0, 0:1])\n\t\tai_eps = np.linalg.norm(x - param[0,])\n\t\tdata[k, 4] = ai_noIterations\n\t\tdata[k, 5] = t2\n\t\tdata[k, 6] = ai_epsParam\n\t\tdata[k, 7] = ai_eps\n\t\t# (i,x,f) = gnFit(y,x,t)\n\t\tt1 = time.time()\n\t\t(i, x, f) = DoubleExponentialHeatingModel.lmFit(y, x, t)\n\t\tt2 = time.time() - t1\n\t\tgn_ai_noIterations = i\n\t\tgn_ai_ss = np.linalg.norm(f)\n\t\tgn_ai_epsParam = np.linalg.norm(x[0:1] - param[0, 0:1])\n\t\tgn_ai_eps = np.linalg.norm(x - param[0,])\n\t\tdata[k, 8] = gn_ai_noIterations\n\t\tdata[k, 9] = t2\n\t\tdata[k, 10] = gn_ai_epsParam\n\t\tdata[k, 11] = gn_ai_eps\n\n\t\t#print(\"\\n\\n*** Starting optimization scheme with random initialization\")\n\t\tx = -1 * np.random.rand(1, 5)\n\t\tx[0, 4] = 2 * x[0, 4] + 28\n\t\tt = np.linspace(0, 4, windowSz)\n\t\t#print(\"x_rnd: \" + str(x))\n\t\t# (i,x,f) = gnFit(y,x,t)\n\t\tt1 = time.time()\n\t\t(i, x, f) = DoubleExponentialHeatingModel.lmFit(y, x, t)\n\t\tt2 = time.time() - t1\n\t\tgn_rand_noIterations = i\n\t\tgn_rand_ss = np.linalg.norm(f)\n\t\tgn_rand_epsParam = np.linalg.norm(x[0:1] - param[0, 0:1])\n\t\tgn_rand_eps = np.linalg.norm(x - param[0,])\n\t\tdata[k, 12] = gn_rand_noIterations\n\t\tdata[k, 13] = t2\n\t\tdata[k, 14] = gn_rand_epsParam\n\t\tdata[k, 15] = gn_rand_eps\n\n\t\t#print(\"\\n\\nStatistics\")\n\t\t#print(\"==========\")\n\t\t#print(\n\t\t#\"NO : \" + str(gn_no_noIterations) + \" ss \" + str(gn_no_ss) + \" epsParam \" + str(gn_no_epsParam) + \" eps \" + str(\n\t\t#print(\n\t\t#\"AI : \" + str(ai_noIterations) + \" ss \" + str(ai_ss) + \" epsParam \" + str(ai_epsParam) + \" eps \" + str(ai_eps))\n\t\t#print(\"AILM : \" + str(gn_ai_noIterations) + \" ss \" + str(gn_ai_ss) + \" epsParam \" + str(\n\t\t#\tgn_ai_epsParam) + \" eps \" + str(gn_ai_eps))\n\t\t#print(\"RND: \" + str(gn_rand_noIterations) + \" ss \" + str(gn_rand_ss) + \" epsParam \" + str(\n\t\t#\tgn_rand_epsParam) + \" eps \" + str(gn_rand_eps))\n\n\t\tdd = np.mean(data, axis=0)\n \n\t\tprint(\"\\n\\nAccuracy Statistics\")\n\t\tprint(\"==========\")\n\t\tprint(\"NO : \" + str(dd[0]) + \" epsParam \" + str(dd[2]) + \" eps \" + str(dd[3]))\n\t\tprint(\"RND: \" + str(dd[12]) + \" epsParam \" + str(dd[14]) + \" eps \" + str(dd[15]))\n\t\tprint(\"AI : \" + str(dd[4]) + \" epsParam \" + str(dd[6]) + \" eps \" + str(dd[7]))\n\t\tprint(\"AILM : \" + str(dd[8]) + \" epsParam \" + str(dd[10]) + \" eps \" + str(dd[11]))\n\n\tb = open('eval_results.csv', 'w')\n\ta = csv.writer(b)\n\ta.writerows(data)\n\tb.close()\n\treturn data\n\ndef main():\n\tpData = \"trainDoubleExpHeating_60Hz.h5\"\n\ttrain(pData, 300, 0)\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"IrrigationAnalysis/DeepHeatingParameterApproximation.py","file_name":"DeepHeatingParameterApproximation.py","file_ext":"py","file_size_in_byte":13564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"307509262","text":"import boto3\nimport json\nimport datetime as DT\nimport os\n\nprint(os.environ[\"AWS_PROFILE\"])\n\n# fyi\nprint(f\"Using Account; {boto3.client('sts').get_caller_identity().get('Account')}\")\n\nddb = 
boto3.resource('dynamodb')\ntable = ddb.Table(\"cl_syn-csi_configs\")\n\nitem={\n \"app_id\": \"DSD1\",\n \"name\": \"DSD1 Database\",\n \"db\": {\n \"host\": \"10.0.3.99\",\n \"port\": 5432,\n \"database\": \"dsd1\",\n \"user\": \"david\",\n \"password\": \"\"\n },\n \"created_at\": DT.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S %z\")\n }\n\ntable.put_item(Item=item)\n\n# read it back\nprint(item['app_id'])\nresponse = table.get_item(\n Key={\n \"app_id\": item['app_id'],\n }\n)\n\nprint(response)\n\n# and report\nitem = response['Item']\nprint(f'Saved item: {item}')","sub_path":"ddbScripts/addConf.py","file_name":"addConf.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"235366502","text":"import sys, time, os, logging\nfrom watchdog.observers import Observer\nfrom watchdog.events import LoggingEventHandler, FileSystemEventHandler\n\nclass Show(FileSystemEventHandler):\n def on_created(self, event):\n # Image viewer started as another system process because\n # matplotlib is used, and cannot be started on a separate\n # thread in the main process\n if event.src_path.split('.')[-1] == 'png':\n os.system(\"python show.py %s\" % event.src_path)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n path = sys.argv[1] if len(sys.argv) > 1 else '.'\n event_handler = LoggingEventHandler()\n observer = Observer()\n observer.schedule(event_handler, path)\n observer.schedule(Show(), path)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","sub_path":"watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"347862102","text":"import urllib.request\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\n\r\nf = open('links.txt', 'r')\r\n#Set a list to hold the links\r\nlinks = []\r\n#Set a list to hold the number of Mementos of the link\r\nmementos = []\r\n\r\n#Read links from the file 1 by 1\r\nfor line in f:\r\n links.append(line)\r\n memento = 0\r\n\r\n try:\r\n #Get the TimeMaps html\r\n response = urllib.request.urlopen('http://memgator.cs.odu.edu/timemap/link/'+line)\r\n html = response.read()\r\n soup = BeautifulSoup(html, \"html.parser\")\r\n \r\n #Traverse all the descendants and count the number of Mementos\r\n for child in soup.descendants:\r\n #For each descendant, search if there is any Memento in it\r\n if re.search(r'rel=\".*memento\"', str(child.string)):\r\n memento = memento+1\r\n\r\n mementos.append(memento)\r\n except:\r\n mementos.append(0)\r\n continue\r\n\r\nf.close()\r\n\r\n#Save the links and numbers of Mementos to a file\r\nf = open('data_hist.csv', 'w')\r\nline = f.write('URI\\tmementos\\n')\r\nfor i in range(len(links)):\r\n line = f.write(links[i].replace('\\n','')+'\\t{}'.format(mementos[i])+'\\n')\r\n\r\nf.close()\r\n","sub_path":"assg02_submission/twitter_ComputeMementos.py","file_name":"twitter_ComputeMementos.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"225267085","text":"#Uses python3\n\nimport sys\n\ndef diff_size(a, b):\n return len(a) != len(b)\n\ndef find_short(a, b):\n if len(a) > len(b):\n return (b, a)\n else:\n return (a, b)\n\ndef larger2(a,b):\n option1 = a+b\n option2 = b+a\n\n if 
int(option2) > int(option1):\n        return b, a\n    else:\n        return a,b\n\ndef larger(a, b):\n    if diff_size(a, b):\n        short, longer = find_short(a, b)\n        if int(short) > int(longer[:len(short)]): # if the shorter one is larger, it comes first\n            first, second = short, longer\n            return first, second\n        elif int(short) == int(longer[:len(short)]):\n            extra_length = len(longer)-len(short)\n            tail = longer[-extra_length:]\n            head = longer[0:extra_length]\n            # print(tail, head)\n            if int(tail) - int(head) > 0:\n                first, second = longer, short # if the prefixes are equal, check whether the overhanging tail beats the head\n                return first, second # if it is equal or smaller, it goes after\n            else:\n                first, second = short, longer\n                return first, second\n        else:\n            first, second = longer, short # if the longer one is larger, it comes first\n            return first, second\n    else:\n        first, second = str(max([int(a), int(b)])), str(min([int(a), int(b)]))\n        return first, second \n\n\n\n\ndef largest_number(a):\n    for i1 in range(len(a)-1):\n        # print(a)\n        for i2 in range(i1+1, len(a)):\n            # print(a[i1], a[i2])\n            first, second = larger2(a[i1], a[i2])\n            a[i1], a[i2] = first, second\n        # print(a)\n    res = \"\"\n    for x in a:\n        res += x\n    return res\n\nif __name__ == '__main__':\n    input = sys.stdin.read()\n    data = input.split()\n    a = data[1:]\n    print(largest_number(a))\n\n# l1 = ['21', '2']\n# l2 = ['9', '4', '6', '1', '9']\n# l3 = ['23', '39', '92']\n# l4 = ['1', '9', '23', '456', '789']\n# l5 = ['129', '1291291']\n# print(largest_number(l2))\n# print(largest_number(l3))\n# # print('')\n# print(largest_number(l4))\n# print(largest_number(l1))\n# print(largest_number(l5)) \n","sub_path":"algorithmic_toolbox/w3/largest_number/largest_number.py","file_name":"largest_number.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} {"seq_id":"497440716","text":"#!/usr/bin/python\n\n# Authentication for a simple get request to Elemental Live.\n# python auth_request.py --login <login> --api_key <api_key> <url>\n#\n# link to Elemental's ruby version:\n# https://github.com/guardian/content_delivery_system/blob/master/CDS/Ruby/lib/Elemental/auth_curl.rb\n\nimport argparse\nimport requests\nimport time\nimport hashlib\n\n# parse login user and api-key\nparser = argparse.ArgumentParser()\nparser.add_argument('--login', nargs=1)\nparser.add_argument('--api_key', nargs=1)\nargs = parser.parse_known_args()\n\nheaders = {\"Accept\":\"\", \"X-Auth-User\":\"\", \"X-Auth-Expires\":\"\", \"X-Auth-Key\":\"\"}\napi_key = args[0].api_key[0]\nurl = args[-1][0] # url is last argument\n\n# Set the response Type. Hardcoded as xml\nheaders[\"Accept\"] = \"application/xml\"\n\n# Set the user\nheaders[\"X-Auth-User\"] = args[0].login[0]\n\n# Set the time for session to expire. 
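\n#\n# (aside, a sketch) the signing scheme below can be sanity-checked in isolation;\n# the names mirror the variables in this script:\n#\n#   import hashlib\n#   def sign(sub_url, user, api_key, expires):\n#       inner = hashlib.md5((sub_url + user + api_key + expires).encode('utf-8')).hexdigest()\n#       return hashlib.md5((api_key + inner).encode('utf-8')).hexdigest()\n#\n# 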
Should be ~30 seconds in the future\nheaders[\"X-Auth-Expires\"] = str(int(time.time()) + 30)\n\n# Set the auth key using this algorithm:\n# md5(api_key + md5(url + X-Auth-User + api_key + X-Auth-Expires))\n\n# Extract every part of the URL after /api and before headers\ntemp = url.partition(\"api\")[2]\nsub_url = temp.partition(\"?\")[0]\n\nstring1 =(sub_url + headers[\"X-Auth-User\"] + api_key + headers[\"X-Auth-Expires\"]).encode('utf-8')\ninner = hashlib.md5(string1).hexdigest()\n\nstring2 = (api_key + inner).encode('utf-8')\nheaders[\"X-Auth-Key\"] = hashlib.md5(string2).hexdigest()\n\nresp = requests.get(url, headers=headers)\nprint(resp.text)\n","sub_path":"auth_request.py","file_name":"auth_request.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"328834879","text":"import numpy as np\nimport pandas as pd\nimport xarray as xr\nfrom brainio_base.stimuli import StimulusSet\nfrom brainio_base.assemblies import NeuronRecordingAssembly\n\nfrom brainio_collection.packaging import package_stimulus_set, package_data_assembly\n\nstorage_location = \"D:/MIT/EcogData/brainscore_img_elec_50ms250_70hz150_positive.tar/brainscore_img_elec_50ms250_70hz150_positive/\"\n\n\ndef collect_stimuli(stimuli_directory):\n labels = np.load(stimuli_directory + 'stimgroups.npy') # labels of image\n stim_sequence = np.load(\n stimuli_directory + 'stimsequence.npy') # the names of the files with a b: \"b'V12'\" (Image ID)\n\n # image file name will be the stimuli_directory + ID.jpg\n\n stimuli = []\n for x in range(len(labels)):\n stimuli.append({\n 'image_id': stim_sequence[x].decode('UTF-8'), # extract just the ID\n 'image_file_name': stimuli_directory + \"stimuli/\" + str(stim_sequence[x].decode('UTF-8')) + \".jpg\",\n 'image_number': x,\n 'label': labels[x],\n })\n stimuli = pd.DataFrame(stimuli)\n\n\n\n # convert stimuli object into something that can be used with all the packaging functions\n stimuli = StimulusSet(stimuli)\n\n # after converted to a type \"StimulusSet\", you set an attribute of the object, such as \"image_paths\":\n stimuli.image_paths = {key: stimuli['image_file_name'][i] for i, key in enumerate(stimuli['image_id'])}\n\n return stimuli\n\n\n# pass into this function the stimuli object that you obtain from the above function\n# stimuli is a Pandas DataFrame\n# also pass into this function the neural response file (neural_responses.npy)\ndef load_responses(response_file, stimuli):\n neural_response_file = response_file + \"neural_responses.npy\"\n neural_responses = np.load(neural_response_file)\n\n brodmann_file = response_file + \"brodmann_areas.npy\"\n brodmann_locations = np.load(brodmann_file)\n\n assembly = xr.DataArray(neural_responses,\n coords={\n 'image_num': ('presentation', list(range(neural_responses.shape[0]))),\n 'image_id': ('presentation',\n [stimuli['image_id'][stimuli['image_number'] == num].values[0]\n for num in range(neural_responses.shape[0])]),\n\n 'region': ('neuroid', brodmann_locations),\n\n\n 'neuroid_id': ('neuroid', list(range(neural_responses.shape[1]))),\n\n 'time': ('time_bin', np.linspace(0, 1, 32)),\n 'time_bin_start': ('time_bin', np.arange(0, 1000, 31.25)),\n 'time_bin_end': ('time_bin', np.arange(31.25, 1001, 31.25))\n },\n dims=['presentation', 'neuroid', 'time_bin'])\n\n assembly = NeuronRecordingAssembly(assembly)\n\n\n assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')\n return 
assembly\n\n\n","sub_path":"mkgu_packaging/aru/Kuzovkin2018/aru_early.py","file_name":"aru_early.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"18191789","text":"from django.conf.urls import url,include\nfrom django.contrib import admin\nfrom .views import *\nfrom django.contrib.auth.views import login,logout\nfrom .views import index1\nfrom django.contrib.auth.decorators import login_required\n\n\nurlpatterns = [\n url(r'^$', home, name='home'),\n url(r'^viewproduct/', ProductView.as_view(), name=\"view_product\"),\n url(r'^login/$',login,{'template_name':'profile/login.html'},name='login'),\n url(r'^profile/', profile, name=\"profile\"),\n url(r'^edit-profile/', edit_profile, name=\"edit_profile\"),\n url(r'^addproduct/', AddProductView.as_view(), name=\"add_product\"),\n url(r'^productdetails/(?P[0-9]+)', ProductDetailView.as_view(), name=\"product_detail\"),\n url(r'^bidderlist/(?P[0-9]+)', BidderListView.as_view(), name=\"bidder_list\"),\n url(r'^deleteproduct/(?P[0-9]+)', ProductDelete.as_view(), name=\"delete_product\"),\n url(r'^save_bid/',save_bid, name=\"save_bid\"),\n url(r'^logout/$',logout,{'template_name':'profile/logout.html'},name='logout'),\n url(r'^register/$', register, name='register'),\n url(r'^homepage/$', homepage.as_view(), name='homepage'),\n url(r'^chatroom/(?P[0-9]+)', index1, name='chatroom'),\n url(r'^about/' , about , name='about') ,\n url(r'^contact/' , contact , name='contact') ,\n url(r'^ajax/validate_notif/$', last_notification, name='last_notification'),\n url(r'^ajax/validate_seller/$', notif_seller, name='notif_seller'),\n url(r'^ajax/validate_winner/$', notif_winner, name='notif_winner'),\n\n\n\n]\n","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"409944594","text":"#!/usr/bin/env python\n\nfrom expmanager import ExperimentManager\nfrom argparse import ArgumentParser\nfrom expmanager import DEFAULT_BASE_PORT\n\nDEFAULT_HOSTS = \"self-check\"\nDEFAULT_CMDLINES = \"self-check\"\nDEFAULT_EXP = \"self-check\"\nDEFAULT_KLEECMD = \"self-check\"\nDEFAULT_COVERABLE = \"self-check\"\n\nUID_PREFIX = \"self-check\"\nDURATION = 600\nSTRATEGY = \"random-path,cov-opt,partitioning\"\n\ndef main():\n parser = ArgumentParser(description=\"Run the self-check suite.\")\n\n parser.add_argument(\"--hosts\", default=DEFAULT_HOSTS, help=\"Available cluster machines\")\n parser.add_argument(\"--cmdlines\", default=DEFAULT_CMDLINES, help=\"Command lines of the testing targets\")\n parser.add_argument(\"--exp\", default=DEFAULT_EXP, help=\"The experiment schedule file\")\n parser.add_argument(\"--kleecmd\", default=DEFAULT_KLEECMD, help=\"The command line parameters to pass to Klee\")\n parser.add_argument(\"--coverable\", default=DEFAULT_COVERABLE, help=\"The file containing coverable files\")\n\n args = parser.parse_args()\n\n manager = ExperimentManager(args.hosts, args.cmdlines, args.exp,\n args.kleecmd, args.coverable,\n debugcomm=False, uidprefix=UID_PREFIX,\n duration=DURATION,\n balancetout=None,\n strategy=STRATEGY,\n basePort=DEFAULT_BASE_PORT)\n\n manager.initHosts()\n manager.runExperiment()\n\nif __name__ == \"__main__\":\n 
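# e.g.:  python self-check.py --hosts cluster_hosts --exp nightly_schedule\n    # (hypothetical file names; any flag left off falls back to its \"self-check\" default)\n    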
main()\n","sub_path":"infra/self-check.py","file_name":"self-check.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"531465359","text":"import unittest\nfrom dexstore import DexStore, exceptions\nfrom dexstore.instance import set_shared_dexstore_instance\nfrom dexstore.account import Account\nfrom dexstore.committee import Committee\nfrom .fixtures import fixture_data\n\n\nclass Testcases(unittest.TestCase):\n\n def setUp(self):\n fixture_data()\n\n def test_Committee(self):\n with self.assertRaises(\n exceptions.AccountDoesNotExistsException\n ):\n Committee(\"FOObarNonExisting\")\n\n c = Committee(\"xeroc\")\n self.assertEqual(c[\"id\"], \"1.5.27\")\n self.assertIsInstance(c.account, Account)\n\n with self.assertRaises(\n exceptions.CommitteeMemberDoesNotExistsException\n ):\n Committee(\"nathan\")\n","sub_path":"tests/test_base_objects.py","file_name":"test_base_objects.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"639458473","text":"#Copyright 2020, Alexander Wassell, All rights reserved.\n\nclass Category:\n def __init__(self, id=0, name=None):\n self.id = id\n self.name = name\n\nclass Product:\n def __init__(self, id=0, code=None, name=None, price=0.0, category=None):\n self.id = id\n self.code = code\n self.name = name\n self.price = price\n self.category = category\n","sub_path":"objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"93470740","text":"# import flask\nfrom flask import Flask, url_for\n\n# import jinja template engine\nfrom flask import render_template\n\nfrom flask import request, jsonify\n\nimport json\nimport random\n\napp = Flask(__name__)\n\nwith open(\"calls.json\") as c:\n\tcalls = json.load(c)\n\ncurrent_id = len(calls) + 1\n\n@app.route('/')\n\ndef home():\n\treturn render_template('landing.html', calls=calls)\n\n# ------------------------------------------------\n@app.route('/call_index')\n\ndef call_index():\n\treturn render_template('call_index.html', calls=calls)\n\n# ------------------------------------------------\n\n@app.route('/view/', methods=['GET', 'POST'])\n\ndef view(id=None):\n\tcall = {}\n\tint_id = int(id)\n\n\tfor i in range(len(calls)):\n\t\tif calls[i][\"id\"] == int_id:\n\t\t\tcall = calls[i]\n\t\t\tbreak\n\n\tprint(call)\n\n\treturn render_template('view.html', call=call)\n\n# --------------------------------------------------\n\n@app.route('/search', methods=['POST'])\n\n# We search through call names\n\ndef search():\n\tmatches = []\n\tsearch_query = request.get_json()\n\n\tfor call in calls:\n\t\t\tif search_query.lower() in call[\"call\"].lower():\n\t\t\t\tmatches.append(call)\n\treturn jsonify(matches=matches)\n\n# --------------------------------------------------\n\ntotal_questions = 4\ncurrent_question = 0\nmissed_calls = []\ncorrect_answers = 0\ntotal_options = 4\ncorrect_call_name = \"\"\ncorrect_option = 0\nalready_asked = []\n\n@app.route('/quiz_index')\n\ndef quiz_index():\n\tglobal current_question, missed_calls, correct_answers\n\talready_asked = []\n\tcurrent_question = 0\n\tmissed_calls = []\n\tcorrect_answers = 0\n\treturn render_template('quiz_index.html')\n\n# ------------------------------------------------\n\n@app.route('/quiz/gif', methods=['GET'])\n\ndef gif_quiz():\n\tglobal 
current_question, correct_call_name, correct_option\n\tcurrent_question += 1\n\toptions = random.sample(calls, total_options)\n\twhile True:\n\t\tcorrect_option = random.randint(0, total_options - 1)\n\t\tcorrect_call_gif = options[correct_option][\"gif\"]\n\t\tcorrect_call_name = options[correct_option][\"call\"]\n\t\tcorrect_call_id = options[correct_option][\"id\"]\n\t\tif correct_call_id not in already_asked:\n\t\t\talready_asked.append(correct_call_id)\n\t\t\tbreak\n\treturn render_template('quiz/gif.html', options=options, correct_call=correct_call_gif, current_question=current_question, total_questions=total_questions)\n\n# ------------------------------------------------\n\n@app.route('/quiz/rules', methods=['GET'])\n\ndef rules_quiz():\n\tglobal current_question, correct_call_name, correct_option\n\tcurrent_question += 1\n\toptions = random.sample(calls, total_options)\n\twhile True:\n\t\tcorrect_option = random.randint(0, total_options - 1)\n\t\tcorrect_call_rules = options[correct_option][\"quiz_description\"]\n\t\tcorrect_call_name = options[correct_option][\"call\"]\n\t\tcorrect_call_id = options[correct_option][\"id\"]\n\t\tif correct_call_id not in already_asked:\n\t\t\talready_asked.append(correct_call_id)\n\t\t\tbreak\n\treturn render_template('quiz/rules.html', options=options, correct_call=correct_call_rules, current_question=current_question, total_questions=total_questions)\n\n\t# ------------------------------------------------\n\n@app.route('/quiz/call', methods=['GET'])\n\ndef call_quiz():\n\tglobal current_question, correct_call_name, correct_option\n\tcurrent_question += 1\n\toptions = random.sample(calls, total_options)\n\twhile True:\n\t\tcorrect_option = random.randint(0, total_options - 1)\n\t\tcorrect_call_rules = options[correct_option][\"quiz_description\"]\n\t\tcorrect_call_name = options[correct_option][\"call\"]\n\t\tcorrect_call_id = options[correct_option][\"id\"]\n\t\tif correct_call_id not in already_asked:\n\t\t\talready_asked.append(correct_call_id)\n\t\t\tbreak\n\treturn render_template('quiz/call.html', options=options, correct_call=correct_call_name, current_question=current_question, total_questions=total_questions)\n# --------------------------------------------------\n\n@app.route('/submit', methods=['POST'])\n\ndef submit():\n\tglobal correct_answers\n\tanswer = request.get_json()\n\tif correct_option == int(answer):\n\t\tcorrect_answers += 1\n\telse:\n\t\tfor call in calls:\n\t\t\tif call[\"call\"] == correct_call_name:\n\t\t\t\tmissed_call_id = call[\"id\"]\n\t\t\t\tmissed_calls.append(tuple([missed_call_id, correct_call_name]))\n\n\tif current_question == total_questions:\n\t\treturn jsonify(done=True, correct_option=correct_option)\n\telse:\n\t\treturn jsonify(done=False, correct_option=correct_option)\n\n# --------------------------------------------------\n\n@app.route('/final_score', methods=['GET'])\n\ndef final_score_gif():\n\treturn render_template('final_score.html', correct_answers=correct_answers, total_questions=total_questions, missed_calls=missed_calls)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"485137117","text":"import replayparser as rp\n\n\nclass reader:\n\n def __init__(self, f):\n self.f = f\n self.buf = None\n self.isEof = None\n self.bitVal = 0\n self.bitCount = 0\n\n def readBits(self, bits):\n while bits > self.bitCount:\n self.bitVal = self.bitVal | 
(int.from_bytes(self.f.read(1), byteorder='big') << self.bitCount)\n self.bitCount += 8\n x = self.bitVal & ((1 << bits) - 1)\n self.bitVal = self.bitVal >> bits\n self.bitCount -= bits\n return x\n\n def readUBit(self):\n ret = self.readBits(6)\n if ret & 48 == 16:\n ret = (ret & 15) | (self.readBits(4) << 4)\n elif ret & 48 == 32:\n ret = (ret & 15) | (self.readBits(8) << 4)\n elif ret & 48 == 48:\n ret = (ret & 15) | (self.readBits(28) << 4)\n return ret\n\n def readByte(self):\n # if self.bitCount == 0:\n # return rp.readByte(self.f)\n return self.readBits(8)\n\n def readBytes(self, n):\n buf = bytes()\n if self.bitCount == 0:\n return rp.readBytes(self.f, n)\n for i in range(n):\n buf += bytes([self.readBits(8)])\n return buf\n\n def readVarInt(self):\n x = 0\n y = 0\n stopThis = 1\n while stopThis:\n b = self.readByte()\n x = x | ((b & 127) << y)\n y += 7\n if (b & 128) == 0 or y == 35:\n stopThis = 0\n return x\n","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"512552041","text":"from __future__ import print_function\r\n\r\nimport github\r\nimport argparse\r\nimport sys\r\nfrom datetime import timedelta, datetime\r\nimport calendar\r\nimport csv\r\nfrom copy import deepcopy\r\nfrom itertools import tee\r\n\r\n# For OpenCV\r\nimport numpy as np\r\nimport cv2\r\n\r\n# For Google Drive API\r\nimport pickle\r\nimport os.path\r\nfrom googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom google.auth.transport.requests import Request\r\nfrom googleapiclient.http import MediaFileUpload\r\n\r\n\r\nclass PullRequests:\r\n def __sub__(self, other):\r\n out = PullRequests()\r\n\r\n out.all = self.all - other.all\r\n out.open = self.open - other.open\r\n out.opened = self.opened - other.opened\r\n out.closed = self.closed - other.closed\r\n out.merged = self.merged - other.merged\r\n\r\n return out\r\n\r\n def get_stat(self, repo, date_begin, date_end):\r\n self.all = 0\r\n self.open = 0\r\n self.opened = 0\r\n self.closed = 0\r\n self.merged = 0\r\n\r\n for pr in repo.get_pulls(state='all', sort='created_at'):\r\n assert (pr.created_at is not None)\r\n\r\n counter_inc = False\r\n\r\n if pr.created_at.date() > date_end:\r\n break\r\n\r\n if (pr.created_at.date() < date_begin) and \\\r\n ((pr.closed_at is None or pr.closed_at.date() > date_begin) and\r\n (pr.merged_at is None or pr.merged_at.date() > date_begin)):\r\n self.open += 1\r\n counter_inc = True\r\n\r\n if date_begin <= pr.created_at.date() <= date_end:\r\n self.opened += 1\r\n counter_inc = True\r\n\r\n if pr.merged_at is not None and \\\r\n date_begin <= pr.merged_at.date() <= date_end:\r\n self.merged += 1\r\n counter_inc = True\r\n\r\n if pr.closed_at is not None and \\\r\n date_begin <= pr.closed_at.date() <= date_end:\r\n self.closed += 1\r\n counter_inc = True\r\n\r\n if counter_inc is True:\r\n self.all += 1\r\n\r\n def copy(self):\r\n out = PullRequests()\r\n\r\n out.all = deepcopy(self.all)\r\n out.open = deepcopy(self.open)\r\n out.opened = deepcopy(self.opened)\r\n out.closed = deepcopy(self.closed)\r\n out.merged = deepcopy(self.merged)\r\n\r\n return out\r\n\r\n def show(self):\r\n print('\\tPullR: ',\r\n '\\tall:', self.all,\r\n '\\topen:', self.open,\r\n '\\topened:', self.opened,\r\n '\\tclosed:', self.closed,\r\n '\\tmerged:', self.merged)\r\n\r\n all: int = 0\r\n open: int = 0\r\n opened: int = 0\r\n closed: int = 0\r\n 
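# class-level defaults; get_stat() shadows these with per-instance counts\r\n    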
merged: int = 0\r\n\r\n\r\nclass Issues:\r\n def __sub__(self, other):\r\n out = Issues()\r\n\r\n out.all = self.all - other.all\r\n out.open = self.open - other.open\r\n out.opened = self.opened - other.opened\r\n out.closed = self.closed - other.closed\r\n\r\n return out\r\n\r\n def get_stat(self, repo, date_begin, date_end):\r\n self.all = 0\r\n self.open = 0\r\n self.opened = 0\r\n self.closed = 0\r\n\r\n for issue in reversed(list(repo.get_issues(state='all'))):\r\n assert (issue.created_at is not None)\r\n counter_inc = False\r\n\r\n if issue.created_at.date() > date_end:\r\n break\r\n\r\n if (issue.created_at.date() < date_begin) and \\\r\n (issue.closed_at is None or issue.closed_at.date() > date_begin):\r\n self.open += 1\r\n counter_inc = True\r\n\r\n if date_begin <= issue.created_at.date() <= date_end:\r\n self.opened += 1\r\n counter_inc = True\r\n\r\n if issue.closed_at is not None and \\\r\n date_begin <= issue.closed_at.date() <= date_end:\r\n self.closed += 1\r\n counter_inc = True\r\n\r\n if counter_inc is True:\r\n self.all += 1\r\n\r\n def copy(self):\r\n out = Issues()\r\n\r\n out.all = deepcopy(self.all)\r\n out.open = deepcopy(self.open)\r\n out.opened = deepcopy(self.opened)\r\n out.closed = deepcopy(self.closed)\r\n\r\n return out\r\n\r\n def show(self):\r\n print('\\tIssues: ',\r\n '\\tall:', self.all,\r\n '\\topen:', self.open,\r\n '\\topened:', self.opened,\r\n '\\tclosed:', self.closed)\r\n\r\n all: int = 0\r\n open: int = 0\r\n opened: int = 0\r\n closed: int = 0\r\n\r\n\r\nclass Stars:\r\n def __sub__(self, other):\r\n out = Stars()\r\n\r\n out.all = self.all - other.all\r\n out.snapshot = self.snapshot - other.snapshot\r\n out.period = self.period - other.period\r\n\r\n return out\r\n\r\n def get_stat(self, repo, date_begin, date_end):\r\n self.all = 0\r\n self.snapshot = 0\r\n self.period = 0\r\n\r\n for star in repo.get_stargazers_with_dates():\r\n assert (star.starred_at is not None)\r\n\r\n counter_inc = False\r\n\r\n if star.starred_at.date() > date_end:\r\n break\r\n\r\n if date_begin <= star.starred_at.date() <= date_end:\r\n self.period += 1\r\n counter_inc = True\r\n\r\n if star.starred_at.date() <= date_begin:\r\n self.snapshot += 1\r\n counter_inc = True\r\n\r\n if counter_inc is True:\r\n self.all += 1\r\n\r\n def copy(self):\r\n out = Stars()\r\n\r\n out.all = deepcopy(self.all)\r\n out.snapshot = deepcopy(self.snapshot)\r\n out.period = deepcopy(self.period)\r\n\r\n return out\r\n\r\n def show(self):\r\n print('\\tStars:',\r\n '\\t\\tall:', self.all,\r\n '\\tStars snapshot:', self.snapshot,\r\n '\\tStars per period:', self.period)\r\n\r\n all: int = 0\r\n snapshot: int = 0\r\n period: int = 0\r\n\r\n\r\nclass Forks:\r\n def __sub__(self, other):\r\n out = Forks()\r\n\r\n out.all = self.all - other.all\r\n out.snapshot = self.snapshot - other.snapshot\r\n out.period = self.period - other.period\r\n\r\n return out\r\n\r\n def get_stat(self, repo, date_begin, date_end):\r\n self.all = 0\r\n self.snapshot = 0\r\n self.period = 0\r\n\r\n for fork in reversed(list(repo.get_forks())):\r\n assert (fork.created_at is not None)\r\n counter_inc = False\r\n\r\n if fork.created_at.date() > date_end:\r\n break\r\n\r\n if fork.created_at.date() <= date_begin:\r\n self.snapshot += 1\r\n counter_inc = True\r\n\r\n if date_begin <= fork.created_at.date() <= date_end:\r\n self.period += 1\r\n counter_inc = True\r\n\r\n if counter_inc is True:\r\n self.all += 1\r\n\r\n def copy(self):\r\n out = Forks()\r\n\r\n out.all = deepcopy(self.all)\r\n out.snapshot = 
deepcopy(self.snapshot)\r\n out.period = deepcopy(self.period)\r\n\r\n return out\r\n\r\n def show(self):\r\n print('\\tForks:',\r\n '\\t\\tall:', self.all,\r\n '\\tForks snapshot:', self.snapshot,\r\n '\\tForks per period:', self.period)\r\n\r\n all: int = 0\r\n snapshot: int = 0\r\n period: int = 0\r\n\r\n\r\nclass Traffic:\r\n def __sub__(self, other):\r\n out = Traffic()\r\n\r\n out.visitors = self.visitors - other.visitors\r\n out.cloners = self.cloners - other.cloners\r\n\r\n return out\r\n\r\n # --TODO error Message 403\r\n def get_stat(self, repo, date_begin, date_end):\r\n self.visitors = 0\r\n self.cloners = 0\r\n\r\n try:\r\n self.visitors = repo.get_views_traffic(per='week')['uniques']\r\n self.cloners = repo.get_clones_traffic(per='week')['uniques']\r\n except Exception as exc:\r\n self.visitors = -1\r\n self.cloners = -1\r\n\r\n def copy(self):\r\n out = Traffic()\r\n\r\n out.visitors = deepcopy(self.visitors)\r\n out.cloners = deepcopy(self.cloners)\r\n\r\n return out\r\n\r\n def show(self):\r\n print('\\tTraffic:',\r\n '\\tVisitors unique:', self.visitors,\r\n '\\tCloners unique:', self.cloners)\r\n\r\n visitors: int = 0\r\n cloners: int = 0\r\n\r\n\r\nclass GithubStats:\r\n def __init__(self, begin=str(), end=str()):\r\n self.begin = begin\r\n self.end = end\r\n\r\n def __sub__(self, other):\r\n out = GithubStats()\r\n\r\n out.pulls = self.pulls - other.pulls\r\n out.issues = self.issues - other.issues\r\n out.stars = self.stars - other.stars\r\n out.forks = self.forks - other.forks\r\n out.traffic = self.traffic - other.traffic\r\n\r\n return out\r\n\r\n def get_stat(self, repo, period1, period2):\r\n self.begin = datetime.strftime(period1, '%Y.%m.%d')\r\n self.end = datetime.strftime(period2, '%Y.%m.%d')\r\n\r\n self.pulls.get_stat(repo, period1, period2)\r\n self.issues.get_stat(repo, period1, period2)\r\n self.stars.get_stat(repo, period1, period2)\r\n self.forks.get_stat(repo, period1, period2)\r\n self.traffic.get_stat(repo, period1, period2)\r\n\r\n def copy(self):\r\n out = GithubStats()\r\n out.begin = deepcopy(self.begin)\r\n out.end = deepcopy(self.end)\r\n out.tag = deepcopy(self.tag)\r\n\r\n out.pulls = self.pulls.copy()\r\n out.issues = self.issues.copy()\r\n out.stars = self.stars.copy()\r\n out.forks = self.forks.copy()\r\n out.traffic = self.traffic.copy()\r\n\r\n return out\r\n\r\n def show(self):\r\n print('\\nPeriod:', self.begin, self.end,\r\n '\\nTag:', self.tag)\r\n\r\n self.pulls.show()\r\n self.issues.show()\r\n self.stars.show()\r\n self.forks.show()\r\n self.traffic.show()\r\n\r\n begin = str()\r\n end = str()\r\n tag = str()\r\n\r\n pulls = PullRequests()\r\n issues = Issues()\r\n stars = Stars()\r\n forks = Forks()\r\n traffic = Traffic()\r\n\r\n\r\ndef show_metric_snapshot(repo):\r\n print('Metric snapshot:',\r\n '\\n\\tPR open:', repo.open_pulls_count,\r\n '\\n\\tIssues open:', repo.open_issues_count,\r\n '\\n\\tStars:', repo.stargazers_count,\r\n '\\n\\tForks:', repo.forks_count, )\r\n\r\n\r\ndef write_opencv(file_name, stats_array):\r\n print('Save to', file_name)\r\n\r\n # Open OpenCV file storage\r\n fs = cv2.FileStorage(file_name, cv2.FileStorage_WRITE)\r\n\r\n if fs.isOpened() is False:\r\n sys.exit('File is not open')\r\n\r\n matrix = []\r\n for stats in stats_array:\r\n fs.write('Period stats', str(stats.begin + ' - ' + stats.end))\r\n fs.write('Tag', stats.tag)\r\n\r\n # Save pulls data to file\r\n fs.write('PR open', stats.pulls.open)\r\n fs.write('PR opened', stats.pulls.opened)\r\n fs.write('PR closed', stats.pulls.closed)\r\n 
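# note: these scalar keys repeat once per loop pass, so readers of the YAML should prefer the aggregated 'output_matrix' written after the loop\r\n        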
fs.write('PR merged', stats.pulls.merged)\r\n\r\n # Save issues data to file\r\n fs.write('Issues open', stats.issues.open)\r\n fs.write('Issues opened', stats.issues.opened)\r\n fs.write('Issues closed', stats.issues.closed)\r\n\r\n # Save stars data to file\\\r\n fs.write('Stars snapshot', stats.stars.snapshot)\r\n fs.write('Stars per period', stats.stars.period)\r\n\r\n # Save forks data to file\r\n fs.write('Forks snapshot', stats.forks.snapshot)\r\n fs.write('Forks per period', stats.forks.period)\r\n\r\n fs.write('Visitors uniques', stats.traffic.visitors)\r\n fs.write('Cloners uniques', stats.traffic.cloners)\r\n\r\n # Create stats array\r\n arr = np.array([stats.pulls.open, stats.issues.open, stats.stars.snapshot, stats.forks.snapshot,\r\n stats.traffic.visitors, stats.traffic.cloners, stats.pulls.opened, stats.pulls.merged,\r\n stats.pulls.closed,\r\n stats.issues.opened, stats.issues.closed, stats.forks.period])\r\n matrix.append(arr)\r\n\r\n fs.write('stat_array', arr)\r\n\r\n fs.write('output_matrix', np.asmatrix(matrix))\r\n fs.release()\r\n\r\n\r\ndef write_csv(file_name, stats_array):\r\n print('Save to', file_name)\r\n writer = csv.writer(open(file_name, 'w', newline=''), delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n writer.writerow(['Period', 'PR open', 'Issue open', 'Stars snapshot', 'Forks snapshot',\r\n 'Visitors (uniques)', 'Cloners (uniques)', 'PR opened', 'PR merged', 'PR closed',\r\n 'Issues opened', 'Issues closed', 'Stars per period', 'Forks per period'])\r\n for stats in stats_array:\r\n writer.writerow([stats.begin + ' - ' + stats.end + ' ' + stats.tag,\r\n stats.pulls.open, stats.issues.open,\r\n stats.stars.snapshot, stats.forks.snapshot, stats.traffic.visitors, stats.traffic.cloners,\r\n stats.pulls.opened, stats.pulls.merged, stats.pulls.closed,\r\n stats.issues.opened, stats.issues.closed, stats.stars.period, stats.forks.period])\r\n\r\n\r\ndef write_gdocs(file_name, stats_array):\r\n # If modifying these scopes, delete the file token.pickle.\r\n SCOPES = ['https://www.googleapis.com/auth/drive']\r\n\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('drive', 'v3', credentials=creds)\r\n\r\n # Retrieve the documents contents from the Docs service.\r\n try:\r\n file_metadata = {'name': file_name}\r\n media = MediaFileUpload(file_name,\r\n mimetype='application/octet-stream',\r\n resumable=True)\r\n file = service.files().create(body=file_metadata,\r\n media_body=media,\r\n fields='id').execute()\r\n print('File ID: %s' % file.get('id'))\r\n except Exception as exc:\r\n print(exc)\r\n\r\n\r\n# Parsing periods by tags or dates\r\n# If tags are defined, returned True\r\n# Return check tags, parsed periods\r\ndef parsing_period(repo, periods):\r\n isTags = False\r\n output_periods = list()\r\n\r\n # Parse periods by tags\r\n 
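# idea: walk releases oldest-to-newest; when a requested tag matches, remember that\r\n    # release and close its window [prev.created_at, next_release.created_at] on the\r\n    # following pass -- the final match is closed with today's date after the loop\r\n    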
isFound = False\r\n prev_rel = None\r\n for release in reversed(list(repo.get_releases())):\r\n if isFound is True:\r\n output_periods.append({'begin': prev_rel.created_at.date(),\r\n 'end': release.created_at.date(),\r\n 'tag': prev_rel.tag_name})\r\n prev_rel = None\r\n isFound = False\r\n\r\n for per in periods:\r\n if release.tag_name == per:\r\n isFound = True\r\n prev_rel = release\r\n\r\n # If there was a last release, then the end date will be today\r\n if isFound is True:\r\n output_periods.append({'begin': prev_rel.created_at.date(),\r\n 'end': datetime.today().date(),\r\n 'tag': prev_rel.tag_name})\r\n\r\n if output_periods:\r\n isTags = True\r\n\r\n # Parse periods by dates\r\n if not output_periods:\r\n try:\r\n begin = datetime.strptime(periods[0], '%Y.%m.%d').date()\r\n end = datetime.strptime(periods[1], '%Y.%m.%d').date()\r\n\r\n while begin < end:\r\n days = calendar.monthrange(begin.year, begin.month)[1]\r\n end_tmp = begin + timedelta(days=days)\r\n\r\n output_periods.append({'begin': begin, 'end': end_tmp, 'tag': ''})\r\n begin = end_tmp\r\n\r\n except Exception as exc:\r\n sys.exit(exc)\r\n\r\n return isTags, output_periods\r\n\r\n\r\n# Compare stats by tags (delta)\r\ndef compare_releases(stats_array):\r\n cmp_stats = list()\r\n\r\n iter1, iter2 = tee(stats_array, 2)\r\n it2 = next(iter2)\r\n\r\n while it2 is not stats_array[-1]:\r\n it1 = next(iter1)\r\n it2 = next(iter2)\r\n\r\n stat_tmp = it2 - it1\r\n\r\n stat_tmp.tag = it1.tag + '_' + it2.tag\r\n stat_tmp.begin = it1.begin\r\n stat_tmp.end = it2.end\r\n\r\n cmp_stats.append(stat_tmp.copy())\r\n\r\n return cmp_stats\r\n\r\n\r\ndef collect_metric(login_or_token, repo_name, periods, password=None):\r\n if len(periods) < 2:\r\n sys.exit('Periods: input 2 dates or 2 or many tags')\r\n\r\n # Connect to github API\r\n g = github.Github(login_or_token=login_or_token, password=password)\r\n try:\r\n g.get_repo(repo_name)\r\n except Exception as exc:\r\n sys.exit(exc)\r\n\r\n # Get repository\r\n repo = g.get_repo(repo_name)\r\n\r\n # Parse period\r\n isTags, out_periods = parsing_period(repo, periods)\r\n if not out_periods:\r\n sys.exit('Periods parse error')\r\n\r\n print('Parsed dates:')\r\n for per in out_periods:\r\n print('\\t', per['begin'], ' - ', per['end'], per['tag'])\r\n\r\n # Get stats by periods\r\n stats_array = []\r\n for per in out_periods:\r\n print('>> get metric:', per['begin'], ' - ', per['end'])\r\n\r\n stats = GithubStats()\r\n stats.get_stat(repo, per['begin'], per['end'])\r\n stats.tag = per['tag']\r\n\r\n stats_array.append(stats.copy())\r\n\r\n # If tags are defined, releases are compare.\r\n if isTags is True and len(stats_array) != 1:\r\n return compare_releases(stats_array)\r\n\r\n return stats_array\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-u', '--github_username', help='login or token', required=True)\r\n parser.add_argument('-pw', '--github_password')\r\n parser.add_argument('-r', '--github_repo', required=True)\r\n parser.add_argument('-p', '--period', help='release tags or dates(format: year.month.day)', nargs='+',\r\n required=True)\r\n parser.add_argument('-ocv', '--export_opencv', help='save contents in OpenCV format', action='store_true')\r\n parser.add_argument('-csv', '--export_csv', help='export content to CSV file', action='store_true')\r\n parser.add_argument('-gdoc', '--export_gdoc', help='export content to Google Drive', action='store_true')\r\n\r\n args = parser.parse_args()\r\n\r\n # Get metric\r\n stats_array = 
collect_metric(login_or_token=args.github_username, password=args.github_password,\r\n repo_name=args.github_repo, periods=args.period)\r\n\r\n # Create name output file\r\n output_name = 'stat'\r\n for per in args.period:\r\n output_name += '_' + per\r\n\r\n # Show stats\r\n for stats in stats_array:\r\n stats.show()\r\n\r\n print('\\n')\r\n\r\n # Save to OpenCV format\r\n if args.export_opencv is True:\r\n write_opencv(output_name + '.yml', stats_array)\r\n\r\n # Save to csv format\r\n if args.export_csv is True or \\\r\n args.export_gdoc is True:\r\n write_csv(output_name + '.csv', stats_array)\r\n\r\n # Save to Google Drive\r\n if args.export_gdoc is True:\r\n write_gdocs(output_name + '.csv', stats_array)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"github_stats.py","file_name":"github_stats.py","file_ext":"py","file_size_in_byte":19657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"18301233","text":"# Copyright (c) 2008-2016 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"\nGINI Water Vapor Imagery\n========================\n\nUse MetPy's support for GINI files to read in a water vapor satellite image and plot the\ndata using CartoPy.\n\"\"\"\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\nfrom metpy.cbook import get_test_data\nfrom metpy.io import GiniFile\nfrom metpy.plots import ctables\n\n###########################################\n\n# Open the GINI file from the test data\nf = GiniFile(get_test_data('WEST-CONUS_4km_WV_20151208_2200.gini'))\nprint(f)\n\n###########################################\n\n# Get a Dataset view of the data (essentially a NetCDF-like interface to the\n# underlying data). 
Pull out the data, (x, y) coordinates, and the projection\n# information.\nds = f.to_dataset()\nx = ds.variables['x'][:]\ny = ds.variables['y'][:]\ndat = ds.variables['WV']\nproj_var = ds.variables[dat.grid_mapping]\nprint(proj_var)\n\n###########################################\n\n# Create CartoPy projection information for the file\nglobe = ccrs.Globe(ellipse='sphere', semimajor_axis=proj_var.earth_radius,\n semiminor_axis=proj_var.earth_radius)\nproj = ccrs.LambertConformal(central_longitude=proj_var.longitude_of_central_meridian,\n central_latitude=proj_var.latitude_of_projection_origin,\n standard_parallels=[proj_var.standard_parallel],\n globe=globe)\n\n###########################################\n\n# Plot the image\nfig = plt.figure(figsize=(10, 12))\nax = fig.add_subplot(1, 1, 1, projection=proj)\nwv_norm, wv_cmap = ctables.registry.get_with_steps('WVCIMSS', 0, 1)\nim = ax.imshow(dat[:], cmap=wv_cmap, norm=wv_norm, zorder=0,\n extent=ds.img_extent, origin='upper')\nax.coastlines(resolution='50m', zorder=2, color='black')\n\nplt.show()\n","sub_path":"examples/formats/GINI_Water_Vapor.py","file_name":"GINI_Water_Vapor.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"426654123","text":"import selenium_toolkit\nfrom file_handling import ReportDefinition\nfrom page_object import BasePage\nfrom selenium.webdriver.common.by import By\nimport selenium.common.exceptions as exceptions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport file_handling\nimport logging\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\nimport spider_exceptions\nfrom spider_exceptions import NoDownloadLinkError\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport datetime\n\nclass LoginPage(BasePage):\n \"\"\"Page object for US Bank login page\"\"\"\n \n #URL of login page\n url = 'https://usbtrustgateway.usbank.com/portal/login.do'\n \n #Webelement identifiers\n usernameElemID = 'uname'\n passwordElemID = 'pword'\n \n def attempt_login(self, username, password):\n \"\"\"Wait until login boxes are present then enter supplied login details and submit\"\"\"\n \n self.go_to_page()\n logging.info('Navigated to login page')\n \n wait = WebDriverWait(self.driver, 5)\n userbox = wait.until(EC.presence_of_element_located((By.ID, self.usernameElemID)))\n userbox.send_keys(username)\n \n pwbox = wait.until(EC.presence_of_element_located((By.ID, self.passwordElemID)))\n pwbox.send_keys(password)\n pwbox.submit()\n \n #let's test for the presence of an element that indicates a login was successful:\n try:\n wait.until(EC.presence_of_element_located((By.LINK_TEXT, 'Log Out')))\n return True\n except TimeoutException:\n #login has not worked. Is it a random fail or a proper login error? Check for existence of the banner for a wrong login:\n if selenium_toolkit.element_exists_by_xpath(self.driver, '//h6[contains(@class,\"error\")]'):\n #Username or password is incorrect. 
Can't go any further on this login, raise terminal exception\n #TODO: deactivate login in DB to prevent repeated attempts, raise alert\n raise spider_exceptions.LoginDetailsIncorrectError\n else:\n #hasn't explicitly rejected the login, let's try again\n raise spider_exceptions.LoginFailedError\n \n def close_popup_windows_and_return(self):\n \"\"\"Close all windows but one then make sure the driver is set to the remaining window\"\"\"\n \n while len(self.driver.window_handles) > 1:\n self.driver.switch_to_window(self.driver.window_handles[1]) \n #sneakiness in case of page load failure \n actions = ActionChains(self.driver) \n actions.send_keys(Keys.ESCAPE) \n self.driver.close()\n self.driver.switch_to_window(self.driver.window_handles[0])\n self.driver.maximize_window()\n \nclass PortfolioPage(BasePage):\n \"\"\"Basic portfolio list page for a logged in user\"\"\"\n \n #URL of portfolio list page\n url = 'https://trustinvestorreporting.usbank.com/TIR/portfolios/myPortfolio'\n \n #Web element identifiers\n portfolioNavLinkID = 'portfolio-nav-tab'\n portfolioTabsID = 'portfolio-tabs'\n frontPortTabXpath = '//ul[@id=\"portfolio-tabs\"]//li[1]'\n portfolioTabsListXpath = '//ul[@id=\"portfolio-tabs\"]//li/a'\n dealListXPath = '//form[@id=\"deal-list\"]//tbody/tr/td/a'\n logoutLinkText = 'Sign Out'\n \n def wait_for_portfolio_page_to_load(self):\n #WebDriverWait(self.driver, 10).until(EC.title_is('Welcome To Trust Investor Reporting')) \n #ooh hang on, all popout pages have this title once you are logged in - let's be more specific\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.XPATH, \"//h1[contains(text(),'Portfolio')]\")))\n \n def click_top_portfolio_nav_tab(self):\n nav = WebDriverWait(self.driver,15).until(EC.element_to_be_clickable((By.ID,self.portfolioNavLinkID)))\n nav.click()\n self.wait_for_portfolio_tab_to_load()\n \n def click_named_portfolio_tab(self, portTabName):\n #D'oh! The simple version doesn't work if they leave the default 'My Portfolio' tab but put another one in front of it...\n #selenium_toolkit.click_link_by_text(self.driver, portTabName)\n #Need to be a little more specific:\n tablist = self.driver.find_element_by_id(self.portfolioTabsID)\n tab = tablist.find_element_by_link_text(portTabName)\n tab.click()\n self.wait_for_portfolio_tab_to_load(frontonly=False) #should I test for a timeout here or leave it to go up the tree? 
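\n\n    # A sketch of the custom-wait style used below: text_to_change and\n    # xpath_attribute_to_change live in the selenium_toolkit helper module; their exact\n    # shape is assumed here, but any callable taking the driver works with WebDriverWait:\n    #\n    #   class text_to_change(object):\n    #       def __init__(self, locator, old_text):\n    #           self.locator, self.old_text = locator, old_text\n    #       def __call__(self, driver):\n    #           return driver.find_element(*self.locator).text != self.old_text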
\n\n def wait_for_portfolio_tab_to_load(self, frontonly=True):\n WebDriverWait(self.driver, 30).until(selenium_toolkit.xpath_attribute_to_change('//li', '@class', 'ui-state-processing'))\n WebDriverWait(self.driver, 30).until(selenium_toolkit.text_to_change((By.XPATH, self.frontPortTabXpath), u\"Loading\\u2026\")) \n \n def get_portfolio_tabs(self):\n \"\"\"Return List of link texts of portfolio tabs visible\"\"\"\n results = []\n tabs = self.driver.find_elements_by_xpath(self.portfolioTabsListXpath)\n for t in tabs:\n results.append(t.text)\n return results\n \n def get_currently_selected_tab(self):\n mytab = self.driver.find_element_by_xpath('//li[contains(@class, \"ui-tabs-selected\")]')\n return mytab\n \n def get_deal_list(self):\n \"\"\"Return List of deal link texts in the order they appear on the page\"\"\"\n #let's start off with the simplest list we can do and see if it will suffice - just the deal names\n results = []\n dealLinks = self.driver.find_elements_by_xpath(self.dealListXPath)\n #another interesting point - this website is set up to be able to page results\n #at the moment it seems to default to 'all' but I can't figure out how to check\n #one day it might be good to add something to make sure this option is selected no matter what\n for d in dealLinks:\n results.append(d.text)\n return results \n \n def navigate(self, tab, deal_map, deal_index, dl_handler, spider_args, deals_found_list):\n \"\"\"Navigate a portfolio tab page, iterating over deals and creating DealPages\"\"\"\n self.click_named_portfolio_tab(tab) \n currentPortTabName = self.get_currently_selected_tab().text\n logging.info('Current portfolio tab name: ' + currentPortTabName)\n dealLinks = self.get_deal_list() \n \n #########################TESTING ERROR HANDLING#########################\n #raise NoSuchElementException\n #raise ValueError\n \n #see if we are trying to start again from a previous navigation\n if self.statedict.has_key('LastClickedDealLinkText'):\n try:\n saved_deal_index = dealLinks.index(self.statedict['LastClickedDealLinkText'])\n dealLinks = dealLinks[saved_deal_index:]\n except ValueError:\n logging.warning('Saved deal link not found in list - defaulting to beginning')\n \n #check each deal in list against what we have from the database\n for dl in dealLinks:\n if deal_index.has_key(dl):\n self.statedict['LastClickedDealLinkText'] = dl \n deals_found_list.append(dl) \n selenium_toolkit.click_link_by_text(self.driver, dl)\n logging.info('Deal link clicked')\n #keep in mind here 'dl' = trustee deal name, deal_index[dl] = our database deal name\n if spider_args.withInvestorNotices:\n deal = DealPage(self.driver, self.db_interaction, self.statedict, deal_index[dl], dl, currentPortTabName, True)\n else:\n deal = DealPage(self.driver, self.db_interaction, self.statedict, deal_index[dl], dl, currentPortTabName)\n deal.url = self.driver.current_url\n deal.navigate(deal_map, deal_index, dl_handler, dl)\n #now go back to portfolio page, which always starts from front tab no matter how we get there\n self.go_to_page()\n self.wait_for_portfolio_tab_to_load()\n #and thence to the tab we were on before\n self.click_named_portfolio_tab(currentPortTabName)\n \n #clear last deal text from statedict so it doesn't try to find it on a different portfolio tab\n if self.statedict.has_key('LastClickedDealLinkText'):\n del self.statedict['LastClickedDealLinkText'] \n \n def sign_out(self):\n sign_out_link = self.driver.find_element_by_link_text(self.logoutLinkText)\n sign_out_link.click()\n \nclass 
DealPage(BasePage):\n \"\"\"Page of reports for individual deal\"\"\"\n \n #class constants, could be initialised from db or config file for easier updating\n db_deal_name = 'Not set'\n trustee_deal_name = 'Not set'\n parentPortfolioTab = 'Not set'\n reportTabsID = 'deal-tabs'\n reportTabsXpath = '//ul[@id=\"deal-tabs\"]//li'\n reportListXpath = '//form/table[contains(@id,\"results-table\")]//tbody/tr'\n reportTabsToIgnore = ['Investor Relations', 'Deal Documents', 'P & I Factors']\n \n def __init__(self, driver, db_interaction, statedict, dealname, trusteedealname, portfoliotab, withInvestorNotices = False, withWebFaxes = False):\n BasePage.__init__(self, driver, db_interaction, statedict)\n self.db_deal_name = dealname\n self.trustee_deal_name = trusteedealname\n self.parentPortfolioTab = portfoliotab\n if withInvestorNotices is False:\n self.reportTabsToIgnore.append('Investor Notices')\n if withWebFaxes is False:\n self.reportTabsToIgnore.append('Web Faxes') #can't handle these at all yet, but watch this space\n \n def get_report_tabs(self): \n \"\"\"Return List of tab texts\"\"\" \n WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.ID, self.reportTabsID)))\n result = []\n tabs = self.driver.find_elements_by_xpath(self.reportTabsXpath)\n for t in tabs:\n result.append(t.text)\n return result \n \n def get_currently_selected_tab(self):\n \"\"\"Return webelement representing the currently selected tab\"\"\"\n WebDriverWait(self.driver, 10).until(selenium_toolkit.xpath_attribute_to_change('//li', '@class', 'ui-state-processing'))\n myreptab = self.driver.find_element_by_xpath('//li[contains(@class, \"ui-tabs-selected\")]')\n return myreptab\n \n def navigate(self, deal_map, deal_index, dl_handler, deal_link_text):\n \"\"\"Navigate a deal page, iterating over desired tabs and checking and downloading files\"\"\"\n \n #we need some bits here to hold info about the whole collection of files downloaded for a given deal\n files_list = []\n check_names_list = []\n download_counter = 0\n reportTabs = self.get_report_tabs()\n #see if we are trying to start again from a previous navigation\n if self.statedict.has_key('CurrentReportTab'):\n try:\n saved_rep_tab_index = reportTabs.index(self.statedict['CurrentReportTab'])\n reportTabs = reportTabs[saved_rep_tab_index:]\n except ValueError:\n logging.warning('Saved report tab not found in list - defaulting to beginning')\n #now go through tabs, ignoring any in the ignore list\n for tab in [x for x in reportTabs if x not in self.reportTabsToIgnore]:\n self.statedict['CurrentReportTab'] = tab\n selenium_toolkit.click_link_by_text(self.driver, tab) \n currentRepTabName = self.get_currently_selected_tab().text\n logging.info('Current report tab name: ' + currentRepTabName)\n #deal name is db version to match previous code\n selenium_toolkit.take_deal_screenshot(self.driver, dl_handler, deal_index[deal_link_text], currentRepTabName)\n try:\n reportsFound = self.get_report_definition_list()\n except TimeoutException:\n #this could be abnormally slow to load, but they might have revoked access to this deal\n #if the latter we'll try to skip over it please\n if selenium_toolkit.element_exists_by_xpath(self.driver, '//p[contains(text(),\"You do not have access to this deal or feature.\")]'):\n logging.warning('No access to deal reports')\n continue\n else:\n raise\n \n logging.info(str(len(reportsFound)) + ' reports found')\n \n for r in reportsFound:\n for rep_def in reportsFound[r]:\n self.statedict['LatestCheckedReportName'] = r \n #oh 
                    #the site strips the parentheses from '(zip)' in report names, so mirror that\n                    #here to stay backwards-compatible with the existing download log; a blanket\n                    #'('/')' replace would be wrong because other report names legitimately contain parentheses\n                    usb_name_before_dl = rep_def.name_pre_download.replace('(zip)', 'zip')\n                    rep_def.report_opts['namePreDL'] = usb_name_before_dl\n                    logging.info('Generated report name: ' + repr(usb_name_before_dl))\n                    already_downloaded = dl_handler.report_in_download_log(usb_name_before_dl)\n                    logging.info('Found in download log: ' + str(already_downloaded))\n                    if already_downloaded is False:\n                        old_dl_count = download_counter\n                        target_builder = file_handling.DownloadTargetBuilder(deal_map, deal_index[deal_link_text], rep_def.report_opts)\n                        download_counter = self.download_report(target_builder, dl_handler, files_list, download_counter, r, \n                                                                rep_def.file_extension, rep_def.report_opts, deal_map)\n                        if download_counter > old_dl_count:\n                            #2016-05-20: only add to the download log if the download succeeds, even when the\n                            #failure reason is 'too old for download'; making download_report return a bool\n                            #broke its callers, so comparing counters stands in for that for now\n                            check_names_list.append(usb_name_before_dl)\n        else:\n            #clear this out so we don't jump straight to this one tab for each subsequent deal\n            del self.statedict['CurrentReportTab']\n        #we've now iterated over each tab and through each report on each tab, downloading everything\n        #we want - pause to check the collection over and tidy it up\n        try:\n            WebDriverWait(self.driver, 60).until(file_handling.file_download_finished(dl_handler))\n        except TimeoutException:\n            logging.warning('Possible incomplete downloads detected')\n        #move what we can move, incrementing the initial download folder name if necessary\n        if download_counter > 0:\n            dl_handler.package_deal_downloads(self.db_deal_name, deal_map[self.db_deal_name]['dealId'], files_list, \n                                              download_counter, check_names_list)\n            self.statedict['DownloadCounts'][self.statedict['CurrentBusGrp']] += download_counter\n    \n    def get_report_table_elements(self):\n        \"\"\"Get a list of line elements of the table of results shown when a report tab is opened.\n        Since many of the results have the same xpath structure whether or not they are on the\n        current tab, test for visibility of each element before adding it to the results.\n        \"\"\"\n        results = []\n        #the results table is animated, fading to visibility once loaded - one in, all in as it were\n        WebDriverWait(self.driver, 5).until(EC.presence_of_all_elements_located((By.XPATH, self.reportListXpath)))\n        for el in self.driver.find_elements_by_xpath(self.reportListXpath):\n            #another table which sometimes has paging options (but not always!) - worth keeping in mind\n            if el.is_displayed():\n                results.append(el)\n        return results\n    \n    def get_report_definition_list(self):\n        \"\"\"Return dictionary of report definitions keyed on the report names\"\"\"\n        results = {}\n        reports = self.get_report_table_elements()\n        \n        for r in reports:\n            rname = r.find_element_by_xpath('td[1]').text\n            #'electronic data file' instances apparently need to be ignored\n            if 'electronic data file' in rname.lower():\n                continue\n            \n            results[rname] = []\n            \n            rdate = r.find_element_by_xpath('td[2]').text\n            #multiple download links for different formats of the same report are squeezed into one table cell\n            rexts = r.find_element_by_xpath('td[3]').text.split(' ')\n            for rext in rexts: \n                rdef = ReportDefinition(self.trustee_deal_name, rname, rdate, rext, self.parentPortfolioTab, \n                                        self.get_currently_selected_tab().text, self.get_currently_selected_tab().text)\n                \n                results[rname].append(rdef)\n        \n
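        #e.g. a 'Remittance Report' row dated 03/25/2016 offering 'PDF XLS' links would yield\n        #{'Remittance Report': [<ReportDefinition ext='PDF'>, <ReportDefinition ext='XLS'>]}\n        #(illustrative values only)\n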
        return results\n    \n    def download_report(self, target_builder, dl_handler, files_list, download_counter, reportText, ext, report_opts, deal_map):\n        targets = target_builder.make_download_target_list()\n        try:\n            self.click_report_download_link(reportText, ext, dl_handler, download_counter)\n            WebDriverWait(self.driver, 60).until(file_handling.file_download_finished(dl_handler))\n            dl_handler.stage_latest_download(files_list, report_opts['namePreDL'], targets, self.db_deal_name, \n                                             deal_map, self.db_interaction, \n                                             datetime.datetime.strptime(report_opts['date'], '%m/%d/%Y').strftime('%Y-%m-%d'))\n            download_counter += 1\n        except NoDownloadLinkError:\n            #the report may predate the spider (so it isn't in the log) yet offer no download\n            #link at all; note it, but don't treat it as fatal\n            logging.warning('No download link detected')\n\n        return download_counter\n    \n    def click_report_download_link(self, reportText, ext, dlHandler, filecount):\n        \"\"\"Click the correct link to download a report with the supplied name. \n        Handles both a single click-to-download and the scenario when a disclaimer page is hit.\n        \n        :param reportText: the website-given name of the report, *not* the text of the download link\n        :type reportText: str\n        :param ext: file extension of the target report, which the website uses as the text of the download link\n        :type ext: str\n        :param dlHandler: spider-level instance of the DownloadHandler class\n        :param filecount: number of reports already downloaded on this page. Used to check that the download\n            has successfully started.\n        :type filecount: int\n        \"\"\" \n        logging.info('Attempting download')\n        #first find the element with the report name text; the displayed text sometimes differs\n        #from the xpath match by stray whitespace, so try the direct xpath first and fall back\n        #to scanning the table\n        try:\n            nameElement = self.driver.find_element_by_xpath('//td[contains(text(),\"' + reportText + '\")]') \n            #then get a reference to its table row ancestor element\n            reportTableRowElement = nameElement.find_element_by_xpath(\".//ancestor::tr\")\n        except NoSuchElementException:\n            #no xpath match, so reload the collection and go looking for the report text\n            reps = self.get_report_table_elements()\n            for r in reps:\n                rname = r.find_element_by_xpath('td[1]').text\n                if rname == reportText:\n                    reportTableRowElement = r\n                    break\n        \n        #now get the report download link within the table row with the supplied extension string as link text \n        try: \n            replink = reportTableRowElement.find_element_by_xpath('td[3]/a[contains(text(),\"' + ext + '\")]')\n        except NoSuchElementException:\n            raise spider_exceptions.NoDownloadLinkError\n        \n        replink.click()\n        #at this point USBank sometimes throws you to a disclaimer page - check for this and make it go away\n        try:\n            #wait a while to see if a file is being downloaded\n            WebDriverWait(self.driver, 20).until(file_handling.file_download_started(dlHandler, filecount))\n            logging.info('File download started')\n        except exceptions.TimeoutException:\n            #and if it isn't, check for the disclaimer page\n            if 'madDisclaimer' in str(self.driver.current_url):\n                self.dismiss_mad_disclaimer()\n                #the site itself names this page 'madDisclaimer'; what 'mad' stands for is unclear\n            else:\n
                #anything could have happened - pass it up to the outer wrapper for logging/restart\n                raise\n    \n    def dismiss_mad_disclaimer(self):\n        #first we need to 'accept' the disclaimer\n        subButton = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.ID, 'btn_submit_accepted')))\n        subButton.click()\n        #then actually dismiss it - at which point the download should commence\n        closeButton = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.ID, 'btn_close')))\n        closeButton.click()\n        #now get back to whichever report tab we were on - navigate back (in case of error) and grab it from the statedict:\n        self.go_to_page()\n        WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.ID, self.reportTabsID)))\n        selenium_toolkit.click_link_by_text(self.driver, self.statedict['CurrentReportTab'])\n    ","sub_path":"USBank_pages.py","file_name":"USBank_pages.py","file_ext":"py","file_size_in_byte":22657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"627818147","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nimport sqlite3\r\nimport datetime\r\nfrom xlwt import Workbook\r\nimport os\r\nimport pandas as pd\r\n\r\n# connect to the database; on failure print the error\r\ntry:\r\n    #connect = sqlite3.connect(\"X:/finale.db\") \r\n    connect = sqlite3.connect(\"finale.db\")\r\n    cur = connect.cursor()\r\nexcept sqlite3.Error as e:\r\n    print(e)\r\n\r\ndef Is_checkStruct():\r\n    '''\r\n    toggles the state of the structure-related widgets\r\n    '''\r\n    if chkValueStruct.get() == True:\r\n        ComboStruct.config(state='normal')\r\n        BtnAdd.config(state='normal')\r\n        BtnDel.config(state='normal')\r\n    else:\r\n        ComboStruct.config(state='disabled')\r\n        BtnAdd.config(state='disabled')\r\n        BtnDel.config(state='disabled')\r\n\r\ndef Is_check1():\r\n    '''\r\n    toggles the widgets tied to entering a single INSEE code\r\n    '''\r\n    if chkInseeOne.get() == True:\r\n        EntryInsee.config(state='normal')\r\n        CheckMany.config(state='disabled')\r\n    else:\r\n        EntryInsee.config(state='disabled')\r\n        CheckMany.config(state='normal')\r\n\r\ndef Is_check2():\r\n    '''\r\n    toggles the widgets tied to entering several INSEE codes\r\n    '''\r\n    if chkInseeMany.get() == True:\r\n        BtnInsee.config(state='normal')\r\n        CheckOne.config(state='disabled')\r\n    else:\r\n        BtnInsee.config(state='disabled')\r\n        CheckOne.config(state='normal')\r\n        listInsee.delete(0,END)\r\n        LabelFile.config(text='')\r\n\r\ndef FileInsee():\r\n    '''\r\n    picks the .txt file holding the several INSEE codes and loads them into the listbox\r\n    '''\r\n    filename = filedialog.askopenfilename(initialdir = \"/\",\r\n                                          title=\"sélectionner votre fichier\",\r\n                                          filetypes=((\"Fichier Texte\",\"*.txt\"),\r\n                                                     (\"Fichier pdf\",\"*.pdf\")))\r\n\r\n    LabelFile.config(text = filename)\r\n    fichier = open(str(filename), \"r\")\r\n    lines = fichier.readlines()\r\n    fichier.close()\r\n    listInsee.delete(0,END)\r\n    i=1\r\n    for ligne in lines:\r\n        listInsee.insert(i,ligne.strip())\r\n        i+=1\r\n\r\ndef RequeteStruct():\r\n    '''\r\n    runs the SQL queries matching the first window's choices and returns the list of requested structures\r\n    '''\r\n    ListStruct = []\r\n    if chkInseeOne.get() == True and chkValueStruct.get() == False: # user ticked only the single-INSEE-code checkbox\r\n        val = EntryInsee.get()\r\n
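        # NB: interpolating values into SQL strings like this is injection-prone; sqlite3's\r\n        # parameter binding, e.g. cur.execute(\"... WHERE Code = ?\", (val,)), would be safer\r\n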
        cur.execute(\"SELECT DISTINCT Recyclerie FROM Organisation, Insee, Commune WHERE Organisation.Id_Recyclerie = Commune.Id_Recyclerie AND Commune.Id_insee = Insee.Id_Insee AND Code = '%s' \"%\\\r\n                    (val))\r\n        StructList = cur.fetchall()\r\n        ListStruct = [struct[0] for struct in StructList]\r\n        \r\n    elif chkInseeMany.get() == True and chkValueStruct.get() == False: # user ticked only the several-INSEE-codes checkbox\r\n        val = listInsee.get(0, END)\r\n        for code in val:\r\n            cur.execute(\"SELECT Recyclerie FROM Organisation, Insee, Commune WHERE Organisation.Id_Recyclerie = Commune.Id_Recyclerie AND Commune.Id_insee = Insee.Id_Insee AND Code = '%s'\"%\\\r\n                        (code))\r\n            StructList = cur.fetchall()\r\n            for struct in StructList:\r\n                ListStruct.append(struct[0])\r\n\r\n    elif chkInseeOne.get() == True and chkValueStruct.get() == True: # user ticked the single-INSEE-code checkbox plus the structure-selection checkbox\r\n        val = EntryInsee.get()\r\n        cur.execute(\"SELECT Recyclerie FROM Organisation, Insee, Commune WHERE Organisation.Id_Recyclerie = Commune.Id_Recyclerie AND Commune.Id_insee = Insee.Id_Insee AND Code = '%s'\"%\\\r\n                    (val))\r\n        StructList = cur.fetchall()\r\n        ListStruct = [struct[0] for struct in StructList]\r\n        print(ListStruct)\r\n\r\n    elif chkInseeMany.get() == True and chkValueStruct.get() == True: # user ticked the several-INSEE-codes checkbox plus the structure-selection checkbox\r\n        # TODO: this combination is not implemented yet; placeholder debug output\r\n        print('kl')\r\n\r\n    elif chkValueStruct.get() == True and chkInseeOne.get() == False and chkInseeMany.get() == False: # user ticked only the structure-selection checkbox\r\n        StructList = listStruct.get(0,END)\r\n        ListStruct = [s for s in StructList]\r\n        \r\n    return list(set(ListStruct))\r\n\r\ndef ModaliteCollect(chkValue1, chkValue4, ComboModalite, LabelModalite, BtnAddModal, BtnDelModal, ListBoxModal):\r\n    '''\r\n    shows the modality widgets when either collection checkbox is ticked, hides them otherwise\r\n    '''\r\n    if chkValue1.get() == True:\r\n        ComboModalite.grid(row=7,column=2)\r\n        LabelModalite.grid(row=7,column=1)\r\n        BtnAddModal.grid(row=7,column=3)\r\n        BtnDelModal.grid(row=7,column=4)\r\n        ListBoxModal.grid(row=7,column=5)\r\n    elif chkValue4.get() == True:\r\n        ComboModalite.grid(row=7,column=2)\r\n        LabelModalite.grid(row=7,column=1)\r\n        BtnAddModal.grid(row=7,column=3)\r\n        BtnDelModal.grid(row=7,column=4)\r\n        ListBoxModal.grid(row=7,column=5)\r\n    else:\r\n        ComboModalite.grid_forget()\r\n        LabelModalite.grid_forget()\r\n        BtnAddModal.grid_forget()\r\n        BtnDelModal.grid_forget()\r\n        ListBoxModal.grid_forget()\r\n\r\ndef new_window():\r\n    '''\r\n    builds the second window\r\n    '''\r\n    ListStruct = RequeteStruct()\r\n\r\n    FirstFen.destroy() # destroy the first window\r\n    SecondFen = Tk() # initialise the second window\r\n    SecondFen.title(\"Options d'importation\")\r\n\r\n    # category section:\r\n    LabelCat = Label(SecondFen, text='Catégorie :', font = 60)\r\n    LabelCat.grid(row=0,column=0, ipady = 30)\r\n\r\n    ListCategorieBox = [\"tout\"] # initialise the combobox list ('tout' = select all)\r\n    cur.execute(\"SELECT Catégorie FROM Catégorie\")\r\n    Cat = cur.fetchall()\r\n    for row in Cat:\r\n        ListCategorieBox.append(row[0])\r\n    \r\n    Combo = ttk.Combobox(SecondFen, values = ListCategorieBox, width=29)\r\n    Combo.set(\"Choisissez la/les catégorie(s)\")\r\n    Combo.grid(row=0,column = 1)\r\n    Combo.bind(\"<<ComboboxSelected>>\", lambda e: Combo.get())\r\n\r\n    BtnAdd2 = Button(SecondFen, text='Ajouter', command=lambda:addCat(listCat, Combo, 
ListCategorieBox))\r\n    BtnAdd2.grid(row=0,column=2)\r\n\r\n    BtnDel2 = Button(SecondFen, text='Supprimer', command=lambda:delCat(listCat))\r\n    BtnDel2.grid(row=0,column=3,padx=5)\r\n\r\n    listCat = Listbox(SecondFen, width=50)\r\n    listCat.grid(row=0, column=5)\r\n\r\n    # time section:\r\n    Labeltemps = Label(SecondFen, text='Temps :', font = 60)\r\n    Labeltemps.grid(row=1,column=0, ipady = 10)\r\n\r\n    cur.execute(\"SELECT Min(date) FROM Arrivage\")\r\n    date_bdd = cur.fetchone()[0]\r\n    an_bdd = date_bdd[:4]\r\n    an=datetime.date.today().year \r\n    an=int(an) \r\n    jours,mois,ans=[str(i).zfill(2) for i in range(1,32)],[str(i).zfill(2) for i in range(1,13)],[i for i in range(int(an_bdd),an+1)]\r\n    mess_debut=Label(SecondFen,text=\"choisissez la date de début :\")\r\n    mess_debut.grid(row=2,column=1, padx = 10)\r\n    choixjour1=ttk.Combobox(SecondFen,values=jours)\r\n    choixjour1.grid(row=2,column=2)\r\n    choixjour1.set('01')\r\n    choixmois1=ttk.Combobox(SecondFen,values=mois)\r\n    choixmois1.grid(row=2,column=3)\r\n    choixmois1.set('01')\r\n    choixan1=ttk.Combobox(SecondFen,values=ans)\r\n    choixan1.grid(row=2,column=4)\r\n    choixan1.set(int(an_bdd))\r\n    mess_fin=Label(SecondFen,text=\"choisissez la date de fin :\")\r\n    mess_fin.grid(row=3,column=1, pady = 10)\r\n    choixjour2=ttk.Combobox(SecondFen,values=jours)\r\n    choixjour2.grid(row=3,column=2)\r\n    choixjour2.set('31')\r\n    choixmois2=ttk.Combobox(SecondFen,values=mois)\r\n    choixmois2.grid(row=3,column=3)\r\n    choixmois2.set('12')\r\n    choixan2=ttk.Combobox(SecondFen,values=ans)\r\n    choixan2.grid(row=3,column=4)\r\n    choixan2.set(an)\r\n\r\n    # quantitative section:\r\n    LabelQte = Label(SecondFen, text='Quantitative :', font = 60)\r\n    LabelQte.grid(row=4,column=0, ipady = 40,padx= 5)\r\n\r\n    LabelModalite = Label(SecondFen, text='Modalité(s) :')\r\n    ListModaliteCombo = ['tout']\r\n    cur.execute('SELECT DISTINCT origine FROM Arrivage')\r\n    Modal = cur.fetchall()\r\n    for row in Modal:\r\n        if row[0] != '0':\r\n            ListModaliteCombo.append(row[0]) \r\n    ComboModalite = ttk.Combobox(SecondFen, values = ListModaliteCombo, width=25)\r\n    ComboModalite.set(\"Choisissez la/les modalité(s)\")\r\n    BtnAddModal = Button(SecondFen, text='ajouter',command=lambda:addModal(ListBoxModal, ComboModalite, ListModaliteCombo))\r\n    BtnDelModal = Button(SecondFen, text='supprimer',command=lambda:delModal(ListBoxModal))\r\n    ListBoxModal = Listbox(SecondFen, width=30)\r\n\r\n    chkValue1 = BooleanVar()\r\n    chkValue1.set(False)\r\n    chkValue2 = BooleanVar()\r\n    chkValue2.set(False)\r\n    chkValue3 = BooleanVar()\r\n    chkValue3.set(False)\r\n    chkValue4 = BooleanVar()\r\n    chkValue4.set(False)\r\n    chkValue5 = BooleanVar()\r\n    chkValue5.set(False)\r\n    Chkbox1 = Checkbutton(SecondFen, text = 'Poids collecté (en Kg)', var = chkValue1, command=lambda:ModaliteCollect(chkValue1, chkValue4, ComboModalite, LabelModalite, BtnAddModal, BtnDelModal, ListBoxModal))\r\n    Chkbox1.grid(row=5,column=1)\r\n    Chkbox2 = Checkbutton(SecondFen, text = 'Poids vendu (en Kg)', var = chkValue2)\r\n    Chkbox2.grid(row=5,column=2)\r\n    Chkbox3 = Checkbutton(SecondFen, text = 'Chiffre d\\'affaire (en €)', var = chkValue3)\r\n    Chkbox3.grid(row=5,column=3)\r\n    Chkbox4 = Checkbutton(SecondFen, text = 'Nombre d\\'objet collecté', var = chkValue4, command=lambda:ModaliteCollect(chkValue1,chkValue4, ComboModalite, LabelModalite, BtnAddModal, BtnDelModal, ListBoxModal))\r\n    Chkbox4.grid(row=6,column=1)\r\n    Chkbox5 = Checkbutton(SecondFen, text = 'Nombre d\\'objet vendu', var = chkValue5)\r\n    Chkbox5.grid(row=6,column=2)\r\n\r\n    BtnExport = 
Button(SecondFen, text='Exporter', command=lambda:export(SecondFen, listCat, choixjour1, choixmois1, choixan1, choixjour2, choixmois2, choixan2, ListStruct, chkValue1, chkValue2, chkValue3, chkValue4, chkValue5, ListBoxModal))\r\n    BtnExport.grid(row=10,column=5, padx = 40, pady = 20)\r\n\r\n    SecondFen.mainloop()\r\n\r\ndef addModal(ListBoxModal, ComboModalite, ListModaliteCombo):\r\n    '''\r\n    'ajouter' button callback for the modalities\r\n    '''\r\n    listFiles = ListBoxModal.get(0,END)\r\n    value = ComboModalite.get()\r\n    if ComboModalite.get() == 'tout':\r\n        for modal in ListModaliteCombo:\r\n            if modal not in listFiles:\r\n                ListBoxModal.insert(END, modal)\r\n        idx = ListBoxModal.get(0, END).index('tout')\r\n        ListBoxModal.delete(idx)\r\n\r\n    if value not in listFiles and value != \"Choisissez la/les modalité(s)\" and value != 'tout':\r\n        ListBoxModal.insert(END, value)\r\n\r\ndef delModal(ListBoxModal):\r\n    '''\r\n    'supprimer' button callback for the modalities\r\n\r\n    Arguments:\r\n        ListBoxModal {listbox} -- listbox holding the selected modalities\r\n    '''\r\n    itemSelected = ListBoxModal.curselection()\r\n    ListBoxModal.delete(itemSelected[0])\r\n\r\ndef addCat(listCat, Combo, ListCatBox):\r\n    '''\r\n    'Ajouter' button callback for the categories\r\n\r\n    Arguments:\r\n        listCat {Listbox} -- listbox holding the selected categories\r\n        Combo {Combobox} -- combobox listing the categories from the SQL database\r\n        ListCatBox {list[str]} -- list of the categories from the SQL database\r\n    '''\r\n    listFiles = listCat.get(0,END)\r\n    value = Combo.get()\r\n    if Combo.get() == 'tout':\r\n        for cat in ListCatBox:\r\n            if cat not in listFiles:\r\n                listCat.insert(END, cat)\r\n        idx = listCat.get(0, END).index('tout')\r\n        listCat.delete(idx)\r\n\r\n    if value not in listFiles and value != 'Choisissez la/les catégorie(s)' and value != 'tout':\r\n        listCat.insert(END, value)\r\n\r\ndef delCat(listCat):\r\n    '''\r\n    'Supprimer' button callback for the categories\r\n\r\n    Arguments:\r\n        listCat {listbox} -- listbox holding the selected categories\r\n    '''\r\n    itemSelected = listCat.curselection()\r\n    listCat.delete(itemSelected[0])\r\n\r\ndef addStruct():\r\n    '''\r\n    'Ajouter' button callback for the structures\r\n    '''\r\n    listFiles = listStruct.get(0,END)\r\n    value = ComboStruct.get()\r\n    if ComboStruct.get() == 'tout':\r\n        for struct in ListRecyclerieBox:\r\n            if struct not in listFiles:\r\n                listStruct.insert(END, struct)\r\n        idx = listStruct.get(0, END).index('tout')\r\n        listStruct.delete(idx)\r\n    \r\n    if value not in listFiles and value != 'tout' and value != 'Choisissez la/les structure(s)':\r\n        listStruct.insert(END, value)\r\n\r\ndef delStruct():\r\n    '''\r\n    'Supprimer' button callback for the structures\r\n    '''\r\n    itemSelected = listStruct.curselection()\r\n    listStruct.delete(itemSelected[0])\r\n\r\ndef export(SecondFen, listCat, jour1, mois1, an1, jour2, mois2, an2, StructList, chk1, chk2, chk3, chk4, chk5, ListBoxModal):\r\n    '''\r\n    exports a CSV file holding the requested data\r\n\r\n    Arguments:\r\n        SecondFen {Tk} -- the second window\r\n        listCat {listbox} -- listbox holding the chosen categories\r\n        StructList {list[str]} -- list holding the chosen structures\r\n        ListBoxModal {listbox} -- listbox holding the chosen modalities\r\n    '''\r\n    filename = filedialog.asksaveasfilename(defaultextension = '.xls',\r\n                                            filetypes = [(\"xls files\", '*.xls')])\r\n    \r\n    sizeCat = listCat.size()\r\n\r\n    ListCategorie = [c for c in listCat.get(0,END)] # list of the selected categories\r\n    ListModal = [m for m in ListBoxModal.get(0,END)] # list of the selected modalities\r\n\r\n    jourfirst = jour1.get()\r\n    moisfirst = mois1.get()\r\n    anfirst = an1.get()\r\n    joursec = jour2.get()\r\n    moissec = mois2.get()\r\n    ansec = an2.get()\r\n    dateFirst = anfirst + \"/\" + moisfirst + \"/\" + jourfirst\r\n    dateEnd = ansec + \"/\" + moissec + \"/\" + joursec\r\n
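    # the dates are assembled as YYYY/MM/DD text, so the \"date > '...'\" / \"date < '...'\"\r\n    # comparisons in the queries below only behave like date comparisons because that\r\n    # layout happens to sort lexicographically\r\n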
    classeurexport=Workbook()\r\n    pageexport=classeurexport.add_sheet(\"EXPORT\", cell_overwrite_ok=True)\r\n    if sizeCat == 0: # no categories were selected, so export the overall totals\r\n        pageexport.write(0,0,\"Structure(s)\")\r\n        TailleCat = len(ListCategorie)\r\n        TailleStruct = len(StructList)\r\n        TailleModal = len(ListModal)\r\n        y = 1\r\n        if chk1.get() == True and TailleModal != 0: \r\n            pageexport.write(0,y,\"Modalité(s)\")\r\n            pageexport.write(0,y+1,\"Poids collecté (en kg)\")\r\n            indice = 0 \r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                indice2 = 0\r\n                while (indice2 != TailleModal):\r\n                    cur.execute(\"SELECT sum(Poids)*nombre FROM Produit, Arrivage, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s' AND origine = '%s'\"%\\\r\n                                (StructList[indice], str(dateFirst),str(dateEnd), ListModal[indice2]))\r\n                    PoidsCollect = cur.fetchone()[0]\r\n                    pageexport.write(x,1,ListModal[indice2])\r\n                    pageexport.write(x,0,StructList[indice])\r\n                    if PoidsCollect == None:\r\n                        pageexport.write(x, y+1, 'NR')\r\n                    else:\r\n                        pageexport.write(x, y+1, round(PoidsCollect))\r\n                    indice2+=1\r\n                    x+=1\r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        elif chk1.get() == True and TailleModal == 0:\r\n            pageexport.write(0,y,\"Poids collecté (en kg)\")\r\n            indice = 0 \r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                cur.execute(\"SELECT sum(Poids)*nombre FROM Produit, Arrivage, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s'\"%\\\r\n                            (StructList[indice], str(dateFirst),str(dateEnd)))\r\n                PoidsCollect = cur.fetchone()[0]\r\n                pageexport.write(x,0,StructList[indice])\r\n                if PoidsCollect == None:\r\n                    pageexport.write(x, y, 'NR')\r\n                else:\r\n                    pageexport.write(x, y, round(PoidsCollect))\r\n                x+=1\r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        if chk2.get() == True:\r\n            pageexport.write(0,y,\"Poids vendu (en kg)\")\r\n            indice = 0\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                cur.execute(\"SELECT sum(Lignes_vente.Poids) FROM Lignes_vente, Vente, Organisation WHERE Recyclerie = '%s' AND Vente.Id_recyclerie=Organisation.Id_recyclerie AND Lignes_vente.Id_vente = Vente.Id_Vente AND date > '%s' AND date < '%s'\"%\\\r\n                            (StructList[indice], str(dateFirst),str(dateEnd)))\r\n                PoidsVendu = cur.fetchone()[0]\r\n                pageexport.write(x,0,StructList[indice])\r\n                if PoidsVendu == None:\r\n                    pageexport.write(x, y, 'NR')\r\n                else:\r\n                    pageexport.write(x, y, round(PoidsVendu))\r\n                indice+=1\r\n                x+=1\r\n            y+=1\r\n        if chk3.get() == True:\r\n            pageexport.write(0,y,\"Chiffre d'affaire (en €)\")\r\n            indice = 0\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                cur.execute(\"SELECT sum(Montant) FROM Vente, Lignes_vente, Organisation WHERE 
Recyclerie = '%s' AND Vente.Id_recyclerie=Organisation.Id_recyclerie AND Lignes_vente.Id_vente=Vente.Id_Vente AND date > '%s' AND date < '%s'\"%\\\r\n                            (StructList[indice], str(dateFirst),str(dateEnd)))\r\n                Chiffre = cur.fetchone()[0]\r\n                pageexport.write(x,0,StructList[indice])\r\n                if Chiffre == None:\r\n                    pageexport.write(x, y, 'NR')\r\n                else:\r\n                    pageexport.write(x, y, round(Chiffre))\r\n                indice+=1\r\n                x+=1\r\n            y+=1\r\n        if chk4.get() == True and TailleModal != 0:\r\n            if chk1.get() == False:\r\n                pageexport.write(0,y,\"Modalité(s)\")\r\n            pageexport.write(0,y+1,\"Nombre d'objet collecté\")\r\n            indice = 0\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                indice2 = 0\r\n                while (indice2 != TailleModal):\r\n                    cur.execute(\"SELECT count(Id_Produit)*nombre FROM Produit, Arrivage, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s' AND origine = '%s'\"%\\\r\n                                (StructList[indice], str(dateFirst),str(dateEnd), ListModal[indice2]))\r\n                    NbrCollect = cur.fetchone()[0]\r\n                    pageexport.write(x,1,ListModal[indice2])\r\n                    pageexport.write(x,0,StructList[indice])\r\n                    if NbrCollect == None:\r\n                        pageexport.write(x, y+1, 'NR')\r\n                    else:\r\n                        pageexport.write(x, y+1, round(NbrCollect))\r\n                    indice2+=1\r\n                    x+=1\r\n                indice+=1\r\n            y+=1\r\n        elif chk4.get() == True and TailleModal == 0:\r\n            pageexport.write(0,y,\"Nombre d'objet collecté\")\r\n            indice = 0\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                cur.execute(\"SELECT count(Id_Produit)*nombre FROM Produit, Arrivage, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s'\"%\\\r\n                            (StructList[indice], str(dateFirst),str(dateEnd)))\r\n                NbrCollect = cur.fetchone()[0]\r\n                pageexport.write(x,0,StructList[indice])\r\n                if NbrCollect == None:\r\n                    pageexport.write(x, y, 'NR')\r\n                else:\r\n                    pageexport.write(x, y, round(NbrCollect))\r\n                x+=1\r\n                indice+=1\r\n            y+=1\r\n        if chk5.get() == True:\r\n            pageexport.write(0,y,\"Nombre d'objet vendu\")\r\n            indice = 0\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                cur.execute(\"SELECT count(Id_ligne_vente) FROM Lignes_vente, Vente, Organisation WHERE Recyclerie = '%s' AND Vente.Id_recyclerie=Organisation.Id_recyclerie AND Vente.Id_Vente=Lignes_vente.Id_vente AND date > '%s' AND date < '%s'\"%\\\r\n                            (StructList[indice], str(dateFirst),str(dateEnd)))\r\n                NbrVente = cur.fetchone()[0]\r\n                pageexport.write(x,0,StructList[indice])\r\n                if NbrVente == None:\r\n                    pageexport.write(x, y, 'NR')\r\n                else:\r\n                    pageexport.write(x, y, round(NbrVente))\r\n                indice+=1\r\n                x+=1\r\n            y+=1\r\n    else:\r\n        x = 1\r\n        pageexport.write(0,0,\"Structure(s)\")\r\n        pageexport.write(0,1,\"Catégorie(s)\")\r\n        TailleCat = len(ListCategorie)\r\n        TailleStruct = len(StructList)\r\n        TailleModal = len(ListModal)\r\n        y = 2\r\n
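        # (what follows first is an earlier, aggregated-by-category draft of the collected-weight\r\n        # export, apparently kept commented out for reference)\r\n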
        '''\r\n        if chk1.get() == True:\r\n            indice = 0 # index over the category list\r\n            pageexport.write(0,y,\"Poids collecté (en kg)\")\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleCat):\r\n                indice2 = 0 # index over the structure list\r\n                val = 0 # accumulator for the query results\r\n                while (indice2 != TailleStruct):\r\n                    cur.execute(\"SELECT sum(Poids)*nombre FROM Produit, Catégorie, Arrivage, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_catégorie = Catégorie.Id_catégorie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s' AND Catégorie = '%s' GROUP BY Catégorie.Id_catégorie\"%\\\r\n                                (StructList[indice2], str(dateFirst),str(dateEnd),ListCategorie[indice]))\r\n                    PoidsCollect = cur.fetchall()\r\n                    if len(PoidsCollect) == 0:\r\n                        val+=0\r\n                    else:\r\n                        for poids in PoidsCollect:\r\n                            val+=poids[0]\r\n                    \r\n                    indice2+=1\r\n                    if indice2 == TailleStruct:\r\n                        if val != 0:\r\n                            pageexport.write(x, y, round(val))\r\n                            x +=1\r\n                        else:\r\n                            pageexport.write(x, y, 'NR')\r\n                            x +=1\r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        '''\r\n        if chk1.get() == True and TailleModal != 0:\r\n            indice = 0 # index over the structure list\r\n            pageexport.write(0,y,\"Modalité(s)\")\r\n            pageexport.write(0,y+1,\"Poids collecté (en kg)\")\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                indice2 = 0\r\n                while (indice2 != TailleCat):\r\n                    indice3 = 0 # index over the modality list\r\n                    while (indice3 != TailleModal):\r\n                        cur.execute(\"SELECT sum(Poids)*nombre FROM Produit, Catégorie, Arrivage, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_catégorie = Catégorie.Id_catégorie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s' AND Catégorie = '%s' AND origine = '%s' GROUP BY Catégorie.Id_catégorie\"%\\\r\n                                    (StructList[indice], str(dateFirst),str(dateEnd),ListCategorie[indice2], ListModal[indice3]))\r\n                        PoidsCollect = cur.fetchall()\r\n                        pageexport.write(x, 0, StructList[indice])\r\n                        pageexport.write(x, 2, ListModal[indice3])\r\n                        pageexport.write(x, 1, ListCategorie[indice2])\r\n                        if len(PoidsCollect) == 0:\r\n                            pageexport.write(x, y+1, 'NR')\r\n                            x+=1\r\n                        else:\r\n                            for poids in PoidsCollect:\r\n                                pageexport.write(x, y+1, round(poids[0]))\r\n                                x +=1 \r\n                        indice3+=1 \r\n                    indice2+=1 \r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        elif chk1.get() == True and TailleModal == 0:\r\n            indice = 0 # index over the structure list\r\n            pageexport.write(0,y,\"Poids collecté (en kg)\")\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                indice2 = 0\r\n                while (indice2 != TailleCat):\r\n                    cur.execute(\"SELECT sum(Poids)*nombre FROM Produit, Catégorie, Arrivage, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_catégorie = Catégorie.Id_catégorie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s' AND Catégorie = '%s' GROUP BY Catégorie.Id_catégorie\"%\\\r\n                                (StructList[indice], str(dateFirst),str(dateEnd),ListCategorie[indice2]))\r\n                    PoidsCollect = cur.fetchall()\r\n                    pageexport.write(x, 0, StructList[indice])\r\n                    pageexport.write(x, 1, ListCategorie[indice2])\r\n                    if len(PoidsCollect) == 0:\r\n                        pageexport.write(x, y, 'NR')\r\n                        x+=1\r\n                    else:\r\n                        for poids in PoidsCollect:\r\n                            pageexport.write(x, y, round(poids[0]))\r\n                            x +=1 \r\n                    indice2+=1 \r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        if chk2.get() == True:\r\n            indice = 0 # index over the structure list\r\n            pageexport.write(0,y,\"Poids vendu (en kg)\")\r\n            x = 1 # row index in the xls file\r\n
            while (indice != TailleStruct):\r\n                indice2 = 0 # index over the category list\r\n                while (indice2 != TailleCat):\r\n                    cur.execute(\"SELECT sum(Lignes_vente.Poids) FROM Lignes_vente, Vente, Catégorie, Organisation WHERE Recyclerie = '%s' AND Vente.Id_recyclerie=Organisation.Id_recyclerie AND Lignes_vente.Id_vente = Vente.Id_Vente AND Lignes_vente.Id_catégorie=Catégorie.Id_catégorie AND date > '%s' AND date < '%s' AND Catégorie = '%s' GROUP BY Catégorie.Id_catégorie\"%\\\r\n                                (StructList[indice], str(dateFirst),str(dateEnd),ListCategorie[indice2]))\r\n                    PoidsVente = cur.fetchall()\r\n                    pageexport.write(x, 0, StructList[indice])\r\n                    pageexport.write(x, 1, ListCategorie[indice2])\r\n                    if len(PoidsVente) == 0:\r\n                        pageexport.write(x, y, 'NR')\r\n                        x+=1\r\n                    else:\r\n                        for poids in PoidsVente:\r\n                            pageexport.write(x, y, round(poids[0]))\r\n                            x +=1 \r\n                    indice2+=1 \r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        if chk3.get() == True:\r\n            indice = 0 # index over the structure list\r\n            pageexport.write(0,y,\"Chiffre d'affaire (en €)\")\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                indice2 = 0 # index over the category list\r\n                while (indice2 != TailleCat):\r\n                    cur.execute(\"SELECT sum(Montant), Catégorie FROM Vente, Catégorie, Lignes_vente, Organisation WHERE Recyclerie = '%s' AND Vente.Id_recyclerie=Organisation.Id_recyclerie AND Lignes_vente.Id_vente=Vente.Id_Vente AND Lignes_vente.Id_catégorie=Catégorie.Id_catégorie AND date > '%s' AND date < '%s' AND Catégorie = '%s' GROUP BY Catégorie.Id_catégorie\"%\\\r\n                                (StructList[indice], str(dateFirst),str(dateEnd),ListCategorie[indice2]))\r\n                    Chiffre = cur.fetchall()\r\n                    pageexport.write(x, 0, StructList[indice])\r\n                    pageexport.write(x, 1, ListCategorie[indice2])\r\n                    if len(Chiffre) == 0:\r\n                        pageexport.write(x, y, 'NR')\r\n                        x+=1\r\n                    else:\r\n                        for c in Chiffre:\r\n                            pageexport.write(x, y, round(c[0]))\r\n                            x +=1 \r\n                    indice2+=1 \r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        if chk4.get() == True and TailleModal != 0:\r\n            indice = 0 # index over the structure list\r\n            if chk1.get() == False:\r\n                pageexport.write(0,y,\"Modalité(s)\")\r\n            pageexport.write(0,y+1,\"Nombre d'objet collecté\")\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                indice2 = 0 # index over the category list\r\n                while (indice2 != TailleCat):\r\n                    indice3 = 0 # index over the modality list\r\n                    while (indice3 != TailleModal):\r\n                        cur.execute(\"SELECT count(Id_Produit)*nombre FROM Produit, Arrivage, Catégorie, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_catégorie=Catégorie.Id_catégorie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s' AND Catégorie = '%s' AND origine = '%s' GROUP BY Catégorie.Id_catégorie\"%\\\r\n                                    (StructList[indice], str(dateFirst),str(dateEnd),ListCategorie[indice2], ListModal[indice3]))\r\n                        NbrCollect = cur.fetchall()\r\n                        pageexport.write(x, 0, StructList[indice])\r\n                        pageexport.write(x, 2, ListModal[indice3])\r\n                        pageexport.write(x, 1, ListCategorie[indice2])\r\n                        if len(NbrCollect) == 0:\r\n                            pageexport.write(x, y+1, 'NR')\r\n                            x+=1\r\n                        else:\r\n
                            for nbr in NbrCollect:\r\n                                pageexport.write(x, y+1, round(nbr[0]))\r\n                                x +=1\r\n                        indice3+=1\r\n                    indice2+=1 \r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        elif chk4.get() == True and TailleModal == 0:\r\n            indice = 0 # index over the structure list\r\n            pageexport.write(0,y,\"Nombre d'objet collecté\")\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                indice2 = 0 # index over the category list\r\n                while (indice2 != TailleCat):\r\n                    cur.execute(\"SELECT count(Id_Produit)*nombre FROM Produit, Arrivage, Catégorie, Organisation WHERE Recyclerie = '%s' AND Produit.Id_recyclerie=Organisation.Id_recyclerie AND Produit.Id_catégorie=Catégorie.Id_catégorie AND Produit.Id_arrivage=Arrivage.Id_arrivage AND date > '%s' AND date < '%s' AND Catégorie = '%s' GROUP BY Catégorie.Id_catégorie\"%\\\r\n                                (StructList[indice], str(dateFirst),str(dateEnd),ListCategorie[indice2]))\r\n                    NbrCollect = cur.fetchall()\r\n                    pageexport.write(x, 0, StructList[indice])\r\n                    pageexport.write(x, 1, ListCategorie[indice2])\r\n                    if len(NbrCollect) == 0:\r\n                        pageexport.write(x, y, 'NR')\r\n                        x+=1\r\n                    else:\r\n                        for nbr in NbrCollect:\r\n                            pageexport.write(x, y, round(nbr[0]))\r\n                            x +=1\r\n                    indice2+=1 \r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n        if chk5.get() == True:\r\n            indice = 0 # index over the structure list\r\n            pageexport.write(0,y,\"Nombre d'objet vendu\")\r\n            x = 1 # row index in the xls file\r\n            while (indice != TailleStruct):\r\n                indice2 = 0 # index over the category list\r\n                while (indice2 != TailleCat):\r\n                    cur.execute(\"SELECT count(Id_ligne_vente) FROM Lignes_vente, Vente, Catégorie, Organisation WHERE Recyclerie = '%s' AND Vente.Id_recyclerie=Organisation.Id_recyclerie AND Lignes_vente.Id_catégorie=Catégorie.Id_catégorie AND Vente.Id_Vente=Lignes_vente.Id_vente AND date > '%s' AND date < '%s' AND Catégorie = '%s' GROUP BY Catégorie.Id_catégorie\"%\\\r\n                                (StructList[indice], str(dateFirst),str(dateEnd),ListCategorie[indice2]))\r\n                    NbrVente = cur.fetchall()\r\n                    pageexport.write(x, 0, StructList[indice])\r\n                    pageexport.write(x, 1, ListCategorie[indice2])\r\n                    if len(NbrVente) == 0:\r\n                        pageexport.write(x, y, 'NR')\r\n                        x+=1\r\n                    else:\r\n                        for nbr in NbrVente:\r\n                            pageexport.write(x, y, round(nbr[0]))\r\n                            x +=1 \r\n                    indice2+=1 \r\n                indice+=1\r\n            y+=1 # move on to the next column for the next data set\r\n    try: # save the file, then convert it to csv\r\n        classeurexport.save(filename)\r\n        csv_file = os.path.splitext(filename)[0] + '.csv'\r\n        read_file = pd.read_excel(filename)\r\n        read_file.to_csv(csv_file, index = None, header=True)\r\n        os.remove(filename)\r\n        messagebox.showinfo('Exportation réussie','Votre fichier a bien été chargé')\r\n    except IOError as e:\r\n        messagebox.showerror('Exportation échouée','Votre fichier n\\'a pas pu se charger')\r\n        print(e)\r\n    SecondFen.destroy() # close the application window\r\n\r\nFirstFen = Tk() # initialise the first window\r\nFirstFen.title(\"Structure\")\r\n\r\nLabelStruct = Label(FirstFen, text='Structure :', font = 60)\r\nLabelStruct.grid(row=0,column=0, ipady = 30, padx = 5)\r\n\r\nchkValueStruct = BooleanVar()\r\nchkValueStruct.set(False)\r\n\r\nCheckStruct = Checkbutton(FirstFen, var=chkValueStruct, command=lambda:Is_checkStruct()) \r\nCheckStruct.grid(row=1, column=1, padx=4)\r\n\r\n
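# (the lambda wrappers around the zero-argument callbacks in this file are redundant;\r\n# e.g. command=Is_checkStruct would behave identically)\r\n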
ListRecyclerieBox = [\"tout\"] # initialise the structures-combobox list ('tout' = select all)\r\n\r\ncur.execute(\"SELECT Recyclerie FROM Organisation\") # fetch the structures stored in the database\r\nOrgaList=cur.fetchall()\r\n\r\nfor row in OrgaList: # append the recycling centres to the list\r\n    ListRecyclerieBox.append(row[0])\r\n\r\nComboStruct = ttk.Combobox(FirstFen, values = ListRecyclerieBox, width = 28, state='disabled') # combobox whose values are the recycling centres\r\nComboStruct.set(\"Choisissez la/les structure(s)\") # default combobox text\r\nComboStruct.grid(row=1,column=2)\r\n\r\nBtnAdd = Button(FirstFen, text='Ajouter',command=lambda:addStruct(), state='disabled') # button that adds the centre selected in the combobox to the listbox\r\nBtnAdd.grid(row=1,column=3)\r\n\r\nBtnDel = Button(FirstFen, text='Supprimer',command=lambda:delStruct(), state='disabled') # button that removes the centre selected in the listbox\r\nBtnDel.grid(row=1,column=4,padx=5)\r\n\r\nlistStruct = Listbox(FirstFen, width=50) # listbox holding the centres to analyse\r\nlistStruct.grid(row=1, column=5)\r\n\r\nLabelSecteur = Label(FirstFen, text='Secteur :', font = 60)\r\nLabelSecteur.grid(row=2,column=0, ipady = 70, padx = 5)\r\n\r\nLabelInsee = Label(FirstFen, text='Insee :')\r\nLabelInsee.grid(row=3,column=1)\r\n\r\nchkInseeOne = BooleanVar()\r\nchkInseeOne.set(False)\r\n\r\nchkInseeMany = BooleanVar()\r\nchkInseeMany.set(False)\r\n\r\nCheckOne = Checkbutton(FirstFen, text='1 code :', var=chkInseeOne, command=lambda:Is_check1()) # when ticked, enables the single-code entry row; otherwise it stays disabled\r\nCheckOne.grid(row=3, column=2, padx=4)\r\n\r\nEntryInsee = Entry(FirstFen, state='disabled') # disabled until the checkbox is ticked\r\nEntryInsee.grid(row=3,column=3, padx = 10)\r\n\r\nCheckMany = Checkbutton(FirstFen, text='plusieurs codes :', var=chkInseeMany, command=lambda:Is_check2()) # when ticked, enables the several-codes row; otherwise it stays disabled\r\nCheckMany.grid(row=4, column=2, padx=4)\r\n\r\nBtnInsee = Button(FirstFen, text='Importer votre fichier', command=lambda:FileInsee(), state='disabled') # disabled until the checkbox is ticked\r\nBtnInsee.grid(row=4,column=3)\r\n\r\nLabelFile = Label(FirstFen, text= '')\r\nLabelFile.grid(row=4, column=4)\r\n\r\nlistInsee = Listbox(FirstFen, height=10)\r\nlistInsee.grid(row=4, column=5, padx = 40)\r\n\r\nBtnNext = Button(FirstFen, text='Suivant', command=lambda:new_window()) # moves on to the next window, carrying over the first window's inputs\r\nBtnNext.grid(row=6,column=5, padx = 40, pady = 20)\r\n\r\nFirstFen.mainloop()","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":39673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"420547183","text":"#!/usr/bin/env python3\n\"\"\"\nDefines the class GRUCell that represents a gated recurrent unit\n\"\"\"\n\n\nimport numpy as np\n\n\nclass GRUCell:\n    \"\"\"\n    Represents a gated recurrent unit\n\n    class constructor:\n        def __init__(self, i, h, o)\n\n    public instance attributes:\n        Wz: update gate weights\n        bz: update gate biases\n        Wr: reset gate weights\n        br: reset gate biases\n        Wh: intermediate hidden state and input data weights\n        bh: intermediate hidden state and input data biases\n        Wy: output weights\n        by: output biases\n\n    public instance methods:\n        def forward(self, h_prev, x_t):\n            performs forward propagation for one time step\n        def softmax(self, x):\n            performs the softmax activation\n        def sigmoid(self, x):\n            performs the sigmoid activation\n    \"\"\"\n
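    # usage sketch (illustrative sizes, not part of the original module):\n    #   cell = GRUCell(i=10, h=15, o=5)\n    #   h_next, y = cell.forward(np.zeros((8, 15)), np.random.randn(8, 10))\n    #   h_next has shape (8, 15); y, being the softmax output, has shape (8, 5)\n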
    def __init__(self, i, h, o):\n        \"\"\"\n        Class constructor\n\n        parameters:\n            i: dimensionality of the data\n            h: dimensionality of the hidden state\n            o: dimensionality of the outputs\n\n        creates public instance attributes:\n            Wz: update gate weights\n            bz: update gate biases\n            Wr: reset gate weights\n            br: reset gate biases\n            Wh: intermediate hidden state and input data weights\n            bh: intermediate hidden state and input data biases\n            Wy: output weights\n            by: output biases\n\n        weights should be initialized using random normal distribution\n        weights will be used on the right side for matrix multiplication\n        biases should be initialized as zeros\n        \"\"\"\n        self.bz = np.zeros((1, h))\n        self.br = np.zeros((1, h))\n        self.Wz = np.random.normal(size=(h + i, h))\n        self.Wr = np.random.normal(size=(h + i, h))\n        self.bh = np.zeros((1, h))\n        self.by = np.zeros((1, o))\n        self.Wh = np.random.normal(size=(h + i, h))\n        self.Wy = np.random.normal(size=(h, o))\n\n    def softmax(self, x):\n        \"\"\"\n        Performs the softmax function\n\n        parameters:\n            x: the value to perform softmax on to generate output of cell\n\n        return:\n            softmax of x\n        \"\"\"\n        e_x = np.exp(x - np.max(x, axis=1, keepdims=True))\n        softmax = e_x / e_x.sum(axis=1, keepdims=True)\n        return softmax\n\n    def sigmoid(self, x):\n        \"\"\"\n        Performs the sigmoid function\n\n        parameters:\n            x: the value to perform sigmoid on\n\n        return:\n            sigmoid of x\n        \"\"\"\n        sigmoid = 1 / (1 + np.exp(-x))\n        return sigmoid\n\n    def forward(self, h_prev, x_t):\n        \"\"\"\n        Performs forward propagation for one time step\n\n        parameters:\n            h_prev [numpy.ndarray of shape (m, h)]:\n                contains previous hidden state\n                m: the batch size for the data\n                h: dimensionality of hidden state\n            x_t [numpy.ndarray of shape (m, i)]:\n                contains data input for the cell\n                m: the batch size for the data\n                i: dimensionality of the data\n\n        output of the cell should use softmax activation function\n\n        returns:\n            h_next, y:\n                h_next: the next hidden state\n                y: the output of the cell\n        \"\"\"\n        concatenation1 = np.concatenate((h_prev, x_t), axis=1)\n        z_gate = self.sigmoid(np.matmul(concatenation1, self.Wz) + self.bz)\n        r_gate = self.sigmoid(np.matmul(concatenation1, self.Wr) + self.br)\n\n        concatenation2 = np.concatenate((r_gate * h_prev, x_t), axis=1)\n        h_next = np.tanh(np.matmul(concatenation2, self.Wh) + self.bh)\n        h_next *= z_gate\n        h_next += (1 - z_gate) * h_prev\n        # i.e. h_next = z * candidate + (1 - z) * h_prev\n\n        y = self.softmax(np.matmul(h_next, self.Wy) + self.by)\n\n        return h_next, y\n","sub_path":"supervised_learning/0x0D-RNNs/2-gru_cell.py","file_name":"2-gru_cell.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"631619103","text":"import scrambler\nimport kociemba\nimport scrambleGenerator\n\n\nmoves_text = open('moves.txt', 'a+')\n\nscramble = scrambleGenerator.gen_scramble()[:-4]\nsolve_string = scrambler.get_cube_string(scramble)\nsolve = kociemba.solve(solve_string)\ndegrees_of_motion = 360\nturns = degrees_of_motion / 360 * 4\n\n# Total turns per face\nface_turns = {'U': 2, 'R': 2, 'F': 2, 'D': 2, 'L': 2, 'B': 2}\n# Face position (in quarter turns)\nface_pos = {'U': 2, 'R': 2, 'F': 2, 'D': 2, 'L': 2, 'B': 2}\n\n
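# each face's position is tracked as a quarter-turn index from 0 to 4: with only 360\n# degrees of servo motion available, a face already at its limit cannot rotate further,\n# so 3 quarter turns the other way stand in for 1 forward turn (and vice versa)\n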
for s in solve.split():\n    # Checks whether a double turn (R2 etc.) would go past the 4-move limit\n    if s[-1] == '2':\n        if face_pos[s[0]] + 2 <= 4:\n            face_pos[s[0]] += 2\n        else:\n            face_pos[s[0]] -= 2\n        # Adds 2 moves onto face turns\n        face_turns[s[0]] += 2\n\n    if s[-1] != \"'\":\n        # Checks if the position of the face is 4 (unable to rotate further) and does 3 reverse turns to compensate\n        if face_pos[s[0]] == 4:\n            face_pos[s[0]] -= 3\n            face_turns[s[0]] += 3\n\n        else:\n            face_pos[s[0]] += 1\n            face_turns[s[0]] += 1\n\n    if s[-1] == \"'\":\n        # Checks if the position of the face is 0 (unable to rotate further) and does 3 reverse turns to compensate\n        if face_pos[s[0]] == 0:\n            face_pos[s[0]] += 3\n            face_turns[s[0]] += 3\n\n        else:\n            face_pos[s[0]] -= 1\n            face_turns[s[0]] += 1\n\n# Writes the total moves to a text document\nmoves_text.write(f\"{sum(face_turns.values())}, \")\nmoves_text.seek(0)\ntotal_moves = [int(x) for x in moves_text.readline().split(', ')[:-1]]\n# Calculates and prints the average moves per solve\nprint(sum(total_moves) / len(total_moves))\n\nprint(scramble, solve_string, solve, sep = '\\n')\nprint(\"U: {}\\nR: {}\\nF: {}\\nD: {}\\nL: {}\\nB: {}\".format(*face_turns.values()))\nprint(\"Total Moves:\", sum(face_turns.values()))\n# print(\"U: {}\\nR: {}\\nF: {}\\nD: {}\\nL: {}\\nB: {}\".format(*face_pos.values()))\n","sub_path":"PC(Python)/servo_sim.py","file_name":"servo_sim.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"541062133","text":"from sklearn import svm\nfrom sklearn.externals import joblib\nfrom class_feature.feature_extract_system import systemFeature\nfrom class_feature.feature_extract_image import imageFeature\n\nimport os\nimport numpy as np\n\nclass Train():\n    def __init__(self, **kwargs):\n        self.system_data = kwargs.pop('system_data', None)\n        self.image_data = kwargs.pop('image_data', None)\n        self.subject_score = kwargs.pop('subject_score', None)\n\n        self.features = []\n        if self.system_data is not None:\n            class_system_feature = systemFeature(data=self.system_data)\n            self.system_feature = np.array(class_system_feature.feature_extract())\n\n        if self.image_data is not None:\n            class_image_feature = imageFeature(data=self.image_data)\n            self.image_feature = np.array(class_image_feature.feature_extract())\n            self.features = np.concatenate((self.system_feature, self.image_feature), axis=1)\n\n        self.pklPath = 'pkl'\n        if os.path.isdir(self.pklPath):\n            pass\n        else:\n            os.makedirs(self.pklPath)\n\n    def train(self):\n        # fit the RBF-kernel support vector regression model\n        svr_rbf = svm.SVR(kernel='rbf', C=1e3, gamma=0.1)\n        svr_model_rbf = svr_rbf.fit(self.features, self.subject_score)\n        # save the fitted svr model\n        pklPath = os.path.join(self.pklPath, 'svr_model_rbf_params.pkl')\n        joblib.dump(svr_model_rbf, pklPath)\n\n\n\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"142720655","text":"#!/usr/bin/python\n# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.\nimport time\n\nimport os.path \nfrom os import path\n\nimport Adafruit_CharLCD as LCD\n\nimport sys\n# Raspberry Pi pin configuration:\nlcd_rs = 25 # Note this might need to be changed to 21 for older revision Pi's.\nlcd_en = 24\nlcd_d4 = 23\nlcd_d5 = 17\nlcd_d6 = 18\nlcd_d7 = 22\nlcd_backlight = 4\n\n# Define LCD column and row size for 16x2 LCD.\nlcd_columns = 16\nlcd_rows = 2\n\n# Initialize the LCD using the pins above.\nlcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, 
lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight)\n\nlcd.clear()\n \nlcd.show_cursor(False)\nlcd.blink(False)\n# word is the input\nword = sys.stdin.readlines()\n\n# 'repeat' causes a loop, which really only works if the input is rolodex.py\n\nfor x in word:\n    word = x\n    time.sleep(1)\n    # just copy-pasting the whole linebreak thing. APPLY LINEBREAK ETC TO INPUT #\n    if len(word) < 34:\n        if len(word) > 16:\n            midpoint = 8\n            while len(word[midpoint:]) > 16:\n                midpoint = midpoint+1\n            space = word.find(' ', midpoint)\n            last = word[space+1:]\n            first = word[:space]\n            message = first + \"\\n\" + last\n        else:\n            message = word\n    # end of the linebreak handling\n    # print the wrapped message\n    print(message)\n    # display the wrapped message\n    lcd.message(message)\n    # pause\n    time.sleep(1)\n    # if told to clear it, clear it\n    if (sys.argv[1] == \"clear\"):\n        lcd.clear()\n    # if told to repeat....\n    if (sys.argv[1] == \"repeat\"):\n        # continue forever, sleeping while the lock file exists\n        while \"rolodex\":\n            while (path.exists(\"/tmp/lock\")):\n                time.sleep(1)\n            # wipe the lcd\n            lcd.clear()\n            # display the message (which is the wrapped text)\n            lcd.message(message)\n            # pause\n            time.sleep(1)\n            # end of paste\n# I think this next chunk is redundant so I'm commenting it out\n# applies the linebreak handling to word\n#if len(word) < 34:\n   # if len(word) > 16:\n     #   midpoint = 8\n      #  while len(word[midpoint:]) > 16:\n       #     midpoint = midpoint+1\n       # space = word.find(' ', midpoint)\n       # last = word[space+1:]\n       # first = word[:space]\n       # message = first + \"\\n\" + last\n   # else:\n       # message = word\n   # print(message)\n#    lcd.message(message)\n   # time.sleep(1)\n   # if (sys.argv[1] == \"clear\"):\n      #  lcd.clear()\n    #if (sys.argv[1] == \"repeat\"):\n        #start of fresh copy'\n#        while \"rolodex\":\n           # while (path.exists(\"/tmp/lock\")):\n              #  time.sleep(1)\n           # lcd.clear()\n           # lcd.message(message.rstrip())\n           # time.sleep(1)\n        #end\n    \n#else:\n    #message = (len(word))\n   # print(message)\n    \n#between here and prev comment should set the correct message\n","sub_path":"inputlcd.py","file_name":"inputlcd.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"371513592","text":"import pygame as pg\nfrom .. import constants as c\n\nclass Portal(pg.sprite.Sprite):\n\tdef __init__(self, x, y, name):\n\t\tsuper(Portal, self).__init__()\n\t\tself.image = pg.Surface((c.tile_width, c.tile_height))\n\t\tself.image.fill(c.BLACK)\n\t\tself.rect = pg.Rect(x,y,c.tile_width,c.tile_height)\n\t\tself.name = name","sub_path":"data/components/portal.py","file_name":"portal.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"110431331","text":"counter_pos = 0\r\ncounter_neg = 0\r\ncounter = 0\r\nwhile True:\r\n    number = int(input('Enter your number : '))\r\n    if number == 0:\r\n        break\r\n    counter += 1\r\n    if number > 0:\r\n        counter_pos += 1\r\n    else:\r\n        counter_neg += 1\r\nif counter:  # guard against dividing by zero when the very first input is 0\r\n    pers_pos = 100 * counter_pos / counter\r\n    pers_neg = 100 * counter_neg / counter\r\n    print('pers_pos = %.2f%%, pers_neg = %.2f%%' %(pers_pos,pers_neg))\r\n\r\n\r\n\r\n","sub_path":"homework5_6.py","file_name":"homework5_6.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"273846241","text":"from django.urls import path, re_path\n\nfrom . import views\n\n
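# NB: Django's path() does not interpret regular expressions, so the two parameterised\n# routes below use re_path; with Django 2.0+ converters they could equally be written\n# as, e.g., path(\"image/<int:image_id>\", views.image, name=\"image\")\n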
urlpatterns = [\n    path(\"\", views.index, name=\"index\"),\n    path(\"list/\", views.list, name=\"list\"),\n    re_path(r\"image/(?P<image_id>[0-9]+)\", views.image, name=\"image\"),\n    re_path(r\"tag/(?P<tag_name>.+)\", views.tag, name=\"tag\"),\n    path(\"upload/\", views.upload, name=\"upload\"),\n    path(\"about/\", views.about, name=\"about\"),\n]\n","sub_path":"gallery_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"133867484","text":"message= \" hello there. my name is Fatih\"\r\n\r\n#message=message.upper() converts the whole string to uppercase.\r\n#message=message.lower() converts the whole string to lowercase.\r\n#message=message.title() capitalises the first letter of every word in the string.\r\n#message=message.capitalize() capitalises only the first word of the string.\r\n#message=message.strip() strips the whitespace the user entered.\r\nmessage=message.split() # splits the string; every word becomes a separate list element.\r\n#message=message.split(\".\") # splits the string on the dots.\r\nmessage=\"--\".join(message) # rejoins the pieces we split, putting -- between them.\r\nprint(message)\r\nprint(message[1])\r\n","sub_path":"string-methods.py","file_name":"string-methods.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"609352179","text":"# -*- coding: utf-8 -*-\n\nimport hmac\nimport hashlib\nimport base64\nimport urllib\nimport time\n\n__all__ = ['Credentials', 'Auth']\n\nclass Credentials(object):\n    def __init__(self, access_key_id, access_key_secret):\n        self.access_key_id = access_key_id.strip()\n        self.access_key_secret = access_key_secret.strip()\n\n    def __str__(self):\n        return \"access_key_id: [%s], access_key_secret: [%s]\" % (self.access_key_id, self.access_key_secret)\n\nclass Auth(object):\n    def __init__(self, credentials):\n        if not isinstance(credentials, Credentials):\n            raise Exception(\"please provide correct credentials\")\n\n        self._credentials = credentials\n\n    def gen_user(self):\n        return self._credentials.access_key_id\n\n    def gen_remote_credentials(self):\n        m_dict = dict()\n        m_dict[\"accesskey\"] = self._credentials.access_key_id\n        m_dict[\"timestamp\"] = str(int(time.time()*1000))\n        m_dict[\"signature_type\"] = \"HMAC-MD5\"\n        m_dict[\"version\"] = \"1.0\"\n\n        base_str = \"\"\n        for k in sorted(m_dict.keys()):\n            base_str += k\n            base_str += \"=\"\n            base_str += m_dict[k]\n            base_str += \"&\"\n\n        sign = self._gen_sign(m_dict)\n        m_dict[\"signature\"] = sign\n\n        template_str = \"?accessKeyId=%s&signatureMethod=HMAC-MD5&version=1.0&timestamp=%s&signature=%s\"\n\n        remote_credentials = template_str % (m_dict[\"accesskey\"], m_dict[\"timestamp\"], sign)\n\n        return remote_credentials\n\n\n    def _gen_sign(self, m_dict):\n        base_str = \"\"\n        access_key_secret = self._credentials.access_key_secret\n        for k in sorted(m_dict.keys()):\n            base_str += k\n            base_str += \"=\"\n            base_str += m_dict[k]\n            base_str += \"&\"\n\n        base_str = base_str[:-1]\n        txt = hmac.new(access_key_secret, base_str, hashlib.md5)\n        sign = base64.b64encode(txt.digest()).decode()\n        return urllib.quote(sign)\n\n\n","sub_path":"graphcompute/utils/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"102515721","text":"from .base import FunctionalTest\nfrom selenium import webdriver\nfrom 
selenium.webdriver.common.keys import Keys\n\n\nclass NewVisitorTest(FunctionalTest):\n\n    def test_about_information(self):\n        # So that I can learn more about this website\n        # As a visitor\n        # I want to read a page of explanatory information\n\n        # When I visit the homepage I can see the page title\n        # and header mention asking questions of candidates\n        self.browser.get(self.live_server_url)\n        self.assertIn('Ask your candidate', self.browser.title)\n        self.check_for_strings_in_page_element('h1', 'Candidate Q&As')\n\n        # I can see that the site is specific to the UK general election 2015\n        # I can see that the site is a prototype\n        self.check_for_strings_in_page_element('body', {\n            'UK general election 2015',\n            'prototype',\n        })\n\n        # I can see who made the site\n        self.check_link_appears_on_page('http://89up.org/')\n        self.check_link_appears_on_page('https://www.openrightsgroup.org/')\n        self.check_link_appears_on_page('https://democracyclub.org.uk/')\n\n        # I can find the source code for the site\n        self.check_link_appears_on_page('https://github.com/DemocracyClub/candidate_questions/')\n\n        # I can see an email address to write to with questions or comments\n        self.check_link_appears_on_page('mailto:questions@campaignreply.org')\n\n    def test_browse_candidate_answers(self):\n        # So that I can see how popular the site is\n        # As a visitor\n        # I want to browse a list of all candidates\n\n        # I visit the homepage and see a link to \"browse all candidate answers\"\n        self.browser.get(self.live_server_url)\n        link = self.browser.find_element_by_link_text('browse all candidate answers')\n\n        # I follow the link and I'm taken to a new page.\n        link.click()\n        self.assertEqual(\n            self.live_server_url + '/candidates/view_answers/',\n            self.browser.current_url\n        )\n\n        # I can see a count of how many candidates have responded\n        # I can see a list of candidates, their parties and constituencies\n        # I click on a candidate and am taken to a new URL\n        # I can navigate back to the candidates list from this new URL\n\n        # Satisfied, I go back to sleep\n","sub_path":"functional_tests/test_visitor.py","file_name":"test_visitor.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"266880493","text":"SERVER = \"193.122.121.16\"\n\nKAKAO_APP_ID = \"58e2b8578c74a7039a08d2b7455012a1\"\nKAKAO_REDIRECT_URI = f\"http://{SERVER}/kakao_login\"\n# KAKAO_REDIRECT_URI = \"http://localhost:8000/kakao_login\"\n\nFACEBOOK_APP_ID = \"650104882182241\"\nFACEBOOK_SECRET = \"3a1806fcd6db5e023e0d64db3fd17585\"\nFACEBOOK_REDIRECT_URI = \"https://127.0.0.1:8000/facebook_login\"\nFACEBOOK_REST_API = 'http://localhost:8000/rest-auth/facebook/?method=oauth2'\n\nJWT_OPTAIN_URL = f'http://{SERVER}/api-jwt-auth/'\nJWT_VERIFY_URL = f'http://{SERVER}/api-jwt-auth/verify/'\nJWT_REFRESH_URL = f'http://{SERVER}/api-jwt-auth/refresh/'\n# JWT_OPTAIN_URL = 'http://localhost:8000/api-jwt-auth/'\n# JWT_VERIFY_URL = 'http://localhost:8000/api-jwt-auth/verify/'\n# JWT_REFRESH_URL = 'http://localhost:8000/api-jwt-auth/refresh/'\n\ncomments = {\n    \"meal\": {\n        \"overeating\": [\n            \"지금처럼 많이 먹다가는 몸무게 앞자리가 달라지는 경험을 하실거에요\\n\",\n            \"대부분은 살기 위해 먹는데, 당신은 먹기 위해 사는 것이 확실하네요\\n\",\n            \"또..또..먹었어요,,? 그만…그만…그만…!!\\n\"\n        ],\n        \"compliment\": [\n            \"이대로 라면 100살까지는 거뜬히 살 수 있을거에요\\n\",\n            \"이대로 라면 건강 걱정은 없을겁니다!\\n\",\n            \"끼니를 거르지 않는 당신은 이 시대의 건강왕!\\n\"\n        ],\n        \"starving\": [\n            \"지금처럼 조금 먹다가는 살과 함께 건강도 같이 빠질거에요\\n\",\n            \"끼니 좀 거르지마! 또 거르면 끼리니가 너를 걸러낼거야!\\n\",\n            \"이번주도 제대로 못 챙겨 먹은 당신. 
다음주에는 끼리끼니와 함께 더 열심히 먹어봅시다!\\n\"\n ],\n },\n \"drink\": {\n \"2\": [\n \"당신의 간이 지쳐가고 있어요\\n\",\n \"간이 욕한다 욕해,,\\n\",\n \"너는 간이 3개니?\\n\",\n \"이번주는 물보다 술을 많이 마셨네! 대단하다 친구야!\\n\",\n \"맨날 술이야~ 맨날 술이야~\\n\",\n \"매일 술 퍼마시는 너를 보면 끼리니는 술퍼져..\\n\"\n ],\n \"1\": [\n \"이번주 당신의 음주는 아주 바람직하네요!\\n\",\n \"그래 일주일에 한 번은 괜찮지!\\n\"\n ],\n \"0\": [\n \"이번주는 금주에 성공했어요! 짝짝짝!\\n\",\n \"이번주 금주 기념으로 끼리니가 술 한잔 살게~! 밥 한 술~!\\n\"\n ]\n },\n \"coffee\": {\n \"6\": [\n \"당신은 정말 물 마시듯 커피를 드시네요\\n\",\n \"뭐든지 적당히 좀!\\n\",\n \"정신을 깨기 위해서 커피를 마시는건데 이렇게 마시다 머리가 깨지겠어요\\n\",\n \"이번주는 물보다 커피를 더 많이 마셨겠네요\\n\",\n \"이번주에 마신 카페인은 당신을 폐인으로 만들 수도 있는 양이었어요…\\n\"\n ],\n \"5\": [\n \"이번주 당신의 카페인의 엑셀런트! 적당한 카페인은 몸에도 좋다네요!\\n\",\n \"이번주 마신 카페인 정도는 끼리니가 봐줄게!\\n\"\n ],\n \"0\": [\n \"이번주 당신은 금카(0카페인)에 성공했어요!\\n\",\n \"카페인이 필요 없는 당신, 건강은 너의 것!\\n\"\n ]\n },\n \"house\": [\n \"집밥을 많이 먹었다는 것은 좋은 징조에요! 집밥을 먹더라도 다양한 영양소를 먹어야 하는 것 알죠?\\n\",\n \"집밥을 애정하는 당신! 다음주에는 더 다양한 요리에 도전해보는 것은 어떨까요?\\n\"\n ],\n \"out\": [\n \"이번주는 외식이 많았어요! 맛은 있지만 건강에는 안좋을 수도 있어요..내 지갑 사정에도 안좋고..\\n\",\n \"외식을 하더라도 샐러드나 생선과 같이 건강한 외식에 도전해보는 것은 어떨까요?\\n\"\n ],\n \"delivery\": [\n \"배달의민족이 진짜 우리 민족은 아니에요. 다음주에는 건강한 집밥에 도전해보는 것은 어떨까요?\\n\",\n \"자극적인 배달 음식을 많이 먹었다면 이제 건강식에 도전해보는 것은 어떨까요?\\n\",\n \"치킨, 피자, 족발, 보쌈 중 2개 이상을 먹었다면 배민과 잠시 이별해야 할 시간이에요..\\n\"\n ],\n \"simple\": [\n \"간편함이 건강을 보장하지는 않는답니다!\\n\",\n \"편하게 먹으려는 자는 편치 못한 건강을 얻을 것이다\\n\",\n \"빠르게 먹는 것을 좋아하다가 빠르게 갈 수도 있어요!\\n\"\n ]\n}\n","sub_path":"KiriKini/server/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":4618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"540201696","text":"from Tkinter import *\nimport Tkinter\nimport tkMessageBox\nimport MySQLdb\n\n\n#fix new issue\n\n#Open Tkinter windowb\nmaster = Tk()\nstatus = StringVar()\nonOrOff = Tkinter.IntVar()\n#connect to DB\n\ndb = MySQLdb.connect(host=\"192.168.1.26\", \n user=\"mac\", \n passwd=\"swimming\", \n db=\"widgets\")\n\n\n#delete from the list and from the database. 
The only way the element will be deleted off the list is if \n#the database delete is successful\ndef delete_widget():\n name = entries[0].get()\n try:\n cur = db.cursor()\n cur.execute(\"\"\"DELETE FROM widget WHERE name=(%s)\"\"\", (name,))\n db.commit()\n cur.close()\n status.set(\"Deleted\")\n entries[0].configure(state='normal')\n clear_entries()\n update_listbox()\n entries[0].configure(state='disabled')\n entry_disabled()\n except:\n status.set(\"Delete failed\")\n \n#update listbox with the newest entries\ndef update_listbox():\n name = entries[0].get()\n #deletes all elements\n listbox.delete(0, END)\n\n #grab widget names from database and display them\n display_widgets(get_widgets(), listbox)\n\n\n#add widget to the listbox\ndef add_widget():\n if(listbox.get(END) == \"New\"):\n status.set(\"Can only have 1 new entry\")\n else:\n listbox.insert(END, \"New\")\n entries[0].configure(state='normal')\n clear_entries()\n response_text.config(state=NORMAL)\n response_text.delete(0.0, END)\n response_text.config(state=DISABLED)\n size = listbox.size()-1\n listbox.selection_clear(0, \"end\")\n listbox.selection_anchor(\"end\")\n listbox.selection_set(\"end\")\n entry_normal()\n\n\ndef fill_entries(row):\n entries[0].insert(INSERT, row[0])\n entries[1].insert(INSERT, row[2])\n entries[2].insert(INSERT, row[3])\n entries[3].insert(INSERT, row[5])\n entries[4].insert(INSERT, row[1])\n\n #print row[6]\n\n response_text.config(state=NORMAL)\n response_text.delete(0.0, END)\n if(row[6] != None):\n response_text.insert(INSERT, row[6])\n response_text.config(state=DISABLED)\n\n format_text.insert(INSERT, row[4])\n\n if(row[7]):\n checkbutton.select()\n else:\n checkbutton.deselect()\n\ndef selected_element(e):\n status.set(\"\")\n if(listbox.get(listbox.curselection()) != \"New\"):\n clear_entries()\n entry_normal()\n fetch_and_fill_entries()\n entries[0].configure(state='disabled')\n\n\n\n#this will fill the entries with the item that's selected in the listbox\ndef fetch_and_fill_entries():\n try:\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT name, parameters, api_number, url_parameter, format, description, response, onOrOff\n FROM widget where name=(%s)\"\"\", (listbox.get(listbox.curselection())[0],))\n row = cursor.fetchall()[0]\n db.commit()\n cursor.close()\n\n clear_entries()\n fill_entries(row)\n except:\n status.set(\"Select failed\")\n return 1\n\n#when the save button is hit, it saves to the database\ndef save_to_database():\n\n #get responses from entries\n name = entries[0].get()\n parameters = entries[4].get()\n api_num = entries[1].get()\n url_parameter = entries[2].get()\n description = entries[3].get()\n format = format_text.get(0.0, END)\n response = response_text.get(0.0, END)\n\n\n #only insert into table if we have a widget name and api number\n if (name and api_num):\n\n #make selection to see if its in the table\n cur = db.cursor()\n cur.execute(\"\"\"SELECT name from widget where name=(%s)\"\"\", (name,))\n db.commit()\n fetched = cur.fetchall()\n cur.close()\n\n #Update instead of insert\n if (len(fetched)):\n\n #checks to make sure that a new widget won't add two of the same name\n if(listbox.get(END) == \"New\" and entries[0].get() == fetched[0][0]):\n status.set(\"Name already exists\")\n return\n\n try:\n #execute update and let us know we updated the table\n cur = db.cursor()\n cur.execute(\"\"\"UPDATE widget \n SET parameters=(%s), api_number=(%s), url_parameter=(%s), format=(%s), \n description=(%s), response=(%s), onOrOff=(%s) where name=(%s)\"\"\",\n 
(parameters, api_num, url_parameter, format, \n description, response, onOrOff.get(), name))\n db.commit()\n cur.close()\n status.set(\"Updated table\\nTakes a few seconds to get response\")\n update_listbox()\n except:\n status.set(\"Update Failed\")\n\n else: \n #insert into table and let us know we inserted it\n try:\n cur = db.cursor()\n cur.execute(\"\"\"INSERT INTO widget (name,parameters,api_number, url_parameter, format, description, onOrOff) \n VALUES (%s, %s, %s, %s, %s, %s, %s)\"\"\",\n (name, parameters, api_num, url_parameter, format, description, onOrOff.get()))\n db.commit()\n cur.close()\n status.set(\"Inserted into table\\nTakes a few seconds to get response\")\n update_listbox()\n except:\n status.set(\"Insert Failed\") \n entries[0].configure(state='disabled')\n else:\n status.set(\"Fill out necessary fields\")\n\n\n#add an entry field with description of text and it will display it to the screen\ndef add_entry(master, text):\n\n frame = Frame(master)\n\n label = Label(frame, text=text)\n label.pack(side=TOP)\n\n entry = Entry(frame, state='disabled')\n entry.pack(side=TOP)\n\n frame.pack(fill=X, padx=100)\n return entry\n\n#initializes the entry list and returns that list\ndef entry_list():\n entries = []\n Button(master, text='Update DB', command=save_to_database).pack(side=TOP)\n checkbutton = Checkbutton(master, text=\"On\", variable=onOrOff)\n checkbutton.pack()\n entries.append(add_entry(master, \"Widget Name*\")) \n entries.append(add_entry(master, \"API Number*\")) \n entries.append(add_entry(master, \"URL Parameters\"))\n entries.append(add_entry(master, \"Description\"))\n entries.append(add_entry(master, \"Parameters\")) \n return entries, checkbutton\n\n#makes the listbox and the add/delete buttons and status message\ndef make_listbox():\n frame = Frame(master)\n #make listbox\n listbox = Listbox(frame, activestyle='none')\n listbox.pack(side=TOP)\n\n #bind a click on the listbox to the function selected_element\n listbox.bind('<<ListboxSelect>>', selected_element)\n\n #display the widgets\n display_widgets(get_widgets(), listbox)\n\n #add and delete buttons and status message\n Button(frame, text='+', command=add_widget).pack(side=LEFT)\n Button(frame, text=\"-\", command=delete_widget).pack(side=LEFT)\n Message(frame, text=\"Status\", textvariable=status).pack(fill=X)\n frame.pack(side=TOP)\n return listbox\n\n#displays the widgets in the listbox\ndef display_widgets(widgets, listbox):\n for widget in widgets:\n listbox.insert(END, widget)\n\n#draws the textboxes for the format and response\ndef make_textboxes():\n frame = Frame(master)\n\n rlabel = Label(frame, text=\"Response\")\n rlabel.pack(side=LEFT)\n\n flabel = Label(frame, text=\"Format\")\n flabel.pack(side=RIGHT)\n\n response_text = Text(frame, width=30, height=20, highlightbackground='black', state='disabled')\n response_text.pack(side=LEFT)\n\n format_text = Text(frame, width=30, height=20, highlightbackground='black', state='disabled')\n format_text.pack(side=RIGHT)\n\n frame.pack(side=TOP)\n\n return response_text, format_text\n\n#Performs a select on the DB to get the names of all the widgets\ndef get_widgets():\n widgets = []\n try:\n cur = db.cursor()\n cur.execute(\"SELECT name from widget where name NOT LIKE 'NULL'\")\n for row in cur.fetchall():\n widgets.append(row)\n db.commit()\n cur.close()\n except:\n status.set(\"Failed to update widgets\")\n return widgets\n\n#clear entries \ndef clear_entries():\n for entry in entries:\n entry.delete(0, END)\n format_text.delete(0.0, END)\n response_text.delete(0.0, 
END)\n\n#put the entry into a normal state\ndef entry_normal():\n for entry in entries:\n entry.configure(state='normal')\n format_text.config(state=NORMAL)\n\n#disable entries\ndef entry_disabled():\n for entry in entries:\n entry.configure(state='disabled')\n\nlistbox = make_listbox()\nentries, checkbutton = entry_list()\nresponse_text, format_text = make_textboxes()\n\nmainloop( )\n","sub_path":"Cube_Manager.py","file_name":"Cube_Manager.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"462890532","text":"import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\n#ensure pillow is installed as a python package\ndef show_tiff(filename):\n img=mpimg.imread(filename)\n imgplot=plt.imshow(img)\n plt.show()\n\nshow_tiff(r'C:\\Users\\HP\\Desktop\\viz\\tiff1.tiff')\n\n","sub_path":"viz/displaytifffunction.py","file_name":"displaytifffunction.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"72893091","text":"from django.http import Http404\n#Check whether the current logged-in user is a superuser (admin)\ndef superuser_required(func):\n def wrapper(self,*args,**kwargs):\n judge=False\n try:\n judge = self.request.user.is_superuser\n except : \n judge = self.user.is_superuser\n finally:\n if judge == True:\n return func(self,*args,**kwargs)\n else:\n raise Http404(\"无超级用户权限\")\n return wrapper\n\n#Check whether the user is currently logged in\ndef login_required(func):\n def wrapper(self,*args,**kwargs):\n judge=False\n try:\n self.judge = self.request.user.is_authenticated()\n except :\n self.judge = self.user.is_authenticated()\n finally:\n if self.judge:\n return func(self,*args,**kwargs)\n else:\n raise Http404(\"无用户权限\")\n return wrapper\n","sub_path":"account/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"229224510","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport argparse\r\nimport os, sys\r\nimport glob\r\nimport json, pickle\r\nfrom PIL import Image\r\nfrom collections import OrderedDict\r\n\r\n\r\ndef options():\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('--path2dataset', default='./datasets/bdd100k/train_img')\r\n parser.add_argument('--path2save', default='./datasets/bdd100k/train_attribute')\r\n\r\n return parser.parse_args()\r\n\r\n\r\ndef set_attribute(opt):\r\n path2images = sorted(glob.glob(os.path.join(opt.path2dataset, '*.jpg')))\r\n path2json = sorted(glob.glob(os.path.join(opt.path2save, '*.json')))\r\n finname_list = []\r\n for json_data in path2json:\r\n finname, ext = os.path.splitext(json_data)\r\n finname_list.append(finname)\r\n\r\n for finname in finname_list:\r\n finname = finname.replace('\\\\', '/').split('/')[-1]\r\n rmname = os.path.join(opt.path2dataset, finname+'.jpg')\r\n path2images.remove(rmname)\r\n numof_data = len(path2images)\r\n\r\n print('---------- Dataset Information ----------')\r\n print('numof_data: {}'.format(numof_data))\r\n print('------------------ End ------------------\\n')\r\n\r\n weather_list = ['clear', 'partly cloudy', 'overcast', 'rainy', 'snowy', 'foggy']\r\n timeofday_list = ['daytime', 'dawn/dusk', 'night']\r\n labels = []\r\n for iter, path2image in enumerate(path2images):\r\n img = Image.open(path2image)\r\n plt.imshow(img)\r\n plt.pause(1.0)\r\n\r\n path2image = path2image.replace('\\\\', '/').split('/')[-1]\r\n\r\n print('iter: 
[{:04d}/{:04d}], path: {}'.format(iter, numof_data, path2image))\r\n weather = input('[clear:0, partly cloudy:1, overcast:2, rainy:3, snowy:4, foggy:5]: ')\r\n timeofday = input('[daytime:0, dawn/dusk:1, night:2]: ')\r\n print()\r\n\r\n ''' when ther is no input '''\r\n if not weather:\r\n weather = input('[clear:0, partly cloudy:1, overcast:2, rainy:3, snowy:4, foggy:5]: ')\r\n if not timeofday:\r\n timeofday = input('[daytime:0, dawn/dusk:1, night:2]: ')\r\n\r\n weather = weather_list[int(weather)]\r\n timeofday = timeofday_list[int(timeofday)]\r\n\r\n dict = {}\r\n dict['name'] = path2image\r\n dict['attributes'] = {}\r\n dict['attributes']['weather'] = weather\r\n dict['attributes']['timeofday'] = timeofday\r\n\r\n redo = input('redo?: ')\r\n if redo == '1':\r\n print('iter: {:04d}, path: {}'.format(iter, path2image))\r\n weather = input('[clear:0, partly cloudy:1, overcast:2, rainy:3, snowy:4, foggy:5]: ')\r\n timeofday = input('[daytime:0, dawn/dusk:1, night:2]: ')\r\n print()\r\n\r\n weather = weather_list[int(weather)]\r\n timeofday = timeofday_list[int(timeofday)]\r\n\r\n dict['attributes']['weather'] = weather\r\n dict['attributes']['timeofday'] = timeofday\r\n\r\n path2image, ext = os.path.splitext(path2image)\r\n path2save = os.path.join(opt.path2save, path2image + '.json')\r\n with open(path2save, 'w') as fw:\r\n json.dump(dict, fw, indent=4)\r\n\r\n\r\n plt.clf()\r\n\r\ndef check_attributes(opt):\r\n image_paths = sorted(glob.glob(os.path.join(opt.path2dataset, '*.jpg')))\r\n json_paths = sorted(glob.glob(os.path.join(opt.path2save, '*.json')))\r\n\r\n numof_data = len(image_paths)\r\n print('---------- Dataset Imformation ----------')\r\n print('numof_data: {}'.format(numof_data))\r\n print('------------------ End ------------------\\n')\r\n\r\n weather_list = ['clear', 'partly cloudy', 'overcast', 'rainy', 'snowy', 'foggy']\r\n timeofday_list = ['daytime', 'dawn/dusk', 'night']\r\n\r\n try:\r\n dir = os.path.join(opt.path2save, 'number_of_checked_files.txt')\r\n g = open(dir, 'r')\r\n start_iter = int(g.read())\r\n g.close()\r\n except:\r\n start_iter = 0\r\n\r\n worry_paths = []\r\n for iter in range(start_iter, numof_data):\r\n image = Image.open(image_paths[iter])\r\n\r\n f = open(json_paths[iter], 'r')\r\n attribute = json.load(f, object_pairs_hook=OrderedDict)\r\n f.close()\r\n\r\n plt.imshow(image)\r\n plt.title('{}, {}'.format(attribute['attributes']['weather'], attribute['attributes']['timeofday']))\r\n plt.pause(1.0)\r\n\r\n print('{:04d}/{:4d} '.format(iter, numof_data), end='')\r\n operate = input('[append worry list:0, change attribute:1, quit:9]: ')\r\n if operate == '0':\r\n worry_paths.append(image_paths[iter].replace('\\\\', '/').split('/')[-1])\r\n operate = input('[change attribute:1, quit:9]: ')\r\n if operate == '1':\r\n weather = input('[clear:0, partly cloudy:1, overcast:2, rainy:3, snowy:4, foggy:5]: ')\r\n timeofday = input('[daytime:0, dawn/dusk:1, night:2]: ')\r\n\r\n weather = weather_list[int(weather)]\r\n timeofday = timeofday_list[int(timeofday)]\r\n\r\n dict = {}\r\n image_name = image_paths[iter].replace('\\\\', '/').split('/')[-1]\r\n dict['name'] = image_name\r\n dict['attributes'] = {}\r\n dict['attributes']['weather'] = weather\r\n dict['attributes']['timeofday'] = timeofday\r\n\r\n image_name, ext = os.path.splitext(image_name)\r\n path2save = os.path.join(opt.path2save, image_name + '.json')\r\n with open(path2save, 'w') as fw:\r\n json.dump(dict, fw, indent=4)\r\n elif operate == '9':\r\n ckpt_dir = os.path.join(opt.path2save, 
'number_of_checked_files.txt')\r\n f = open(ckpt_dir, 'w')\r\n f.write(str(iter))\r\n f.close()\r\n\r\n ckpt_dir = os.path.join(opt.path2save, 'worry_paths.txt')\r\n f = open(ckpt_dir, 'a')\r\n for x in worry_paths:\r\n f.write(str(x) + \"\\n\")\r\n f.close()\r\n\r\n exit()\r\n\r\n plt.close()\r\n\r\n\r\ndef main():\r\n opt = options()\r\n\r\n check_attributes(opt)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"utils/attributes_labels.py","file_name":"attributes_labels.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"137563666","text":"# coding:utf-8\n\nimport numpy\nimport random\nfrom re import findall\n\nclass Point(object):\n \"\"\"Class creates Point\"\"\"\n def __init__(self, coord):\n self.x = int(findall('(\\d+)', coord)[0])\n self.y = int(findall('(\\d+)', coord)[1])\n self.radius = 0\n self.neighbors = 0\n def __repr__(self):\n string = 'The point with coordinates ({x}, {y}) has a radius of {r:.3f} and the number of neighbors {n}'\n return string.format(x=self.x, y=self.y, r=self.radius, n=self.neighbors)\n\n\n def calculation_radius(self, tree):\n \"\"\" Calculation of the radius \"\"\"\n cursor = [tree]\n inf = numpy.inf\n\n while len(cursor) > 0:\n node = cursor.pop(0)\n if node is None: continue\n\n distance = find_dist(self, node.ref_point)\n if distance < inf and distance != 0:\n if self.radius > distance or self.radius == 0:\n self.radius = distance\n\n if node.is_none(): continue\n\n if distance < node.radius:\n if distance < node.radius + inf:\n cursor.append(node.inside)\n if distance >= node.radius - inf:\n cursor.append(node.outside)\n else:\n if distance >= node.radius - inf:\n cursor.append(node.outside)\n if distance < node.radius + inf:\n cursor.append(node.inside)\n\n\n def counting_neighbors(self, tree):\n \"\"\" Counting the number of neighbors \"\"\"\n doble_radius = self.radius * 2\n count_neighbors = -1\n cursor = [tree]\n\n while len(cursor) > 0:\n node = cursor.pop(0)\n if node is None: continue\n\n distance = find_dist(self, node.ref_point)\n if distance < doble_radius:\n count_neighbors += 1\n\n if node.is_none(): continue\n\n if distance < node.radius:\n if distance < node.radius + doble_radius:\n cursor.append(node.inside)\n if distance >= node.radius - doble_radius:\n cursor.append(node.outside)\n else:\n if distance >= node.radius - doble_radius:\n cursor.append(node.outside)\n if distance < node.radius + doble_radius:\n cursor.append(node.inside)\n self.neighbors = count_neighbors\n\n\nclass VPTree(object):\n \"\"\"Class creates VP-tree structure\"\"\"\n def __init__(self, points):\n self.inside = None\n self.outside = None\n self.radius = None\n self.ref_point = points.pop(random.randrange(len(points)))\n\n if len(points) < 1: return\n\n distances = [find_dist(self.ref_point, p) for p in points]\n self.radius = numpy.median(distances)\n\n inside_points, outside_points = [], []\n for i, p in enumerate(points):\n if distances[i] >= self.radius:\n outside_points.append(p)\n else:\n inside_points.append(p)\n\n if len(inside_points) > 0:\n self.inside = VPTree(points=inside_points)\n if len(outside_points) > 0:\n self.outside = VPTree(points=outside_points)\n\n def is_none(self):\n return (self.inside is None) and (self.outside is None)\n\n\nfind_dist = lambda p1, p2: numpy.sqrt((p1.x - p2.x)**2 + (p1.y - 
p2.y)**2)\n","sub_path":"vptree.py","file_name":"vptree.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"626392728","text":"\"\"\"procedural\n\nRevision ID: bfc4d50d84b7\nRevises: 4f68b685e401\nCreate Date: 2019-12-11 15:21:24.551570\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'bfc4d50d84b7'\ndown_revision = '4f68b685e401'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('prescription', sa.Column('frequency', sa.String(length=32), nullable=False))\n op.add_column('prescription', sa.Column('last_notified', sa.DateTime(), nullable=True))\n op.add_column('prescription', sa.Column('time', sa.Integer(), nullable=False))\n op.alter_column('prescription', 'active',\n existing_type=mysql.TINYINT(display_width=1),\n nullable=True,\n existing_server_default=sa.text(\"'1'\"))\n op.alter_column('prescription', 'notify',\n existing_type=mysql.TINYINT(display_width=1),\n nullable=True,\n existing_server_default=sa.text(\"'1'\"))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('prescription', 'notify',\n existing_type=mysql.TINYINT(display_width=1),\n nullable=False,\n existing_server_default=sa.text(\"'1'\"))\n op.alter_column('prescription', 'active',\n existing_type=mysql.TINYINT(display_width=1),\n nullable=False,\n existing_server_default=sa.text(\"'1'\"))\n op.drop_column('prescription', 'time')\n op.drop_column('prescription', 'last_notified')\n op.drop_column('prescription', 'frequency')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/bfc4d50d84b7_procedural.py","file_name":"bfc4d50d84b7_procedural.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"85884884","text":"import os\nimport numpy as np\nfrom keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint\nfrom keras.layers import Input, GlobalAveragePooling2D, Dense\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.models import Model\nfrom keras.utils import to_categorical\nfrom sklearn.metrics import roc_auc_score, accuracy_score, cohen_kappa_score\n\nclass ClassModel:\n \"\"\"The instance of classification model\"\"\"\n\n def __init__(self, batch_size, model_dir=None):\n self.model_dir = model_dir\n self.model = self.__class__.get_model()\n\n @staticmethod\n def get_model():\n \"\"\"Loads the model architecture\"\"\"\n input_tensor = Input(shape=(224, 224, 3))\n base_model = ResNet50(weights='imagenet', include_top=False)\n for layer in base_model.layers:\n layer.trainable=True\n x = base_model.output\n x = GlobalAveragePooling2D(data_format='channels_last')(x)\n x = Dense(9, activation='softmax')(x)\n updatedModel = Model(base_model.input, x)\n updatedModel.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['acc'])\n\n return updatedModel\n\n def train(self, train_generator, valid_generator, batch_size, num_epochs):\n \"\"\"Trains classification model\"\"\"\n callbacks = [ReduceLROnPlateau(min_lr = 0.00001)]\n if self.model_dir:\n callbacks.append(TensorBoard(log_dir = self.model_dir, batch_size = batch_size, write_graph = False))\n callbacks.append(ModelCheckpoint(os.path.join(self.model_dir, 
'weights.h'), save_best_only = True))\n\n self.model.fit_generator(generator = train_generator,\n validation_data = valid_generator,\n epochs = num_epochs,\n callbacks = callbacks)\n\n def load_best_checkpoint(self):\n \"\"\"Loads the best checkpoint from training\"\"\"\n model = self.__class__.get_model()\n model.load_weights(os.path.join(self.model_dir, 'weights.h'))\n self.model = model\n\n def predict(self, generator):\n \"\"\"Predicts the class scores based on data from generator\"\"\"\n scores = self.model.predict_generator(generator = generator)\n return scores\n\n def evaluate(self, generator):\n \"\"\"Evaluates the model\"\"\"\n metrics = {}\n scores = self.predict(generator)\n true_labels = generator.labels[:len(scores)]\n true_bin_labels = to_categorical(true_labels, num_classes=9)\n\n pred_bin = np.zeros((len(scores), 9))\n for idx, obs in enumerate(scores):\n pred_bin[idx][np.argmax(obs)] = 1\n\n metrics['accuracy'] = accuracy_score(true_bin_labels, pred_bin)\n metrics['auc'] = roc_auc_score(true_bin_labels, pred_bin)\n\n pred_label = np.zeros((len(scores)))\n for idx, obs in enumerate(scores):\n pred_label[idx] = np.argmax(obs)\n\n metrics['kappa'] = cohen_kappa_score(true_labels, pred_label)\n\n return metrics\n","sub_path":"master_thesis/experiments/class_model.py","file_name":"class_model.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"540508296","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 16 14:39:22 2016\n\n@author: GlennMurphy\n\"\"\"\n\nimport matplotlib.pyplot as plt\nx = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 4, 4, 5, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 9, 9]\nplt.boxplot(x)\nplt.show()\nplt.savefig(\"BoxPlot.png\")\nplt.hist(x,histtype='bar')\nplt.show()","sub_path":"Stat_Analysis/Plotting/BoxPlot.py","file_name":"BoxPlot.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"204507425","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport acabsl\nimport time\nt = .3\n\nacabsl.update()\n\ndef set_col(col,r,g,b,t):\n for w in range(acabsl.NOOFWALLS):\n for y in range(acabsl.WALLSIZEY):\n acabsl.send(col,y,r,g,b,t,w)\n\ndef set_all(r,g,b,t):\n for w in range(acabsl.NOOFWALLS):\n for x in range(acabsl.WALLSIZEX):\n for y in range(acabsl.WALLSIZEY):\n acabsl.send(x,y,r,g,b,t,w)\n\nmidcol = 7\ndcol = 0\n\n#set_all(0,0,0,0.5)\n#acabsl.update()\n#time.sleep(0.5)\n\n#set_col(midcol, 255, 0, 0, t)\n#acabsl.update()\n#time.sleep(t)\n\nwhile True:\n dcol = 0\n while midcol+dcol < acabsl.WALLSIZEX or midcol-dcol >= 0:\n set_col(midcol+dcol, 255, 0, 0, t*3)\n set_col(midcol-dcol, 255, 0, 0, t*3)\n acabsl.update()\n time.sleep(t)\n dcol+=1\n \n dcol = 0\n while midcol+dcol < acabsl.WALLSIZEX or midcol-dcol >= 0:\n set_col(midcol+dcol, 0, 0, 0, t*3)\n set_col(midcol-dcol, 0, 0, 0, t*3)\n acabsl.update()\n time.sleep(t)\n dcol+=1\n \n\n","sub_path":"animations/red-sym-scroll.py","file_name":"red-sym-scroll.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"622629501","text":"from django.utils.translation import ugettext_lazy as _\nfrom django.db.models import Q\nfrom ..models import RequirementsQuestionAnswer\nfrom commons.exceptions import ObjectDoesNotExist, ValidationError\nfrom requirements.models import RequirementHeader, RequirementLine\n\n\nclass 
RequirementQuestionsAnswersService(object):\n \"\"\"\n This class define all kinds of requirement question & answer related services\n \"\"\"\n\n def __init__(self, user, organization):\n \"\"\"\n Constructor of RequirementQuestionAnswerService class\n :param user: use who wants to create instance\n :param organization: organization from which this user belongs\n \"\"\"\n super(RequirementQuestionsAnswersService, self).__init__()\n self.user = user\n self.organization = organization\n\n def create_requirement_question_answer(self, requirement_id, question_answer, requirement_line_id=None,\n parent_id=None):\n \"\"\"\n It will create requirement question and answer\n :param requirement: Requirement header for that this question thread created\n :param question_answer: text of question and answer\n :param user: use who created this question and answer\n :param organization: organization from which this user belongs\n :param requirement_line: Optional parameter. if question and answer created for a requirement line\n :return RequirementsQuestionAnswer: Model object\n \"\"\"\n if self.organization.organization_type == 0 and parent_id is None:\n raise ValidationError(_(\n \"Client can not open a new thread for requirement question answer for requirement id: {0}, user: {1}, organization: {2}\".format(\n requirement_id, self.user.email, self.organization.name)))\n\n try:\n requirement = RequirementHeader.objects.get(pk=requirement_id)\n except RequirementHeader.DoesNotExist:\n raise ObjectDoesNotExist(\n _(\"Requirement not found for id {0}\".format(requirement_id)))\n\n requirement_line = None\n if requirement_line_id:\n try:\n requirement_line = RequirementLine.objects.get(\n pk=requirement_line_id)\n except RequirementLine.DoesNotExist:\n raise ObjectDoesNotExist(\n _(\"Requirement line not found for id {0}\".format(requirement_line_id)))\n\n parent = None\n if parent_id:\n try:\n parent = RequirementsQuestionAnswer.objects.get(pk=parent_id)\n except RequirementsQuestionAnswer.DoesNotExist:\n raise ObjectDoesNotExist(\n _(\"Requirement Question Answer Parent not found. 
id {0}\".format(parent_id)))\n\n return RequirementsQuestionAnswer.objects.create(requirement_header=requirement,\n question_answer=question_answer,\n parent=parent,\n requirement_line=requirement_line,\n user=self.user,\n organization=self.organization,\n is_reviewed=False)\n\n def update_requirement_question_answer(self, id, question_answer):\n \"\"\"\n It will update requirement question and answer text\n :param id: Primary key of the requirement question and answer\n :param question_answer: text of question and answer that need to update\n :return RequirementsQuestionAnswer: Model object\n \"\"\"\n try:\n requirement_question_answer = RequirementsQuestionAnswer.objects.get(\n pk=id, user=self.user, organization=self.organization)\n except RequirementsQuestionAnswer.DoesNotExist:\n raise ObjectDoesNotExist(\n _(\"Requirement Question Answer not found for id {0}\".format(id)))\n\n requirement_question_answer.question_answer = question_answer\n requirement_question_answer.save()\n\n return requirement_question_answer\n\n def get_requirement_question_answer_by_id(self, id):\n \"\"\"\n It will return requirement question and answer by primary key\n :param id: Primary key of the requirement question and answer\n :return RequirementsQuestionAnswer: Model object\n \"\"\"\n requirement_question_answer = None\n try:\n requirement_question_answer = RequirementsQuestionAnswer.objects.get(\n pk=id, user=self.user, organization=self.organization)\n except RequirementsQuestionAnswer.DoesNotExist:\n raise ObjectDoesNotExist(\n _(\"Requirement Question Answer not found for id {0}\".format(id)))\n return requirement_question_answer\n\n def get_requirement_question_answer_by_requirement(self, requirement_id):\n \"\"\"\n It will return requirement question and answer by requirement\n :param requirement: Requirement header that question answer we need return\n :return RequirementsQuestionAnswer: list object\n \"\"\"\n try:\n requirement = RequirementHeader.objects.get(pk=requirement_id)\n except RequirementHeader.DoesNotExist:\n raise ObjectDoesNotExist(\n _(\"Requirement not found for id {0}\".format(requirement_id)))\n\n if requirement.organization != self.organization:\n raise ValidationError(\n _(\"Other company users are not allowed to fetch all question answer for requirement id: {}\".format(\n requirement_id)))\n\n return RequirementsQuestionAnswer.objects.filter(requirement_header__id=requirement_id, parent=None).filter(\n Q(organization=self.organization) |\n Q(is_reviewed=True)\n )\n\n def get_requirement_question_answer_by_requirement_organization(self, requirement_id):\n \"\"\"\n It will return requirement question and answer by requirement and organization =True\n :param requirement: Requirement header that question answer we need return\n :return RequirementsQuestionAnswer: list object\n \"\"\"\n return RequirementsQuestionAnswer.objects.filter(requirement_header__id=requirement_id,\n organization=self.organization,\n parent=None)\n\n def get_requirement_question_answer_children(self, parent):\n \"\"\"\n It will return requirement question and answer's children\n :param parent: self reference\n :return RequirementsQuestionAnswer: list object\n \"\"\"\n if self.organization and self.organization.organization_type == 1:\n return self._get_requirement_question_answer_for_partner(parent)\n\n elif self.organization and self.organization.organization_type == 0:\n return self._get_requirement_question_answer_for_client(parent)\n\n elif not self.organization and self.user.is_superuser:\n return 
self._get_requirement_question_answer_for_admin(parent)\n\n def _get_requirement_question_answer_for_partner(self, parent):\n return RequirementsQuestionAnswer.objects.filter(parent=parent).filter(\n Q(is_reviewed=True) | Q(organization=self.organization) | Q(user__is_superuser=True)).order_by(\n \"date_created\")\n\n def _get_requirement_question_answer_for_client(self, parent):\n return RequirementsQuestionAnswer.objects.filter(parent=parent).filter(\n Q(is_reviewed=True, user__is_superuser=False) |\n Q(organization=self.organization) |\n Q(user__is_superuser=True, is_released=True)).order_by(\n \"date_created\")\n\n def _get_requirement_question_answer_for_admin(self, parent):\n return RequirementsQuestionAnswer.objects.filter(parent=parent).order_by(\n \"date_created\")\n","sub_path":"epad/questions_answers/services/requirements_questions_answer.py","file_name":"requirements_questions_answer.py","file_ext":"py","file_size_in_byte":8080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"644557050","text":"def merge(aList, bList):\n res = []\n i, j = 0, 0\n a, b = len(aList), len(bList)\n while i < a and j < b:\n if aList[i] < bList[j]:\n res.append(aList[i])\n i += 1\n else:\n res.append(bList[j])\n j += 1\n while i < a:\n res.append(aList[i])\n i += 1\n while j < b:\n res.append(bList[j])\n j += 1\n return res\n\n\nlistList = input().strip('[|]').split('],[')\nlistList = [[int(i) for i in j.split(',')] for j in listList] # 视为双端队列\nwhile len(listList) != 1:\n listList.append(merge(listList[0], listList[1]))\n listList.pop(0)\n listList.pop(0)\nprint(listList[0], end='\\n')\n","sub_path":"Code/CodeRecords/2534/60799/242276.py","file_name":"242276.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"449188586","text":"#encoding: utf-8\nimport csv\nimport pandas\nimport re\nimport os, sys\n\n#Valores constantes para acceder en order a los datos del .csv desde 2013 hasta 2016\n\nCOMUNIDAD = 0\nANIO = 1\nTRIMESTRE = 2\nTIPO_DE_DELITO = 3\nDENUNCIAS = 4\nTRIMESTRE_ACTUAL = 1\n\n#Diccionario para traducir los nombres de las comunidades\n\ndictComunidades = {\n \"Andalucía\" : {\"ANDALUCÍA\", \"Andalucia\", \"ANDALUCÖA\"},\n \"Aragón\" : {\"ARAGÓN\", \"ARAGàN\", \"Aragon\"},\n \"Principado de Asturias\" : {\"ASTURIAS (PRINCIPADO DE)\", \"Asturias\"},\n \"Islas Baleares\" : {\"BALEARS (ILLES)\", \"Baleares\"},\n \"Canarias\" : {\"CANARIAS\", \"Canarias\"},\n \"Cantabria\" : {\"CANTABRIA\", \"Cantabria\"},\n \"Castilla y León\" : {\"CASTILLA Y LEàN\", \"CASTILLA Y LEON\", \"CastillaYLeon\", \"CASTILLA Y LEÓN\"},\n \"Castilla-La Mancha\" : {\"CASTILLA - LA MANCHA\", \"CastillaLaMancha\"},\n \"Cataluña\" : {\"CATALU¥A\", \"CATALUÑA\", \"Cataluña\"},\n \"Comunidad Valenciana\" : {\"COMUNITAT VALENCIANA\", \"Valencia\"},\n \"Extremadura\" : {\"EXTREMADURA\", \"Extremadura\"},\n \"Galicia\" : {\"GALICIA\", \"Galicia\"},\n \"Comunidad de Madrid\" : {\"MADRID (COMUNIDAD DE)\", \"Madrid\"},\n \"Región de Murcia\" : {\"MURCIA (REGIàN DE)\", \"MURCIA (REGION DE)\", \"Murcia\", \"MURCIA (REGIÓN DE)\"},\n \"Comunidad Foral de Navarra\" : {\"NAVARRA (COMUNIDAD FORAL DE)\", \"Navarra\"},\n \"País Vasco\" : {\"PAÖS VASCO\", \"PAÍS VASCO\", \"PaisVasco\"},\n \"La Rioja\" : {\"RIOJA (LA)\", \"LaRioja\"},\n \"Ceuta\" : {\"CIUDAD AUTàNOMA DE CEUTA\", \"CIUDAD AUTÓNOMA DE CEUTA\", \"Ceuta\"},\n \"Melilla\" : {\"CIUDAD AUTàNOMA DE MELILLA\", \"CIUDAD AUTÓNOMA DE 
MELILLA\", \"Melilla\"}\n}\n\n#Diccionario con los tipos de crimenes clasificados\n\ndictDelitos = {\n \"Homicidios dolosos y asesinatos consumados\" : {\"2.-HOMICIDIOS DOLOSOS Y ASESINATOS CONSUMADOS (EU)\", \"Homicidios dolosos y asesinatos consumados\"},\n \"Hurtos\" : {\"8.-HURTOS\", \"Hurtos\"},\n \"Robos con fuerza, violencia o intimidación\" : {\"3.1.-ROBO CON VIOLENCIA E INTIMIDACIàN (EU)\", \"3.-DELINCUENCIA VIOLENTA (EU)\", \"4.-ROBOS CON FUERZA\", \"Robos con fuerza en domicilios, establecimientos y otras instalaciones\", \"Robos con violencia e intimidación\", \"Robos con fuerza, violencia o intimidación\"},\n \"Sustracciones de vehículos\" : {\"5.-SUSTRACCIàN VEHÖCULOS A MOTOR (EU)\", \"5.-SUSTRACCIÓN VEHÍCULOS A MOTOR (EU)\", \"Sustracciones de vehículos\"},\n \"Tráfico de drogas\" : {\"6.-TRµFICO DE DROGAS (EU)\", \"6.-TRÁFICO DE DROGAS (EU)\", \"Tráfico de drogas\"},\n \"Otros\" : {\"7.-DA¥OS\", \"1.-DELITOS Y FALTAS (EU)\", \"Agresión sexual con penetración\"}\n}\n\ndiccSegundoFormato = {\n \"2017\" : {\"Trimestre 3\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0},\n \"Trimestre 4\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0}},\n \"2018\" : { \"Trimestre 1\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0},\n \"Trimestre 2\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0},\n \"Trimestre 3\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0},\n \"Trimestre 4\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0}\n },\n \"2019\" : { \"Trimestre 1\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0},\n \"Trimestre 2\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0},\n \"Trimestre 3\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0},\n \"Trimestre 4\" : {\"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0}\n }\n}\n\ndef inicializarDicDelCom():\n diccContadorComunidades = {\n \"Andalucía\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia 
o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Aragón\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Principado de Asturias\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Islas Baleares\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Canarias\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Cantabria\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Castilla y León\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Castilla-La Mancha\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Cataluña\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Comunidad Valenciana\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Extremadura\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Galicia\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Comunidad de Madrid\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Región de Murcia\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Comunidad Foral de Navarra\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"País Vasco\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n 
\"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"La Rioja\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Ceuta\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n },\n \"Melilla\" : {\n \"Homicidios dolosos y asesinatos consumados\" : 0,\n \"Hurtos\" : 0,\n \"Robos con fuerza, violencia o intimidación\" : 0,\n \"Sustracciones de vehículos\" : 0,\n \"Tráfico de drogas\" : 0,\n \"Otros\" : 0\n }\n }\n return diccContadorComunidades\n\ndef encontrarComunidad(comunidad_mal):\n global dictComunidades, COMUNIDAD ,ANIO , TIPO_DE_DELITO, DENUNCIAS, TRIMESTRE_ACTUAL, diccSegundoFormato\n rst = \"Comunidad no encontrada\"\n for comunidad, comunidade in dictComunidades.items(): \n if(comunidad_mal in dictComunidades[comunidad]):\n rst = comunidad\n break\n if(rst == \"\"):\n print(\"No se ha encontrado la comunidad: \" + comunidad_mal + \" en el conjunto de datos\")\n return rst\n\ndef encontrarDelito(delito_mal):\n rst = \"Delito no encontrado\"\n for delito, delite in dictDelitos.items(): \n #print(comunidad, \":\", provincia)\n if(delito_mal in dictDelitos[delito]):\n rst = delito\n #print(\"La provincia \" + province + \" esta en la comunidad \" + comunidad + \"\\n\")\n break\n if(rst == \"\"):\n print(\"No se ha encontrado el delito: \" + delito_mal + \" en el conjunto de datos\")\n return rst\n\ndef cambiaTrimestre(trimestre):\n return trimestre != TRIMESTRE_ACTUAL\n\ndef separarDatos(d):\n datoComillas = re.split(r';', d)\n if len(datoComillas) == 4:\n dat1 = datoComillas[0]\n dat2 = re.split(r'\"',datoComillas[1])\n dat3 = re.split(r'\"',datoComillas[2])\n dat4 = re.split(r'\"',datoComillas[3])\n return [dat1, dat2[1], dat3[1], dat4[1]]\n else:\n return []\n\ndef procesarArchivo():\n result = pandas.DataFrame(columns=['Comunidad','Año','Trimestre','Tipo de delito','Denuncias'])\n files = os.listdir('C:/Users/David/Desktop/Mineria Dataset Criminalidad/Fuente Dataset')\n fileNames = []\n fileNames2 = []\n for file in files:\n if (file.endswith('trimestre.csv')):\n fileNames.append('C:/Users/David/Desktop/Mineria Dataset Criminalidad/Fuente Dataset/' + file)\n elif (file.endswith('.csv')):\n fileNames2.append('C:/Users/David/Desktop/Mineria Dataset Criminalidad/Fuente Dataset/' + file)\n for fileAct in fileNames:\n nombreArchivo = re.split(r'/', str(fileAct))[6]\n anio = re.split(r'_', str(nombreArchivo))[1]\n trimestre = 'Trimestre ' + re.split(r'_', str(nombreArchivo))[2]\n diccContadorComunidades = inicializarDicDelCom()\n data = pandas.read_csv(fileAct, encoding='latin-1')\n for i in range(0, len(data)):\n d = data.iloc[i].name\n datos_separados = re.split(r';', d)\n comunidad = encontrarComunidad(datos_separados[0])\n tipo = encontrarDelito(datos_separados[1])\n if (comunidad != \"Comunidad no encontrada\" and tipo != \"Delito no encontrado\"):\n resultado=re.sub('[\\.-]','', datos_separados[3])\n diccContadorComunidades[comunidad][tipo] += int(resultado)\n for comunidad, tipo_dicc in diccContadorComunidades.items():\n for tipo_delito, cantidad in tipo_dicc.items():\n if (comunidad != \"Comunidad no encontrada\" and tipo != \"Delito no encontrado\"):\n 
trimestre_limpio = re.split(r' ', str(trimestre))[1]\n result = result.append({'Comunidad' : comunidad, 'Año' : anio, 'Trimestre' : trimestre_limpio, 'Tipo de delito' : tipo_delito, 'Denuncias' : cantidad},ignore_index=True)\n for fileAct in fileNames2:\n a = re.split(r'/', str(fileAct))[6]\n comunidad = re.sub(r'.csv', '', a)\n comunidad = re.split(r'_', str(comunidad))[1]\n comunidad = encontrarComunidad(comunidad)\n data = pandas.read_csv(fileAct, error_bad_lines=False)\n diccContadorComunidades = inicializarDicDelCom()\n for i in range(0, len(data)):\n d = data.iloc[i, 0]\n res = separarDatos(d)\n if res != []:\n tipo = encontrarDelito(res[2])\n if (comunidad != \"Comunidad no encontrada\" and tipo != \"Delito no encontrado\"):\n resultado=re.sub('[\\.-]','', res[3])\n diccSegundoFormato[res[0]][res[1]][tipo] += int(resultado)\n if (comunidad != \"Comunidad no encontrada\" and tipo != \"Delito no encontrado\"):\n for anio, trimestre in diccSegundoFormato.items():\n for trimestre_actual, delito in trimestre.items():\n for tipo_delito, cantidad in delito.items():\n if (comunidad != \"Comunidad no encontrada\" and tipo != \"Delito no encontrado\"):\n trimestre_limpio = re.split(r' ', str(trimestre_actual))[1]\n result = result.append({'Comunidad' : comunidad, 'Año' : anio, 'Trimestre' : trimestre_actual, 'Tipo de delito' : tipo_delito, 'Denuncias' : cantidad},ignore_index=True)\n data.drop(data.columns, axis=1)\n result.sort_values(by=['Año', 'Trimestre'], inplace=True)\n result.to_csv('Criminalidad.csv', index=False) \n\n\n\nprocesarArchivo()","sub_path":"Proyecto Grupo 7/Scripts Pyhton de Limpieza/scriptCriminalidad.py","file_name":"scriptCriminalidad.py","file_ext":"py","file_size_in_byte":16571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"118717780","text":"import random\ndef main():\n # Code here\n print(\"Hello Player!\")\n nameString = input(\"What is your name? \")\n answ=input(\"What operation do you want to use? 
\")\n num1=input(\"Enter a number \")\n num2=input(\"Enter a second number \")\n if answ == \"Addition\":\n print(str(int(num1)+int(num2)))\n elif answ == \"Subtraction\":\n print(str(int(num1)-(num2)))\n elif answ == \"Multiplication\":\n print(str(int(num1)*int(num2)))\n elif answ == \"Division\":\n print(str(int(num1)/int(num2)))\n else:\n print(\"That is not an operation!\")\n number1 = input(\"Enter a number \")\n sol=int(number1)*2\n print(\"The number you entered when multiplied by 2 is now \"+ str(sol))\n number2 = input(\"Now enter an even number \")\n sol2=int(number2)/2\n print(\"Your number divided by 2 is now \"+ str(sol2))\n print(\"Bye!!!\")\n return 0\n \n \nif __name__ == \"__main__\":\n main()","sub_path":"Operations.py","file_name":"Operations.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"322869975","text":"from django.http import Http404\nfrom views import BaseView\nfrom custom.menu import MenuItem as Item, Menu\nfrom django.utils.translation import ugettext as _\nfrom db.files.models import UploadFile\nfrom django.db.models import Count\nfrom .models import BookScrapy\nfrom db.images.models import Image\n\n\nclass DBView(BaseView):\n template_name = \"db/index.html\"\n\n def get_context_data(self, **kwargs):\n ctx = super(DBView, self).get_context_data(**kwargs)\n ctx[\"query\"] = self.request.GET.get('q', '').strip()\n ctx['menu'].select(\"db\")\n ctx['leftMenu'] = Menu(\n Item(_(\"Films\"), 'db:films:main', \"films\"),\n Item(_(\"Books\"), 'db:books', \"books\"),\n Item(_(\"Music\"), 'db:music:main', \"music\"),\n Item(_(\"Images\"), 'db:images:main', \"images\"),\n Item(_(\"Torrents\"), 'db:torrents:index', \"torrents\"),\n Item(_(\"Files\"), 'db:files', \"files\"),\n ).select(\"recent\")\n return ctx\n\n def get(self, request, **kwargs):\n ctx = self.get_context_data(**kwargs)\n user = ctx['user']\n if not user.has_perm('db.view'):\n raise Http404\n return self.render_to_response(ctx)\n\n\nclass Books(DBView):\n template_name = \"db/books.html\"\n\n def get_context_data(self, **kwargs):\n ctx = super(Books, self).get_context_data(**kwargs)\n ctx[\"leftMenu\"].select(\"books\")\n ctx['search_help_text'] = _('Search: book / author')\n ctx[\"books\"] = BookScrapy.objects.exclude(title__isnull=True) \\\n .exclude(logo_url__isnull=True) \\\n .exclude(freebookspot_id__isnull=True) \\\n .annotate(null_date=Count('date_added')) \\\n .order_by('-null_date', '-date_added')[:10]\n return ctx\n\n def get(self, request, **kwargs):\n ctx = self.get_context_data(**kwargs)\n user = ctx['user']\n if not user.has_perm('db.view'):\n raise Http404\n return self.render_to_response(ctx)\n\n\nclass Files(DBView):\n template_name = \"db/files.html\"\n\n def get_context_data(self, **kwargs):\n ctx = super(Files, self).get_context_data(**kwargs)\n ctx[\"leftMenu\"].select(\"files\")\n return ctx\n\n def get(self, request, **kwargs):\n ctx = self.get_context_data(**kwargs)\n ctx['uploads'] = UploadFile.objects.all().annotate(\n null_position=Count('date_uploaded')).order_by('-null_position', '-date_uploaded')\n user = ctx['user']\n if not user.has_perm('db.view'):\n raise Http404\n return self.render_to_response(ctx)\n","sub_path":"src/db/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"465052832","text":"import openpyxl\r\nimport mysql.connector\r\nimport 
sys\r\n\r\n\r\n#dat = sys.argv[1]\r\ndef file_written(query,templatepath,sheetname):\r\n try:\r\n template = templatepath\r\n workbk = openpyxl.load_workbook(template)\r\n try:\r\n cnx = mysql.connector.connect(host='localhost', user='rjiladmin', password='TestServer@123', port='3307', database='epc')\r\n# print('connected to localhost')\r\n cursor = cnx.cursor()\r\n query = query\r\n cursor.execute(query)\r\n worksht = workbk[sheetname]\r\n rowv=0\r\n #for row in worksht.iter_rows(min_row=1, max_col=3, max_row=2):\r\n list_s = list(cursor)\r\n for s in list_s:\r\n columnv = 0\r\n rowv=rowv+1\r\n print(len(s))\r\n for i in range(0,len(s)):\r\n columnv = columnv + 1\r\n print(rowv + 1)\r\n print(columnv)\r\n print(s[i])\r\n worksht.cell(row=rowv + 1, column=columnv).value = s[i]\r\n #time.sleep(2)\r\n except mysql.connector.Error as ex:\r\n print(ex)\r\n finally:\r\n cnx.close()\r\n #sys.exit(0)\r\n workbk.save(templatepath)\r\n workbk.close()\r\n except NameError as ex:\r\n print(ex)\r\n\r\nquery_free_m=\"\"\"select name,total, free, 100*(cast(Free as signed)/cast(Total as signed)) as freeRatio from epc.lcs_free_m \"\"\"\r\nfile_written(query_free_m,'C:\\\\mylog\\\\lcs_hc\\\\LCS_HC.xlsx',\"Free_Memory\")\r\n\r\n\r\nquery_free_m=\"\"\"select Name, Filesystem, Mounted_on,Used_Per from epc.lcs_diskspace ;\"\"\"\r\nfile_written(query_free_m,'C:\\\\mylog\\\\lcs_hc\\\\LCS_HC.xlsx',\"DiskSpace\")\r\n\r\nquery_free_m=\"\"\"select Name, replace(replace(Replace(status1,'Status : [1;31m',''),'(local)',''),'status : [1;31m','') Stat from epc.lcs_systemcheck \"\"\"\r\nfile_written(query_free_m,'C:\\\\mylog\\\\lcs_hc\\\\LCS_HC.xlsx',\"SystemStatus\")","sub_path":"firstone/lcs_file_written.py","file_name":"lcs_file_written.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"319976553","text":"from typing import List\n\n\nclass Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n # binary search for row and column\n if not matrix or not matrix[0]:\n return False\n rows, columns = len(matrix), len(matrix[0])\n left, right = 0, rows - 1\n while left <= right:\n mid = (left + right) // 2\n if matrix[mid][0] < target:\n left = mid + 1\n elif matrix[mid][0] == target:\n return True\n else:\n right = mid - 1\n row = right\n if row < 0 or row >= rows:\n return False\n left, right = 0, columns - 1\n while left <= right:\n mid = (left + right) // 2\n if matrix[row][mid] < target:\n left = mid + 1\n elif matrix[row][mid] == target:\n return True\n else:\n right = mid - 1\n return False\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.searchMatrix(matrix=[[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]], target=13))\n","sub_path":"LeetCode31DaysChallenge-202010/Search a 2D Matrix.py","file_name":"Search a 2D Matrix.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"567044807","text":"#\n# Copyright (c) 2008-2015 Thierry Florac \n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n\n__docformat__ = 'restructuredtext'\n\n\n# import standard library\nimport logging\nlogger = logging.getLogger('PyAMS (utils)')\n\nimport venusian\n\n# import interfaces\n\n# import packages\nfrom zope.interface import implementedBy\n\n\nclass ContextAdapter(object):\n \"\"\"Context adapter\"\"\"\n\n def __init__(self, context):\n self.context = context\n\n\nclass ContextRequestAdapter(object):\n \"\"\"Context + request adapter\"\"\"\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n\nclass ContextRequestViewAdapter(object):\n \"\"\"Context + request + view adapter\"\"\"\n\n def __init__(self, context, request, view):\n self.context = context\n self.request = request\n self.view = view\n\n\nclass adapter_config(object):\n \"\"\"Function or class decorator to declare an adapter\"\"\"\n\n venusian = venusian\n\n def __init__(self, **settings):\n if 'for_' in settings:\n if settings.get('context') is None:\n settings['context'] = settings.pop('for_')\n self.__dict__.update(settings)\n\n def __call__(self, wrapped):\n settings = self.__dict__.copy()\n depth = settings.pop('_depth', 0)\n\n def callback(context, name, ob):\n adapts = settings.get('context')\n if adapts is None:\n adapts = getattr(ob, '__component_adapts__', None)\n if adapts is None:\n raise TypeError(\"No for argument was provided for %r and \"\n \"can't determine what the factory adapts.\" % ob)\n if not isinstance(adapts, tuple):\n adapts = (adapts,)\n\n provides = settings.get('provides')\n if provides is None:\n intfs = list(implementedBy(ob))\n if len(intfs) == 1:\n provides = intfs[0]\n if provides is None:\n raise TypeError(\"Missing 'provided' argument\")\n\n config = context.config.with_package(info.module)\n logger.debug(\"Registering adapter {0} for {1} providing {2}\".format(str(ob),\n str(adapts),\n str(provides)))\n config.registry.registerAdapter(ob, adapts, provides, settings.get('name', ''))\n\n info = self.venusian.attach(wrapped, callback, category='pyams_adapter',\n depth=depth + 1)\n\n if info.scope == 'class':\n # if the decorator was attached to a method in a class, or\n # otherwise executed at class scope, we need to set an\n # 'attr' into the settings if one isn't already in there\n if settings.get('attr') is None:\n settings['attr'] = wrapped.__name__\n\n settings['_info'] = info.codeinfo # fbo \"action_method\"\n return wrapped\n","sub_path":"src/pyams_utils/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"368920344","text":"import socket\nimport sys\nimport pickle\nimport select\nfrom time import sleep\n\nclass Network():\n def __init__(self, controller, password):\n self.__controller = controller\n self.__password = password\n self.__server = False\n self.__connected = False\n try:\n self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error as err: \n print(\"socket creation failed with error %s\" %(err))\n sys.exit()\n self.__recv_buf = bytes()\n self.get_local_ip_addr()\n\n\n def server(self, port):\n self.__server = True\n while True:\n try:\n self.__sock.bind(('', port))\n break\n except 
OSError as err:\n print(err)\n print(\"waiting, will retry in 10 seconds\")\n sleep(10)\n \n # put the socket into listening mode \n self.__sock.listen(5)\n print(\"listening for incoming connection...\")\n\n while True: \n # Establish connection with client. \n c_sock, addr = self.__sock.accept()\n #print('Got connection from', addr)\n msg = c_sock.recv(1024)\n txt = msg.decode()\n if txt == self.__password:\n c_sock.send(\"OK\\n\".encode())\n break\n else:\n c_sock.close()\n # swap the socket names so send/recv functions don't care if we're client or server\n self.__listen_sock = self.__sock\n self.__sock = c_sock\n self.__connected = True\n \n\n def client(self, ip, port):\n self.__sock.connect((ip, port))\n self.__sock.send(self.__password.encode())\n msg = self.__sock.recv(3)\n txt = msg.decode()\n if txt == \"OK\\n\":\n self.__connected = True\n else:\n print(\"handshake failed\\n\")\n\n def get_local_ip_addr(self):\n # ugly hacky way to find our IP address\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # connect to nrg.cs.uc.ac.uk\n s.connect((\"128.16.66.166\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip\n\n @property\n def connected(self):\n return self.__connected\n\n def send(self, msg):\n send_bytes = pickle.dumps(msg)\n lenbytes = len(send_bytes).to_bytes(2, byteorder='big')\n self.__sock.send(lenbytes + send_bytes)\n\n def send_maze(self, maze):\n msg = [\"maze\", maze]\n self.send(msg)\n\n def check_for_messages(self, now):\n rd, wd, ed = select.select([self.__sock],[],[],0)\n if not rd:\n pass\n else:\n try:\n recv_bytes = self.__sock.recv(10000)\n except ConnectionResetError as e:\n print(\"Remote game has quit: \", e)\n sys.exit()\n self.__recv_buf += recv_bytes # concat onto whatever is left from prev receive\n recv_len = int.from_bytes(self.__recv_buf[0:2], byteorder='big')\n while (len(self.__recv_buf) - 2 >= recv_len):\n self.parse_msg(self.__recv_buf[2:recv_len+2])\n self.__recv_buf = self.__recv_buf[recv_len+2:]\n if len(self.__recv_buf) > 2:\n recv_len = int.from_bytes(self.__recv_buf[0:2], byteorder='big')\n \n \n def parse_msg(self, buf):\n msg = pickle.loads(buf)\n if msg[0] == \"maze\":\n maze = msg[1]\n self.__controller.received_maze(maze)\n elif msg[0] == \"newpacman\":\n #A pacman has arrived message\n self.foreign_pacman_arrived(msg[1])\n elif msg[0] == \"pacmanleft\":\n #A pacman has left message\n self.foreign_pacman_left(msg[1])\n elif msg[0] == \"pacmandied\":\n #A pacman has left message\n self.foreign_pacman_died(msg[1])\n elif msg[0] == \"pacmanhome\":\n #Pacman go home!\n self.pacman_go_home(msg[1])\n elif msg[0] == \"pacman\":\n #A pacman update message\n self.pacman_update(msg[1])\n elif msg[0] == \"ghost\":\n #A ghost update message\n self.ghost_update(msg[1])\n elif msg[0] == \"ghosteaten\":\n #The foreign pacman ate our ghost!\n self.foreign_pacman_ate_ghost(msg[1])\n elif msg[0] == \"eat\":\n #A food update message\n self.eat(msg[1])\n elif msg[0] == \"score\":\n #A score update message\n self.score_update(msg[1])\n elif msg[0] == \"status\":\n #A status update message\n self.status_update(msg[1])\n else:\n print(\"Unknown message type: \", msg[0])\n \n\n def foreign_pacman_arrived(self, msg):\n #print(\"received pacman_arrived\")\n self.__controller.foreign_pacman_arrived()\n\n def send_foreign_pacman_arrived(self):\n #print(\"send pacman_arrived\")\n payload = []\n msg = [\"newpacman\", payload]\n self.send(msg)\n\n def foreign_pacman_left(self, msg):\n #print(\"received pacman_left\")\n 
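# 'msg' carries an empty payload for this event; receiving it at all\n        # is what tells the controller that the remote pacman has left.\n        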
self.__controller.foreign_pacman_left()\n\n def send_foreign_pacman_left(self):\n #print(\"send pacman_left\")\n payload = []\n msg = [\"pacmanleft\", payload]\n self.send(msg)\n\n def foreign_pacman_died(self, msg):\n #print(\"received pacman_died\")\n self.__controller.foreign_pacman_died()\n\n def send_foreign_pacman_died(self):\n #print(\"send pacman_died\")\n payload = []\n msg = [\"pacmandied\", payload]\n self.send(msg)\n\n def pacman_go_home(self, msg):\n self.__controller.pacman_go_home()\n\n def send_pacman_go_home(self):\n #print(\"send pacman_go_home\")\n payload = []\n msg = [\"pacmanhome\", payload]\n self.send(msg)\n\n def pacman_update(self, msg):\n #print(\"received pacman_update\")\n pos = msg[0] #position in pixels\n dir = msg[1] #direction enum\n speed = msg[2]\n self.__controller.foreign_pacman_update(pos, dir, speed)\n\n def send_pacman_update(self, pos, dir, speed):\n #print(\"send pacman_update\")\n payload = [pos, dir, speed]\n msg = [\"pacman\", payload]\n self.send(msg)\n \n def ghost_update(self, msg):\n #print(\"received ghost_update\")\n ghostnum = msg[0]\n pos = msg[1] #position in pixels\n dirn = msg[2] #direction enum\n speed = msg[3] \n mode = msg[4] \n self.__controller.remote_ghost_update(ghostnum, pos, dirn, speed, mode)\n\n def send_ghost_update(self, ghostnum, pos, dirn, speed, mode):\n #print(\"send ghost_update\")\n payload = [ghostnum, pos, dirn, speed, mode]\n msg = [\"ghost\", payload]\n self.send(msg)\n\n def send_foreign_pacman_ate_ghost(self, ghostnum):\n payload = [ghostnum] # probably shouldn't be a list - inefficient\n msg = [\"ghosteaten\", payload]\n self.send(msg)\n\n def foreign_pacman_ate_ghost(self, msg):\n ghostnum = msg[0]\n self.__controller.foreign_pacman_ate_ghost(ghostnum)\n \n def eat(self, msg):\n pos = msg[0]\n is_foreign = msg[1]\n is_powerpill = msg[2]\n if is_foreign:\n # A foreign pacman ate food on our screen\n self.__controller.foreign_eat(pos, is_powerpill)\n else:\n # Food was eaten on the remote screen\n self.__controller.remote_eat(pos, is_powerpill)\n\n def send_eat(self, pos, is_foreign, is_powerpill):\n payload = [pos, is_foreign, is_powerpill]\n msg = [\"eat\", payload]\n self.send(msg)\n\n def score_update(self, msg):\n score = msg[0]\n self.__controller.update_remote_score(score)\n\n def send_score_update(self, score):\n payload = [score] # probably shouldn't be a list\n msg = [\"score\", payload]\n self.send(msg)\n \n def status_update(self, msg):\n status = msg[0]\n self.__controller.remote_status_update(status)\n\n def send_status_update(self, status):\n payload = [status] # probably shouldn't be a list\n msg = [\"status\", payload]\n self.send(msg)\n \n","sub_path":"Assignments/assignment5/multi_player/src/pa_network.py","file_name":"pa_network.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"319183266","text":"import pyLDAvis.gensim\r\nfrom nltk.corpus import stopwords\r\nfrom time import sleep\r\nimport time\r\nimport warnings\r\nfrom pprint import pprint\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\r\nfrom sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD\r\nimport pyLDAvis.sklearn\r\nfrom gensim.models import CoherenceModel\r\nfrom gensim.utils import simple_preprocess\r\nimport gensim.corpora as corpora\r\nimport gensim\r\nimport networkx as nx\r\nimport itertools as it\r\nimport 
en_core_web_sm\r\nfrom collections import Counter\r\nfrom spacy import displacy\r\nimport spacy\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport matplotlib\r\nfrom nltk import word_tokenize, pos_tag, pos_tag_sents\r\nimport requests\r\nimport string\r\nimport urllib\r\nimport collections\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport pandas as pd\r\nimport numpy as np\r\nimport nltk\r\nnltk.download('averaged_perceptron_tagger')\r\nnltk.download('stopwords')\r\nstop = stopwords.words('english')\r\n# %matplotlib inline\r\nplt.show()\r\nnlp = en_core_web_sm.load()\r\nplt.show()\r\n# Sklearn\r\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\n\r\n# code\r\nbase_url = \"https://www.google.co.uk/search\"\r\nqueries = [\"brexit\", \"what is brexit\", \"what does brexit mean\"]\r\nserp_queries = []\r\nserp_urls = []\r\nclean_links = []\r\nlinks = []\r\nserp_titles = []\r\nserp_descriptions = []\r\n\r\nfor q in queries:\r\n query = urllib.parse.quote_plus(q)\r\n serp_queries.append(query)\r\n\r\nfor q in serp_queries:\r\n url = \"{base_url}?q={query}&num={num}&cr={serp_country}\".format(\r\n base_url=base_url, query=q, num=\"10\", serp_country=\"GB\")\r\n serp_urls.append(url)\r\n response = requests.get(url)\r\n soup = BeautifulSoup(response.text, \"html.parser\")\r\n result_div = soup.find_all('div', attrs={'class': 'ZINbbc'})\r\n for r in result_div:\r\n # Checks if each element is present, else, raise exception\r\n try:\r\n link = r.find('a', href=True)\r\n title = r.find('div', attrs={'class': 'vvjwJb'}).get_text()\r\n description = r.find('div', attrs={'class': 's3v9rd'}).get_text()\r\n # Check to make sure everything is present before appending\r\n if link != '' and title != '' and description != '':\r\n links.append(link['href'])\r\n serp_titles.append(title)\r\n serp_descriptions.append(description)\r\n # Next loop if one element is not present\r\n except:\r\n continue\r\n to_remove = []\r\n for i, l in enumerate(links):\r\n clean = re.search('\\/url\\?q\\=(.*)\\&sa', l)\r\n\r\n # Anything that doesn't fit the above pattern will be removed\r\n if clean is None:\r\n to_remove.append(i)\r\n continue\r\n clean_links.append(clean.group(1))\r\n # print(clean_links)\r\n\r\ndedupe_clean_links = list(dict.fromkeys(clean_links))\r\n############\r\nparagraphs = []\r\ntitles = []\r\nscraped_content = []\r\nscraped_titles = []\r\nfor url in dedupe_clean_links:\r\n # time.sleep(0.01)\r\n response = requests.get(url, time.sleep(2)) # headers=headers\r\n soup2 = BeautifulSoup(response.content, \"html.parser\")\r\n paragraphs = soup2.find_all('p')\r\n titles = soup2.find_all(re.compile('^h[1-4]$'))\r\n\r\n for paragraph in paragraphs:\r\n paragraphs = [paragraph.text]\r\n paragraphs = paragraph.get_text()\r\n scraped_content.append(paragraphs)\r\n\r\n for title in titles:\r\n titles = [title.text]\r\n titles = title.get_text()\r\n scraped_titles.append(titles)\r\nscraped_content = list(map(str.strip, scraped_content))\r\nscraped_titles = list(map(str.strip, scraped_titles))\r\n###########\r\n\r\n\r\ndef count_tags(input_tag):\r\n tag_count = {}\r\n for word, tag in input_tag:\r\n if tag in tag_count:\r\n tag_count[tag] += 1\r\n else:\r\n tag_count[tag] = 1\r\n return(tag_count)\r\n\r\n\r\ndf = pd.DataFrame.from_dict({'Serp_Content': scraped_content}).astype(str)\r\ndf['Serp_Content'] = df['Serp_Content'].fillna(\r\n \"\").str.lower().str.replace('[^\\w\\s]', '')\r\ndf['Serp_Content'] = df['Serp_Content'].apply(\r\n lambda x: [item for item in x.split() if 
item not in stop]).map(pos_tag)\r\ndf['Serp_Content_PoS_Counts'] = df['Serp_Content'].map(count_tags)\r\n# df.head()\r\n#######################\r\nvocabulary = {}\r\nfor row in df['Serp_Content']:\r\n for word, tag in row:\r\n if word in vocabulary:\r\n if tag in vocabulary[word]:\r\n vocabulary[word][tag] += 1\r\n else:\r\n vocabulary[word][tag] = 1\r\n else:\r\n vocabulary[word] = {tag: 1}\r\n\r\nvocabulary_df = pd.DataFrame.from_dict(vocabulary, orient='index')\r\nvocabulary_df.fillna(value=0, inplace=True)\r\n\r\nvocabulary_df.rename(columns={'NN': 'Noun'}, inplace=True)\r\nvocabulary_df.rename(columns={'NNS': 'Noun'}, inplace=True)\r\nvocabulary_df.rename(columns={'NNP': 'Noun'}, inplace=True)\r\nvocabulary_df.rename(columns={'NNPS': 'Noun'}, inplace=True)\r\nvocabulary_df.rename(columns={'RB': 'Adverb'}, inplace=True)\r\nvocabulary_df.rename(columns={'RBR': 'Adverb'}, inplace=True)\r\nvocabulary_df.rename(columns={'RBS': 'Adverb'}, inplace=True)\r\nvocabulary_df.rename(columns={'VB': 'Verb'}, inplace=True)\r\nvocabulary_df.rename(columns={'VBD': 'Verb'}, inplace=True)\r\nvocabulary_df.rename(columns={'VBG': 'Verb'}, inplace=True)\r\nvocabulary_df.rename(columns={'VBG': 'Verb'}, inplace=True)\r\nvocabulary_df.rename(columns={'VBN': 'Verb'}, inplace=True)\r\nvocabulary_df.rename(columns={'VBP': 'Verb'}, inplace=True)\r\nvocabulary_df.rename(columns={'VBZ': 'Verb'}, inplace=True)\r\nvocabulary_df.rename(columns={'JJ': 'Adjective'}, inplace=True)\r\nvocabulary_df.rename(columns={'JJR': 'Adjective'}, inplace=True)\r\nvocabulary_df.rename(columns={'JJS': 'Adjective'}, inplace=True)\r\n\r\npos_list = ['Noun', 'Adverb', 'Verb', 'Adjective']\r\n\r\nvocabulary_df = vocabulary_df[pos_list]\r\nvocabulary_df = vocabulary_df.groupby(lambda x: x, axis=1).sum()\r\nvocabulary_df = vocabulary_df.sort_values('Noun', ascending=False)\r\n\r\nvocabulary_df.head()\r\n###################\r\nsize4 = 10 # Change this number to how many words you want to show in your visual\r\ntag4 = 'Noun' # Change this to the PoS tag that you want to analyse\r\nax = vocabulary_df[tag4].sort_values().tail(size4).plot(\r\n kind='barh', figsize=(12, 9), color='xkcd:purple')\r\nax.tick_params(axis=\"y\", labelsize=18)\r\nax.set_title('Top 10 Most Frequent Ranking Content Nouns (ALL)', fontsize='20')\r\nax.set_ylabel('Ranking Nouns', fontsize='12')\r\nax.set_xlabel('Count', fontsize='12')\r\n#ax.title.set_position([.5, 1.01])\r\nsize5 = 10 # Change this number to how many words you want to show in your visual\r\ntag5 = 'Verb' # Change this to the PoS tag that you want to analyse\r\nax = vocabulary_df[tag5].sort_values().tail(size5).plot(\r\n kind='barh', figsize=(12, 9), color='xkcd:purple')\r\nax.tick_params(axis=\"y\", labelsize=18)\r\nax.set_title('Top 10 Most Frequent Ranking Content Verbs (ALL)', fontsize='20')\r\nax.set_ylabel('Ranking Verbs', fontsize='12')\r\nax.set_xlabel('Count', fontsize='12')\r\nax.title.set_position([.5, 1.01])\r\nsize6 = 10 # Change this number to how many words you want to show in your visual\r\ntag6 = 'Adjective' # Change this to the PoS tag that you want to analyse\r\nax = vocabulary_df[tag6].sort_values().tail(size6).plot(\r\n kind='barh', figsize=(12, 9), color='xkcd:purple')\r\nax.tick_params(axis=\"y\", labelsize=18)\r\nax.set_title(\r\n 'Top 10 Most Frequent Ranking Content Adjectives (ALL)', fontsize='20')\r\nax.set_ylabel('Ranking Adjectives', fontsize='12')\r\nax.set_xlabel('Count', fontsize='12')\r\nax.title.set_position([.5, 1.01])\r\nscraped_content_2 = 
scraped_content.copy()\r\nscraped_content_3 = ''.join(map(str, scraped_content_2))\r\nscraped_content_3 = scraped_content_3.strip(string.punctuation)\r\nscraped_content_3 = scraped_content_3.replace(\",\", \" \").replace(\r\n    \"[\", \" \").replace(\"]\", \" \").replace(\"\\\\\", \" \")\r\nentities = nlp(scraped_content_3)\r\nlabels = [x.label_ for x in entities.ents]\r\nentity_map = dict([(str(x), x.label_) for x in nlp(str(entities)).ents])\r\n\r\n# entity_map\r\n#displacy.render(nlp(str(entities)), jupyter=True, style='ent')\r\n\r\n\r\ndef count_entities(input_tag):\r\n    tag_count = {}\r\n    for word, tag in input_tag:\r\n        if tag in tag_count:\r\n            tag_count[tag] += 1\r\n        else:\r\n            tag_count[tag] = 1\r\n    return(tag_count)\r\n\r\n\r\ndf2 = pd.DataFrame.from_dict({'Serp_Content': scraped_content})\r\ndf2['Serp_Content'] = df2['Serp_Content'].astype(\r\n    str).str.replace('[^\\w\\s]', '')\r\n\r\n\r\ndef spacy_entity(df):\r\n    df2 = nlp(df)\r\n    df3 = [[w.text, w.label_] for w in df2.ents]\r\n    return df3\r\n\r\n\r\ndf2['Serp_Content'] = df2['Serp_Content'].apply(spacy_entity)\r\ndf2['Entity_Counts'] = df2['Serp_Content'].map(count_entities)\r\n\r\nent_vocabulary = {}\r\nfor row in df2['Serp_Content']:\r\n    for word, tag in row:\r\n        if word in ent_vocabulary:\r\n            if tag in ent_vocabulary[word]:\r\n                ent_vocabulary[word][tag] += 1\r\n            else:\r\n                ent_vocabulary[word][tag] = 1\r\n        else:\r\n            ent_vocabulary[word] = {tag: 1}\r\n\r\nent_df = pd.DataFrame.from_dict(ent_vocabulary, orient='index')\r\nent_df.fillna(value=0, inplace=True)\r\n\r\nent_df.sort_values('ORG', ascending=False).head(10)\r\n###########\r\nfor token, tag in df.Serp_Content[0]:\r\n    if re.match(r'NN*|JJ*', tag):\r\n        print(token, tag)\r\n\r\nnoun_phrases = [[token for token, tag in sent if re.match(r'NN*|JJ*', tag)]\r\n                for sent in df.Serp_Content]\r\nedgelist = [\r\n    edge for phrase in noun_phrases for edge in it.combinations(phrase, 2)]\r\n\r\nG = nx.Graph(edgelist)\r\nindex = nx.betweenness_centrality(G)\r\nfor component in list(nx.connected_components(G)):\r\n    if len(component) < 5:\r\n        for node in component:\r\n            G.remove_node(node)\r\n\r\nsorted_index = sorted(index.items(), key=lambda x: x[1], reverse=True)\r\n\r\n# Top 20 noun phrases by betweenness centrality:\r\n# for word, centr in sorted_index[:20]:\r\n#print (word, centr)\r\n\r\nremove = [node for node, degree in dict(G.degree()).items() if degree < 2]\r\n\r\n# %pylab inline\r\n# %config InlineBackend.figure_format = 'png'\r\n#plt.rc('figure', figsize=(24, 18))\r\n#G.remove_nodes_from([n for n in index if index[n] == .0])\r\n# G.remove_nodes_from(remove)\r\n#node_size = [index[n]*20000 for n in G]\r\n#pos = nx.spring_layout(G)\r\n#nx.draw_networkx(G, pos, node_size=node_size, edge_color='Y', alpha=.8, linewidths=0, font_color='Black', font_size='10', font_weight='bold')\r\n\r\n############\r\nresonance_df = pd.DataFrame(sorted_index, columns=['Word', 'Resonance'])\r\n\r\nresonance_df.sort_values('Resonance', ascending=False).head()\r\n############\r\nscraped_titles_df = pd.DataFrame.from_dict(\r\n    {'Serp_Titles': scraped_titles}).astype(str)\r\nscraped_titles_df['Serp_Titles'] = scraped_titles_df['Serp_Titles'].fillna(\"\").str.lower(\r\n).str.replace('[^\\w\\s]', '').apply(lambda x: [item for item in x.split() if item not in stop])\r\nscraped_titles_list = list(\r\n    filter(None, scraped_titles_df.Serp_Titles.tolist()))\r\nserp_titles_counts = collections.Counter()\r\n\r\nfor phrase in scraped_titles_list:\r\n    #serp_titles_counts.update(nltk.ngrams(phrase, 1))\r\n    
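# count bigram and trigram co-occurrences across the ranking titles;\r\n    # the unigram pass above is intentionally commented out\r\n    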
serp_titles_counts.update(nltk.ngrams(phrase, 2))\r\n serp_titles_counts.update(nltk.ngrams(phrase, 3))\r\nserp_titles_trends = serp_titles_counts.most_common(50)\r\n\r\nranking_titles_trends = pd.DataFrame(\r\n serp_titles_trends, columns=[\"Titles\", \"Count\"])\r\n\r\n\r\ndef remove_brackets(Titles):\r\n fixed_keyword = ' '.join(Titles)\r\n return fixed_keyword\r\n\r\n\r\nranking_titles_trends['Titles'] = ranking_titles_trends.Titles.apply(\r\n remove_brackets)\r\n\r\nranking_titles_trends.head()\r\n#############\r\ntopics_df = pd.DataFrame.from_dict(\r\n {'Serp_Content': scraped_content}).astype(str)\r\ndata = topics_df.Serp_Content.values.tolist()\r\ndata = [re.sub('\\s+', ' ', sent) for sent in data]\r\ndata = [re.sub(\"\\'\", \"\", sent) for sent in data]\r\ndata = [re.sub('\\S*@\\S*\\s?', '', sent) for sent in data]\r\n\r\n\r\ndef sent_to_words(sentences):\r\n for sentence in sentences:\r\n # deacc=True removes punctuation\r\n yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))\r\n\r\n\r\ndata_words = list(sent_to_words(data))\r\n\r\n\r\ndef lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n \"\"\"https://spacy.io/api/annotation\"\"\"\r\n texts_out = []\r\n for sent in texts:\r\n doc = nlp(\" \".join(sent))\r\n texts_out.append(\" \".join([token.lemma_ if token.lemma_ not in [\r\n '-PRON-'] else '' for token in doc if token.pos_ in allowed_postags]))\r\n return texts_out\r\n\r\n\r\n# Initialize spacy 'en' model, keeping only tagger component (for efficiency)\r\n# Run in terminal: python3 -m spacy download en\r\nnlp = spacy.load('en', disable=['parser', 'ner'])\r\n\r\n# Do lemmatization keeping only Noun, Adj, Verb, Adverb\r\ndata_lemmatized = lemmatization(data_words, allowed_postags=[\r\n 'NOUN', 'ADJ', 'VERB', 'ADV'])\r\n\r\nvectorizer = CountVectorizer(analyzer='word',\r\n min_df=3, # minimum reqd occurences of a word\r\n stop_words='english', # remove stop words\r\n lowercase=True, # convert all words to lowercase\r\n token_pattern='[a-zA-Z0-9]{3,}', # num chars > 3\r\n # max_features=50000, # max number of uniq words\r\n max_df=0.90\r\n )\r\n\r\ndata_vectorized = vectorizer.fit_transform(data_lemmatized)\r\ndata_dense = data_vectorized.todense()\r\n##############################\r\n# Build LDA Model - can take a while!\r\nlda_model = LatentDirichletAllocation(n_components=10, # Number of topics\r\n max_iter=10, # Max learning iterations\r\n learning_method='online',\r\n random_state=100, # Random state\r\n batch_size=128, # n docs in each learning iter\r\n evaluate_every=-1, # compute perplexity every n iters, default: Don't\r\n n_jobs=-1, # Use all available CPUs\r\n )\r\n\r\n# Define Search Param\r\nsearch_params = {'n_components': [10, 15, 20,\r\n 25, 30], 'learning_decay': [.5, .7, .9]}\r\n\r\n# Init the Model\r\nlda = LatentDirichletAllocation()\r\n\r\n# Init Grid Search Class\r\nmodel = GridSearchCV(lda, param_grid=search_params)\r\n\r\n# Do the Grid Search\r\nmodel.fit(data_vectorized)\r\n\r\n# Best Model\r\nbest_lda_model = model.best_estimator_\r\n\r\n# Model Parameters\r\nprint(\"Best Model's Params: \", model.best_params_)\r\n#########################\r\ntopicnames = [\"Topic\" + str(i) for i in range(best_lda_model.n_components)]\r\ndf_topic_keywords = pd.DataFrame(best_lda_model.components_)\r\n\r\n# Assign Column and Index\r\ndf_topic_keywords.columns = vectorizer.get_feature_names()\r\ndf_topic_keywords.index = topicnames\r\n\r\n\r\ndef show_topics(vectorizer=vectorizer, lda_model=lda_model, n_words=20):\r\n keywords = 
np.array(vectorizer.get_feature_names())\r\n topic_keywords = []\r\n for topic_weights in lda_model.components_:\r\n top_keyword_locs = (-topic_weights).argsort()[:n_words]\r\n topic_keywords.append(keywords.take(top_keyword_locs))\r\n return topic_keywords\r\n\r\n\r\ntopic_keywords = show_topics(\r\n vectorizer=vectorizer, lda_model=best_lda_model, n_words=20)\r\n\r\n# Topic - Keywords Dataframe\r\ndf_topic_keywords = pd.DataFrame(topic_keywords)\r\ndf_topic_keywords.columns = ['Word '+str(i)\r\n for i in range(df_topic_keywords.shape[1])]\r\ndf_topic_keywords.index = ['Topic '+str(i)\r\n for i in range(df_topic_keywords.shape[0])]\r\nTopics = [\"Topic Label 1\", \"Topic Label 2\", \"Topic Label 3\", \"Topic Label 4\", \"Topic Label 5\",\r\n \"Topic Label 6\", \"Topic Label 7\", \"Topic Label 8\", \"Topic Label 9\", \"Topic Label 10\"]\r\ndf_topic_keywords[\"Topics\"] = Topics\r\ndf_topic_keywords.head()\r\n###########################\r\nserp_dict = pd.DataFrame.from_dict({'SERP_Titles': serp_titles, 'SERP_Descriptions': serp_descriptions}).astype(\r\n str).drop_duplicates(subset=['SERP_Titles'], keep=False)\r\nserp_dict[\"SERP_Combination\"] = serp_dict[\"SERP_Titles\"].map(\r\n str) + serp_dict[\"SERP_Descriptions\"]\r\nserp_dict[\"SERP_Combination\"] = serp_dict[\"SERP_Combination\"].fillna(\"\").str.lower().str.replace(\r\n '[^\\w\\s]', '').apply(lambda x: [item for item in x.split() if item not in stop]).map(pos_tag)\r\nserp_dict[\"SERP_Combination_Counts\"] = serp_dict[\"SERP_Combination\"].map(\r\n count_tags)\r\nserp_language_vocabulary = {}\r\nfor row in serp_dict[\"SERP_Combination\"]:\r\n for word, tag in row:\r\n if word in serp_language_vocabulary:\r\n if tag in serp_language_vocabulary[word]:\r\n serp_language_vocabulary[word][tag] += 1\r\n else:\r\n serp_language_vocabulary[word][tag] = 1\r\n else:\r\n serp_language_vocabulary[word] = {tag: 1}\r\n\r\nserp_language_vocabulary_df = pd.DataFrame.from_dict(\r\n serp_language_vocabulary, orient='index')\r\nserp_language_vocabulary_df.fillna(value=0, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'NN': 'Noun'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'NNS': 'Noun'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'NNP': 'Noun'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'NNPS': 'Noun'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'RB': 'Adverb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'RBR': 'Adverb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'RBS': 'Adverb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'VB': 'Verb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'VBD': 'Verb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'VBG': 'Verb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'VBG': 'Verb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'VBN': 'Verb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'VBP': 'Verb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'VBZ': 'Verb'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'JJ': 'Adjective'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'JJR': 'Adjective'}, inplace=True)\r\nserp_language_vocabulary_df.rename(columns={'JJS': 'Adjective'}, inplace=True)\r\nserp_language_vocabulary_df = serp_language_vocabulary_df[pos_list]\r\nserp_language_vocabulary_df = serp_language_vocabulary_df.groupby(\r\n lambda x: x, 
axis=1).sum()\r\nserp_language_vocabulary_df = serp_language_vocabulary_df.sort_values(\r\n 'Noun', ascending=False)\r\nserp_language_vocabulary_df.head()\r\n##################\r\nscraped_content_string = ' '.join(str(e) for e in scraped_content)\r\nscraped_title_string = ' '.join(str(e) for e in scraped_titles)\r\nscraped_concat_string = scraped_content_string + scraped_title_string\r\nquestion_extraction = re.compile(r'[A-Za-z][\\w\\s]+[?]')\r\nquestion = question_extraction.findall(scraped_concat_string)\r\nquestions = []\r\nquestions.append(question)\r\nquestion_df = pd.DataFrame(questions)\r\nquestion_df = question_df.transpose()\r\nquestion_df['totalwords'] = question_df[0].str.split().str.len()\r\nquestion_df = question_df.drop(\r\n question_df[(question_df.totalwords < 4) | (question_df.totalwords > 10)].index)\r\nquestion_df = question_df.rename(\r\n columns={0: 'Question', 'totalwords': 'Question_Word_Length'})\r\nquestion_df = question_df.drop_duplicates(subset=\"Question\")\r\nnlp = spacy.load('en', disable=['parser', 'ner'])\r\n\r\n\r\ndef predict_topic(text, nlp=nlp):\r\n global sent_to_words\r\n global lemmatization\r\n# Step 1: Clean with simple_preprocess\r\n mytext_2 = list(sent_to_words(text))\r\n# Step 2: Lemmatize\r\n mytext_3 = lemmatization(mytext_2, allowed_postags=[\r\n 'NOUN', 'ADJ', 'VERB', 'ADV'])\r\n# Step 3: Vectorize transform\r\n mytext_4 = vectorizer.transform(mytext_3)\r\n# Step 4: LDA Transform\r\n topic_probability_scores = best_lda_model.transform(mytext_4)\r\n topic = df_topic_keywords.iloc[np.argmax(\r\n topic_probability_scores), 1:14].values.tolist()\r\n\r\n # Step 5: Infer Topic\r\n infer_topic = df_topic_keywords.iloc[np.argmax(\r\n topic_probability_scores), -1]\r\n\r\n #topic_guess = df_topic_keywords.iloc[np.argmax(topic_probability_scores), Topics]\r\n return infer_topic, topic, topic_probability_scores\r\n\r\n\r\ndef apply_predict_topic(text):\r\n text = [text]\r\n infer_topic, topic, prob_scores = predict_topic(text=text)\r\n return(infer_topic)\r\n\r\n\r\ndef get_topic_labels(text):\r\n text = [text]\r\n infer_topic, topic, prob_scores = predict_topic(text=text)\r\n return(topic)\r\n\r\n\r\nquestion_df[\"Topic_Grouping_Label\"] = question_df['Question'].apply(\r\n apply_predict_topic)\r\nquestion_df[\"Topic_Wordings\"] = question_df['Question'].apply(get_topic_labels)\r\nquestion_df = question_df.drop(columns=['Question_Word_Length'])\r\nquestion_df.head(15)\r\n##############\r\n#from google.colab import files\r\n#writer = pd.ExcelWriter('SERP_Research_Test.xlsx')\r\n#vocabulary_df.to_excel(writer, sheet_name='Ranking Vocabulary')\r\n#ent_df.sort_values('ORG', ascending=False).to_excel(writer, sheet_name='Ranking Entities')\r\n#resonance_df.to_excel(writer, sheet_name='Topical Resonance')\r\n#ranking_titles_trends.to_excel(writer, sheet_name='Title Co-Occurrence')\r\n#df_topic_keywords.to_excel(writer, sheet_name='Topic Modelling Groupings')\r\n#serp_language_vocabulary_df.to_excel(writer, sheet_name='Language Trends In SERPs')\r\n#question_df.to_excel(writer, sheet_name='Questions Answered')\r\n# writer.save()\r\n","sub_path":"seo.py","file_name":"seo.py","file_ext":"py","file_size_in_byte":21883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"315252030","text":"# -*- coding: utf-8 -*-\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Timeline, Pie, Line, Page\n\nfrom zvt.api.account import get_account, get_position\nfrom zvt.utils.time_utils import 
to_time_str\n\n\ndef draw_account(trader_name):\n df_account = get_account(trader_name=trader_name)\n df_position = get_position(trader_name=trader_name)\n\n xdata = [to_time_str(timestamp) for timestamp in df_account.index]\n ydata = df_account.loc[:, 'all_value'].values.tolist()\n\n line = (\n Line()\n .add_xaxis(xdata)\n .add_yaxis(\"市值曲线\", ydata)\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"Grid-Line\", pos_top=\"48%\"),\n legend_opts=opts.LegendOpts(pos_top=\"48%\"),\n )\n )\n\n time_line = Timeline()\n for timestamp in df_position.index:\n positions = zip(df_position.loc[timestamp, ['security_id']].values.tolist(),\n df_position.loc[timestamp, ['value']].values.tolist())\n security_positions = [(x[0], y[0]) for x, y in positions]\n print(security_positions)\n pie = Pie().add(\"持仓\", security_positions)\n time_line.add(pie, to_time_str(timestamp))\n\n page = Page()\n page.add(line, time_line)\n\n return page\n\n\nif __name__ == '__main__':\n d = draw_account('fooltrader')\n d.render()\n","sub_path":"zvt/charts/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"272752346","text":"# encoding: utf-8\n'''\nnode0--+-hop-+-node1\n | |\nnode2--+ +-node3\n | |\n | |\nnode8--+ +-node9\n\n while True:\n [node0 => hop]connect\n [node2 => hop]connect\n ...\n [node8 => hop]connect\n [hop => node1]connect\n [hop => node3]connect\n ...\n [hop => node9]connect\n [node0 => hop]open_channel\n [node2 => hop]open_channel\n [hop => node2]open_channel\n [hop => node4]open_channel\n ...\n for PAY_COUNT_MAX:\n [node1]invoice\n [node0]pay-node1\n [node3]invoice\n [node2]pay-node3\n close_all\n ...\n'''\n\nimport subprocess\nimport time\nimport sys\nimport json\nimport traceback\nimport os\nimport signal\nimport random\nimport string\n\nimport paho.mqtt.client\nimport socket\nimport threading\nimport configparser\n\n\nclass PayCount:\n def __init__(self):\n self.invoice_count = 0\n self.pay_count = 0\n self.last_fail_pay_count = -1 # 前回payでNGが返ってきたときのpay_count\n self.fail_cont_count = 0 # failが連続した回数\n self.fail_count = 0\n\n\nconfig = configparser.ConfigParser()\n\n# MQTT\nMQTT_HOST = ''\nMQTT_PORT = 0\nTOPIC_PREFIX = ''\n\n# random requester name\nRANDNAME = ''.join([random.choice(\n string.ascii_letters + string.digits) for i in range(16)])\n\n# const variable\nFUNDING_NONE = 0\nFUNDING_WAIT = 1\nFUNDING_FUNDED = 2\nFUNDING_CLOSING = 3\n\nPAY_START_BLOCK = 0\nPAY_FAIL_BLOCK = 0\n\n# 使うノード数\nNODE_NUM = 11\n\n# 'status'がこの秒数以上来なかったらテストを停止する\nNODE_NOT_EXIST_SEC = NODE_NUM * 20\n\n# close前の待ち時間\nNODE_CLOSE_SEC = 30\n\n# array_node_id[]のインデックス\n# 偶数番n(payer)とn+1(payee)がセットになる\nNODE0 = 0\nNODE1 = 1\nNODE2 = 2\nNODE3 = 3\nNODE4 = 4\nNODE5 = 5\nNODE6 = 6\nNODE7 = 7\nNODE8 = 8\nNODE9 = 9\nHOP = 10\n\n# ログ用のラベル\nNODE_LABEL = [\n 'node0', 'node1', 'node2', 'node3', 'node4',\n 'node5', 'node6', 'node7', 'node8', 'node9',\n 'hop ',\n]\n\n# [0]が[1]に向けてconnectする\n# close_all()も同じ方向でcloseする\nNODE_CONNECT = [\n [NODE0, HOP], [NODE2, HOP], [NODE4, HOP], [NODE6, HOP], [NODE8, HOP],\n [HOP, NODE1], [HOP, NODE3], [HOP, NODE5], [HOP, NODE7], [HOP, NODE9],\n]\nNODE_OPEN = NODE_CONNECT\n\n# [config]open時のamount\nNODE_OPEN_AMOUNT = 0\n\n# [config]送金回数。この回数だけ送金後、mutual closeする。\nPAY_COUNT_MAX = 0\n\n# [config]今のところ送金完了が分からないので、一定間隔で送金している\nPAY_INVOICE_ELAPSE = 0\n\n# invoiceでの要求額\nPAY_AMOUNT_MSAT = 1000\n\n# 送信失敗が連続してテストを終了するカウント\nFAIL_CONT_MAX = 3\n\n# global variable\narray_node_id = [''] * 
NODE_NUM\ndict_recv_node = dict()\ndict_status_node = dict()\ndict_amount = dict()\ndict_paycount = dict()\n\narray_connected_node = []\n\nthread_request = None\nloop_reqester = True\n\nfunded_block_count = 0 # 全チャネルがnormal operationになったときのblockcount\nis_funding = FUNDING_NONE # FUNDING_xxx\n\n\n# MQTT: connect\ndef on_connect(client, user_data, flags, response_code):\n del user_data, flags, response_code\n client.subscribe(TOPIC_PREFIX + '/#')\n th1 = threading.Thread(target=poll_time, args=(client,), name='poll_time',\n daemon=True)\n th1.start()\n th2 = threading.Thread(target=notifier, args=(client,), name='notifier',\n daemon=True)\n th2.start()\n print('MQTT connected')\n\n\n# MQTT: message subscribed\ndef on_message(client, _, msg):\n global dict_recv_node, dict_status_node, thread_request, loop_reqester,\\\n is_funding\n\n # topic\n # 'request/' + node_id : requester --> responser\n # 'response/' + node_id : responser ==> requester\n # 'stop/ + node_id : requester --> responser'\n ret, recv_id = proc_topic(client, msg)\n if not ret:\n if (len(recv_id) != 0) and msg.topic.startswith(TOPIC_PREFIX + '/notify/'):\n print('yet: ' + nodeid2label(recv_id))\n return\n\n # payload\n proc_payload(client, msg, recv_id)\n\n # status\n proc_status(client, msg, recv_id)\n\n\ndef notifier(client):\n while True:\n # notify\n conn_dict = {\"connect\": json_node_connect()}\n for node in array_node_id:\n # print('notify: ' + node)\n client.publish(TOPIC_PREFIX + '/notify/' + node,\n json.dumps(conn_dict))\n\n if is_funding == FUNDING_NONE:\n # print('connected list:', array_connected_node)\n connect_all(client)\n\n # https://stackoverflow.com/questions/12919980/nohup-is-not-writing-log-to-output-file\n sys.stdout.flush()\n\n time.sleep(5)\n\n\n# check status health\n# 起動して30秒以内にテスト対象のnode全部がstatusを送信すること\n# テスト対象のnodeは、120秒以内にstatusを毎回送信すること(通信が詰まっているときがあるのか、60秒で失敗することがあった))\ndef poll_time(client):\n global dict_recv_node\n SAME_LIMIT_SECOND = 30 * 60 # 同じ状態が継続できる上限(FUNDING_FUNDED以外)\n LOOP_SECOND = 30 # 監視周期\n\n bak_funding = FUNDING_NONE\n same_status = 0\n stop_order = False\n reason = ''\n while not stop_order:\n time.sleep(LOOP_SECOND)\n\n print('*** is_funding=' + str(is_funding))\n\n # check health\n if len(dict_recv_node) < NODE_NUM:\n reason = 'not all node found: ' + str(dict_recv_node)\n stop_order = True\n break\n for node in dict_recv_node:\n if time.time() - dict_recv_node[node] > NODE_NOT_EXIST_SEC:\n reason = 'node not exist:' + node\n stop_order = True\n break\n if (bak_funding == is_funding) and (is_funding != FUNDING_FUNDED):\n same_status += 1\n print('same status: ' + str(same_status))\n if same_status > SAME_LIMIT_SECOND / LOOP_SECOND:\n reason = 'too many same status: ' + str(is_funding)\n stop_order = True\n break\n else:\n same_status = 0\n bak_funding = is_funding\n if stop_order:\n errlog_print(reason)\n stop_all(client, reason)\n\n\n# topic\n# check our testing node_ids\ndef proc_topic(client, msg):\n global dict_recv_node\n\n if msg.topic == TOPIC_PREFIX + '/stop/' + RANDNAME:\n print('STOP!')\n kill_me()\n\n ret = False\n mine = False\n recv_id = ''\n try:\n if msg.topic.startswith(TOPIC_PREFIX + '/response/') or msg.topic.startswith(TOPIC_PREFIX + '/status/'):\n if msg.topic.rfind('/') != -1:\n recv_id = msg.topic[msg.topic.rfind('/') + 1:]\n for i in range(NODE_NUM):\n if array_node_id[i] == recv_id:\n mine = True\n dict_recv_node[recv_id] = time.time()\n break\n if mine and (len(dict_recv_node) == NODE_NUM):\n ret = True\n except:\n 
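# NOTE: a bare 'except' also traps KeyboardInterrupt and SystemExit;\n        # 'except Exception' would be a safer catch-all here\n        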
print('traceback.format_exc():\\n%s' % traceback.format_exc())\n print('topic=', msg.topic)\n\n return ret, recv_id\n\n\n# payload\ndef proc_payload(client, msg, recv_id):\n payload = ''\n try:\n # payload\n payload = str(msg.payload, 'utf-8')\n if len(payload) == 0:\n return\n if msg.topic.startswith(TOPIC_PREFIX + '/response/'):\n message_response(client, json.loads(payload), msg, recv_id)\n elif msg.topic.startswith(TOPIC_PREFIX + '/status/'):\n message_status(client, json.loads(payload), msg, recv_id)\n except:\n print('traceback.format_exc():\\n%s' % traceback.format_exc())\n print('payload=', payload)\n\n\n# process for status\ndef proc_status(client, msg, recv_id):\n global dict_status_node, thread_request, loop_reqester,\\\n is_funding, funded_block_count\n\n if len(dict_status_node) != NODE_NUM:\n return\n\n try:\n if thread_request is None:\n all_normal = True\n all_none = True\n # print(' proc_status-------------')\n for node in dict_status_node:\n for status in dict_status_node[node]['status']:\n # print(' proc_status=' + status[0] + ': ' + nodeid2label(status[1]))\n if status[0] != 'Status.NORMAL':\n all_normal = False\n if status[0] != 'Status.NONE':\n all_none = False\n if all_normal:\n funded_block_count = getblockcount() # announcement計測用\n is_funding = FUNDING_FUNDED\n loop_reqester = True\n thread_request = threading.Thread(target=requester,\n args=(client,),\n name='requester',\n daemon=True)\n thread_request.start()\n print('all_normal: start requester thread: ' +\n str(funded_block_count))\n elif all_none and is_funding == FUNDING_CLOSING:\n print('all_none: close done')\n is_funding = FUNDING_NONE\n else:\n all_normal = True\n for node in dict_status_node:\n for status in dict_status_node[node]['status']:\n if status[0] != 'Status.NORMAL':\n all_normal = False\n break\n if not all_normal:\n print('stop requester thread')\n loop_reqester = False\n thread_request.join()\n thread_request = None\n return\n except:\n print('traceback.format_exc():\\n%s' % traceback.format_exc())\n\n\n###############################################################################\n\n# request check\ndef requester(client):\n global dict_paycount\n\n while loop_reqester:\n blk = getblockcount()\n if blk - funded_block_count < PAY_START_BLOCK:\n print('wait confirm: ' + str(blk - funded_block_count))\n time.sleep(10)\n continue\n pay_max_count = 0\n for lp in range(int(NODE_NUM / 2)):\n payer_idx = lp * 2\n payee_idx = payer_idx + 1\n payer_node = array_node_id[payer_idx]\n if payer_node not in dict_paycount:\n dict_paycount[payer_node] = PayCount()\n pay_count = dict_paycount[payer_node].pay_count\n if pay_count < PAY_COUNT_MAX:\n # request invoice\n log_print('[REQ]invoice(' + NODE_LABEL[payer_idx] + ')')\n client.publish(TOPIC_PREFIX + '/request/' + array_node_id[payee_idx],\n '{\"method\":\"invoice\",'\n '\"params\":[ ' + str(PAY_AMOUNT_MSAT) + ',\"' + NODE_LABEL[payer_idx]+'\" ]}')\n else:\n pay_max_count += 1\n if pay_max_count == int(NODE_NUM / 2):\n # 一定回数送金要求したらチャネルを閉じる\n log_print('[REQ]close all')\n time.sleep(NODE_CLOSE_SEC)\n close_all(client)\n for pay_obj in dict_paycount.values():\n pay_obj.pay_count = 0\n pay_obj.invoice_count = 0\n break\n else:\n time.sleep(PAY_INVOICE_ELAPSE)\n print('exit requester')\n\n\ndef proc_invoice_got(client, json_msg, msg, recv_id):\n global dict_paycount\n\n invoice = json_msg['result'][1]\n target = json_msg['result'][2]\n idx = label2idx(target)\n\n dict_paycount[array_node_id[idx]].invoice_count += 1\n 
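# relay the received invoice to the payer node as a 'pay' request\n    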
log_print('[RESPONSE]invoice-->[REQ]pay:' + target +\n ': ' + invoice)\n client.publish(TOPIC_PREFIX + '/request/' + array_node_id[idx],\n '{\"method\":\"pay\",'\n '\"params\":[ \"' + invoice + '\" ]}')\n\n\n###############################################################################\n\ndef connect_all(client):\n global is_funding, array_connected_node\n\n if len(dict_status_node) != NODE_NUM:\n return\n\n for node_conn in NODE_CONNECT:\n connector_idx = node_conn[0]\n connectee_idx = node_conn[1]\n connector = array_node_id[connector_idx]\n connectee = array_node_id[connectee_idx]\n pair = (connector, connectee)\n if pair not in array_connected_node:\n log_print('[REQ]connect: ' + NODE_LABEL[connector_idx] +\n '=>' + NODE_LABEL[connectee_idx])\n ipaddr = dict_status_node[connectee]['ipaddr']\n port = dict_status_node[connectee]['port']\n client.publish(TOPIC_PREFIX + '/request/' + connector,\n '{\"method\":\"connect\", \"params\":['\n '\"' + connectee + '\", '\n '\"' + ipaddr + '\", ' + str(port) + ' ]}')\n\n\ndef open_all(client):\n global is_funding\n\n log_print('open_all')\n for node_open in NODE_OPEN:\n opener_idx = node_open[0]\n openee_idx = node_open[1]\n opener = array_node_id[opener_idx]\n openee = array_node_id[openee_idx]\n print('[REQ]open: ' + NODE_LABEL[opener_idx] +\n ' => ' + NODE_LABEL[openee_idx])\n client.publish(TOPIC_PREFIX + '/request/' + opener,\n '{\"method\":\"openchannel\",\"params\":[ \"' + openee +\n '\", ' + str(NODE_OPEN_AMOUNT) + ' ]}')\n is_funding = FUNDING_WAIT\n\n\ndef close_all(client):\n global is_funding, array_connected_node\n\n log_print('close_all')\n for node_close in NODE_CONNECT:\n closer_idx = node_close[0]\n closee_idx = node_close[1]\n closer = array_node_id[closer_idx]\n closee = array_node_id[closee_idx]\n print('[REQ]close: ' + NODE_LABEL[closer_idx] +\n '=>' + NODE_LABEL[closee_idx])\n client.publish(TOPIC_PREFIX + '/request/' + closer,\n '{\"method\":\"closechannel\",'\n '\"params\":[ \"' + closee + '\" ]}')\n is_funding = FUNDING_CLOSING\n array_connected_node = []\n\n\ndef stop_all(client, reason):\n for node in array_node_id:\n print('stop: ' + node)\n client.publish(TOPIC_PREFIX + '/stop/' + node, reason)\n client.publish(TOPIC_PREFIX + '/stop/' + RANDNAME, reason)\n log_print('send stop: ' + reason)\n\n\n# message: topic=\"response/#\"\ndef message_response(client, json_msg, msg, recv_id):\n global is_funding, dict_paycount, funded_block_count, array_connected_node\n\n recv_name = nodeid2label(recv_id)\n ret = True\n reason = ''\n res_command = json_msg['result'][0]\n res_result = json_msg['result'][1]\n if res_command == 'connect':\n direction = recv_name +\\\n ' => ' + nodeid2label(json_msg['result'][2])\n if res_result == 'OK':\n log_print('[RESPONSE]connected: ' + direction)\n pair = (recv_id, json_msg['result'][2])\n if pair not in array_connected_node:\n array_connected_node.append(pair)\n if (len(array_connected_node) == len(NODE_CONNECT)) and (is_funding != FUNDING_WAIT):\n open_all(client)\n else:\n log_print('fail connect[' + res_result + ']: ' + direction)\n # ret = False # close直後はありがちなので、スルー\n time.sleep(5)\n\n elif res_command == 'openchannel':\n direction = recv_name +\\\n ' => ' + nodeid2label(json_msg['result'][2])\n if res_result == 'OK':\n log_print('[RESPONSE]funding start: ' + direction)\n else:\n reason = 'funding fail[' + res_result + ']: ' + direction\n ret = False\n\n elif res_command == 'closechannel':\n direction = recv_name +\\\n ' => ' + nodeid2label(json_msg['result'][2])\n if res_result == 'OK':\n 
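# mutual close has started; proc_status() flips is_funding back to\n            # FUNDING_NONE once every node reports Status.NONE\n            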
log_print('[RESPONSE]closing start: ' + direction)\n else:\n reason = 'closing fail[' + res_result + ']: ' + direction\n ret = False\n\n elif res_command == 'invoice':\n if res_result == 'NG':\n reason = 'fail invoice'\n ret = False\n else:\n proc_invoice_got(client, json_msg, msg, recv_id)\n\n elif res_command == 'pay':\n invoice = json_msg['result'][2]\n if recv_id not in dict_paycount:\n dict_paycount[recv_id] = PayCount()\n pay_obj = dict_paycount[recv_id]\n\n def pay_reason():\n return '(' + recv_name +\\\n '): ' + res_result +\\\n ', invoice_count=' + str(pay_obj.invoice_count) +\\\n ', pay_count=' + str(pay_obj.pay_count) +\\\n ', last_fail_pay_count=' + str(pay_obj.last_fail_pay_count) +\\\n ', fail_count=' + str(pay_obj.fail_count) +\\\n ', fail_cont_count=' + str(pay_obj.fail_cont_count) +\\\n ': ' + invoice\n\n if res_result == 'OK':\n pay_obj.pay_count += 1\n log_print('[RESPONSE]pay ' + pay_reason())\n pay_obj.fail_cont_count = 0\n print(' pay_count=' + str(pay_obj.pay_count))\n else:\n blk = getblockcount()\n # announcementは 6 confirm以降で展開なので、少し余裕を持たせる\n if blk - funded_block_count > PAY_FAIL_BLOCK:\n pay_obj.fail_count += 1\n reason = 'pay fail' + pay_reason()\n print(reason)\n if pay_obj.last_fail_pay_count == pay_obj.pay_count:\n # 連続してNG\n pay_obj.fail_cont_count += 1\n if pay_obj.fail_cont_count >= FAIL_CONT_MAX:\n # 連続NG数が許容を超えた\n errlog_print('too many failure')\n ret = False\n else:\n # 単発NG\n pay_obj.last_fail_pay_count = pay_obj.pay_count\n pay_obj.fail_cont_count = 0\n else:\n print('pay through' + pay_reason())\n\n if not ret:\n errlog_print(reason)\n stop_all(client, reason)\n\n\n# message: topic=\"status/#\"\ndef message_status(client, json_msg, msg, recv_id):\n global dict_status_node\n\n recv_name = nodeid2label(recv_id)\n if recv_id not in dict_paycount:\n dict_paycount[recv_id] = PayCount()\n print(recv_name +\n ': invoice_count=' + str(dict_paycount[recv_id].invoice_count) +\n ', pay_count=' + str(dict_paycount[recv_id].pay_count) +\n ', last_fail_pay_count=' + str(dict_paycount[recv_id].last_fail_pay_count) +\n ', fail_cont_count=' + str(dict_paycount[recv_id].fail_cont_count) +\n ', fail_count=' + str(dict_paycount[recv_id].fail_count))\n if dict_paycount[recv_id].pay_count > 0:\n if recv_id in dict_status_node:\n print('--------------------------')\n for stat in json_msg['status']:\n # print('DBG: stat ' + stat[0] + ':' + stat[1])\n if stat[0] == 'Status.NORMAL':\n for old in dict_status_node[recv_id]['status']:\n if stat[1] == old[1] and old[0] == 'Status.NORMAL':\n print('AMT:' + stat[1] +\n ' old=' + str(old[2]) +\n ', new=' + str(stat[2]) +\n ', diff=' + str(stat[2] - old[2]))\n break\n else:\n continue\n print('--------------------------')\n dict_status_node[recv_id] = json_msg\n\n\ndef kill_me():\n # https://stackoverflow.com/questions/12919980/nohup-is-not-writing-log-to-output-file\n sys.stdout.flush()\n os.kill(os.getpid(), signal.SIGKILL)\n\n\ndef log_print(msg):\n print('#####################')\n print('# ' + msg)\n print('#####################')\n\n\ndef errlog_print(msg):\n print('!!!!!!!!!!!!!!!!!!!!!')\n print('! 
' + msg)\n print('!!!!!!!!!!!!!!!!!!!!!')\n\n\ndef nodeid2label(id):\n num = 0\n for node in array_node_id:\n if node == id:\n return NODE_LABEL[num]\n num += 1\n return '???(' + id + ')'\n\n\ndef label2idx(label):\n return NODE_LABEL.index(label)\n\n\ndef json_node_connect():\n json_conn = []\n for lists in NODE_CONNECT:\n pair = [array_node_id[lists[0]], array_node_id[lists[1]]]\n json_conn.append(pair)\n return json_conn\n\n\ndef getblockcount():\n cnt = linux_cmd_exec('bitcoin-cli getblockcount')\n if cnt is not None:\n print(' getblockcount=' + cnt)\n return int(cnt)\n else:\n return 0\n\n\ndef linux_cmd_exec(cmd):\n # print('cmd:', cmd.split(' '))\n ret = ''\n try:\n ret = subprocess.check_output(cmd.split(' ')).strip().decode('utf-8')\n except subprocess.CalledProcessError as e:\n print('!!! error happen(errcode=%d) !!!' % e.returncode)\n return ret\n\n\ndef main():\n # MQTT brokerと接続\n g_mqtt = paho.mqtt.client.Client(protocol=paho.mqtt.client.MQTTv311)\n g_mqtt.connect(MQTT_HOST, port=MQTT_PORT, keepalive=60)\n g_mqtt.on_connect = on_connect\n g_mqtt.on_message = on_message\n g_mqtt.loop_forever()\n\n\n###############################################################################\n\nif __name__ == '__main__':\n if len(sys.argv) != 2 + NODE_NUM:\n print('usage: ' + sys.argv[0] + ' INI_SECTION NODE0 NODE1 ... NODE9 HOP')\n sys.exit()\n for i in range(NODE_NUM):\n if len(sys.argv[2 + i]) != 66:\n print('invalid length: ' + str(i) + ': ' + sys.argv[2 + i])\n sys.exit()\n\n config.read('./config.ini')\n testname = sys.argv[1]\n print('testname= ' + testname)\n\n MQTT_HOST = config.get('MQTT', 'BROKER_URL')\n MQTT_PORT = config.getint('MQTT', 'BROKER_PORT')\n TOPIC_PREFIX = config.get(testname, 'TOPIC_PREFIX')\n NODE_OPEN_AMOUNT = config.getint(testname, 'NODE_OPEN_AMOUNT')\n PAY_COUNT_MAX = config.getint(testname, 'PAY_COUNT_MAX')\n PAY_INVOICE_ELAPSE = config.getint(testname, 'PAY_INVOICE_ELAPSE')\n PAY_START_BLOCK = config.getint(testname, 'PAY_START_BLOCK')\n PAY_FAIL_BLOCK = config.getint(testname, 'PAY_FAIL_BLOCK')\n\n # 引数とnode_idの対応\n cnt = 0\n for i in sys.argv[2:]:\n array_node_id[cnt] = i\n cnt += 1\n\n for num in range(NODE_NUM):\n print(' ' + NODE_LABEL[num] + '= ' + array_node_id[num])\n main()\n","sub_path":"mqtt_req6.py","file_name":"mqtt_req6.py","file_ext":"py","file_size_in_byte":22623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"24193542","text":"import numpy as np\nfrom scipy.interpolate import LinearNDInterpolator\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import griddata\nfrom scipy.spatial import cKDTree\nfrom scipy.interpolate.interpnd import _ndim_coords_from_arrays\nfrom matplotlib.colors import LightSource, Normalize\n\nfrom .matutils import mkvc\n\n\ndef plot2Ddata(\n xyz,\n data,\n vec=False,\n nx=100,\n ny=100,\n ax=None,\n mask=None,\n level=None,\n figname=None,\n ncontour=10,\n dataloc=False,\n contourOpts={},\n scale=\"linear\",\n clim=None,\n):\n \"\"\"\n\n Take unstructured xy points, interpolate, then plot in 2D\n\n :param numpy.array xyz: data locations\n :param numpy.array data: data values\n :param bool vec: plot streamplot?\n :param float nx: number of x grid locations\n :param float ny: number of y grid locations\n :param matplotlib.axes ax: axes\n :param numpy.array mask: mask for the array\n :param float level: level at which to draw a contour\n :param string figname: figure name\n :param float ncontour: number of :meth:`matplotlib.pyplot.contourf`\n contours\n :param 
bool dataloc: plot the data locations\n :param dict controuOpts: :meth:`matplotlib.pyplot.contourf` options\n :param numpy.array clim: colorbar limits\n\n \"\"\"\n if ax is None:\n fig = plt.figure()\n ax = plt.subplot(111)\n\n xmin, xmax = xyz[:, 0].min(), xyz[:, 0].max()\n ymin, ymax = xyz[:, 1].min(), xyz[:, 1].max()\n x = np.linspace(xmin, xmax, nx)\n y = np.linspace(ymin, ymax, ny)\n X, Y = np.meshgrid(x, y)\n xy = np.c_[X.flatten(), Y.flatten()]\n if vec is False:\n F = LinearNDInterpolator(xyz[:, :2], data)\n DATA = F(xy)\n DATA = DATA.reshape(X.shape)\n if scale == \"log\":\n DATA = np.log10(abs(DATA))\n cont = ax.contourf(X, Y, DATA, ncontour, **contourOpts)\n if level is not None:\n if scale == \"log\":\n level = np.log10(level)\n CS = ax.contour(X, Y, DATA, level, colors=\"k\", linewidths=2)\n\n else:\n # Assume size of data is (N,2)\n datax = data[:, 0]\n datay = data[:, 1]\n Fx = LinearNDInterpolator(xyz[:, :2], datax)\n Fy = LinearNDInterpolator(xyz[:, :2], datay)\n DATAx = Fx(xy)\n DATAy = Fy(xy)\n DATA = np.sqrt(DATAx ** 2 + DATAy ** 2).reshape(X.shape)\n DATAx = DATAx.reshape(X.shape)\n DATAy = DATAy.reshape(X.shape)\n if scale == \"log\":\n DATA = np.log10(abs(DATA))\n\n cont = ax.contourf(X, Y, DATA, ncontour, **contourOpts)\n ax.streamplot(X, Y, DATAx, DATAy, color=\"w\")\n if level is not None:\n CS = ax.contour(X, Y, DATA, level, colors=\"k\", linewidths=2)\n\n if dataloc:\n ax.plot(xyz[:, 0], xyz[:, 1], \"k.\", ms=2)\n plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n if figname:\n plt.axis(\"off\")\n fig.savefig(figname, dpi=200)\n if level is None:\n return cont, ax\n else:\n return cont, ax, CS\n\n\ndef plotLayer(\n sig, LocSigZ, xscale=\"log\", ax=None, showlayers=False, xlim=None, **kwargs\n):\n \"\"\"Plot a layered earth model\"\"\"\n sigma = np.repeat(sig, 2, axis=0)\n z = np.repeat(LocSigZ[1:], 2, axis=0)\n z = np.r_[LocSigZ[0], z, LocSigZ[-1]]\n\n if xlim is None:\n sig_min = sig.min() * 0.5\n sig_max = sig.max() * 2\n else:\n sig_min, sig_max = xlim\n\n if xscale == \"linear\" and sig.min() == 0.0:\n if xlim is None:\n sig_min = -sig.max() * 0.5\n sig_max = sig.max() * 2\n\n if ax is None:\n plt.xscale(xscale)\n plt.xlim(sig_min, sig_max)\n plt.ylim(z.min(), z.max())\n plt.xlabel(\"Conductivity (S/m)\", fontsize=14)\n plt.ylabel(\"Depth (m)\", fontsize=14)\n plt.ylabel(\"Depth (m)\", fontsize=14)\n if showlayers is True:\n for locz in LocSigZ:\n plt.plot(\n np.linspace(sig_min, sig_max, 100),\n np.ones(100) * locz,\n \"b--\",\n lw=0.5,\n )\n return plt.plot(sigma, z, \"k-\", **kwargs)\n\n else:\n ax.set_xscale(xscale)\n ax.set_xlim(sig_min, sig_max)\n ax.set_ylim(z.min(), z.max())\n ax.set_xlabel(\"Conductivity (S/m)\", fontsize=14)\n ax.set_ylabel(\"Depth (m)\", fontsize=14)\n if showlayers is True:\n for locz in LocSigZ:\n ax.plot(\n np.linspace(sig_min, sig_max, 100),\n np.ones(100) * locz,\n \"b--\",\n lw=0.5,\n )\n return ax.plot(sigma, z, \"k-\", **kwargs)\n\n\ndef plotDataHillside(\n x,\n y,\n z,\n axs=None,\n fill=True,\n contour=0,\n vmin=None,\n vmax=None,\n clabel=True,\n cmap=\"RdBu_r\",\n ve=1.0,\n alpha=1.0,\n alphaHS=1.0,\n distMax=1000,\n midpoint=None,\n azdeg=315,\n altdeg=45,\n):\n\n ls = LightSource(azdeg=azdeg, altdeg=altdeg)\n\n if x.ndim == 1:\n # Create grid of points\n vectorX = np.linspace(x.min(), x.max(), 1000)\n vectorY = np.linspace(y.min(), y.max(), 1000)\n\n X, Y = np.meshgrid(vectorX, vectorY)\n\n # Interpolate\n d_grid = griddata(np.c_[x, y], z, (X, Y), method=\"cubic\")\n\n # Remove points beyond treshold\n tree = 
cKDTree(np.c_[x, y])\n    xi = _ndim_coords_from_arrays((X, Y), ndim=2)\n    dists, indexes = tree.query(xi)\n\n    # Copy original result but mask missing values with NaNs\n    d_grid[dists > distMax] = np.nan\n\n    else:\n\n        X, Y, d_grid = x, y, z\n\n    class MidPointNorm(Normalize):\n        def __init__(self, midpoint=None, vmin=None, vmax=None, clip=False):\n            Normalize.__init__(self, vmin, vmax, clip)\n            self.midpoint = midpoint\n\n        def __call__(self, value, clip=None):\n            if clip is None:\n                clip = self.clip\n\n            result, is_scalar = self.process_value(value)\n\n            self.autoscale_None(result)\n\n            if self.midpoint is None:\n                self.midpoint = np.mean(value)\n            vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint\n\n            if not (vmin < midpoint < vmax):\n                raise ValueError(\"midpoint must be between maxvalue and minvalue.\")\n            elif vmin == vmax:\n                result.fill(0)  # Or should it be all masked? Or 0.5?\n            elif vmin > vmax:\n                raise ValueError(\"maxvalue must be bigger than minvalue\")\n            else:\n                vmin = float(vmin)\n                vmax = float(vmax)\n                if clip:\n                    mask = np.ma.getmask(result)\n                    result = np.ma.array(\n                        np.clip(result.filled(vmax), vmin, vmax), mask=mask\n                    )\n\n                # ma division is very slow; we can take a shortcut\n                resdat = result.data\n\n                # First scale to the -1 to 1 range, then to 0 to 1.\n                resdat -= midpoint\n                resdat[resdat > 0] /= abs(vmax - midpoint)\n                resdat[resdat < 0] /= abs(vmin - midpoint)\n\n                resdat /= 2.0\n                resdat += 0.5\n                result = np.ma.array(resdat, mask=result.mask, copy=False)\n\n            if is_scalar:\n                result = result[0]\n            return result\n\n        def inverse(self, value):\n            if not self.scaled():\n                raise ValueError(\"Not invertible until scaled\")\n            vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint\n\n            if np.iterable(value):\n                val = np.ma.asarray(value)\n                val = 2 * (val - 0.5)\n                val[val > 0] *= abs(vmax - midpoint)\n                val[val < 0] *= abs(vmin - midpoint)\n                val += midpoint\n                return val\n            else:\n                val = 2 * (value - 0.5)\n                if val < 0:\n                    return val * abs(vmin - midpoint) + midpoint\n                else:\n                    return val * abs(vmax - midpoint) + midpoint\n\n    im, CS = [], []\n    if axs is None:\n        axs = plt.subplot()\n\n    if fill:\n        extent = x.min(), x.max(), y.min(), y.max()\n        im = axs.contourf(\n            X,\n            Y,\n            d_grid,\n            50,\n            vmin=vmin,\n            vmax=vmax,\n            cmap=cmap,\n            norm=MidPointNorm(midpoint=midpoint),\n            alpha=alpha,\n        )\n\n        axs.imshow(\n            ls.hillshade(d_grid, vert_exag=ve, dx=1.0, dy=1.0),\n            cmap=\"gray\",\n            alpha=alphaHS,\n            extent=extent,\n            origin=\"lower\",\n        )\n\n    if contour > 0:\n        CS = axs.contour(X, Y, d_grid, int(contour), colors=\"k\", linewidths=0.5)\n\n        if clabel:\n            plt.clabel(CS, inline=1, fontsize=10, fmt=\"%i\")\n    return im, CS\n\n\ndef plotModelSections(\n    mesh,\n    m,\n    normal=\"x\",\n    ind=0,\n    vmin=None,\n    vmax=None,\n    subFact=2,\n    scale=1.0,\n    xlim=None,\n    ylim=None,\n    vec=\"k\",\n    title=None,\n    axs=None,\n    actv=None,\n    contours=None,\n    fill=True,\n    orientation=\"vertical\",\n    cmap=\"pink_r\",\n    contourf=False,\n    colorbar=False,\n):\n\n    \"\"\"\n    Plot section through a 3D tensor model\n    \"\"\"\n    # plot recovered model\n    nC = mesh.nC\n\n    if vmin is None:\n        vmin = m[np.isnan(m) != True].min()\n\n    if vmax is None:\n        vmax = m[np.isnan(m) != True].max()\n\n    if len(m) == 3 * nC:\n        m_lpx = m[0:nC]\n        m_lpy = m[nC : 2 * nC]\n        m_lpz = m[2 * nC :]\n\n        if actv is not None:\n            m_lpx[actv != True] = np.nan\n            m_lpy[actv != True] = np.nan\n            m_lpz[actv != True] = np.nan\n\n        amp = np.sqrt(m_lpx ** 2.0 + m_lpy ** 2.0 + m_lpz ** 2.0)\n\n        m_lpx = (m_lpx).reshape(mesh.vnC, order=\"F\")\n        m_lpy = (m_lpy).reshape(mesh.vnC, order=\"F\")\n        m_lpz = 
(m_lpz).reshape(mesh.vnC, order=\"F\")\n amp = amp.reshape(mesh.vnC, order=\"F\")\n else:\n\n if actv is not None:\n m[actv != True] = np.nan\n\n amp = m.reshape(mesh.vnC, order=\"F\")\n\n xx = mesh.gridCC[:, 0].reshape(mesh.vnC, order=\"F\")\n zz = mesh.gridCC[:, 2].reshape(mesh.vnC, order=\"F\")\n yy = mesh.gridCC[:, 1].reshape(mesh.vnC, order=\"F\")\n\n if axs is None:\n fig, axs = plt.figure(), plt.subplot()\n\n if normal == \"x\":\n xx = yy[ind, :, :].T\n yy = zz[ind, :, :].T\n model = amp[ind, :, :].T\n\n if len(m) == 3 * nC:\n mx = m_lpy[ind, ::subFact, ::subFact].T\n my = m_lpz[ind, ::subFact, ::subFact].T\n\n elif normal == \"y\":\n xx = xx[:, ind, :].T\n yy = zz[:, ind, :].T\n model = amp[:, ind, :].T\n\n if len(m) == 3 * nC:\n mx = m_lpx[::subFact, ind, ::subFact].T\n my = m_lpz[::subFact, ind, ::subFact].T\n\n elif normal == \"z\":\n\n if actv is not None:\n actIndFull = np.zeros(mesh.nC, dtype=bool)\n actIndFull[actv] = True\n else:\n actIndFull = np.ones(mesh.nC, dtype=bool)\n\n actIndFull = actIndFull.reshape(mesh.vnC, order=\"F\")\n\n model = np.zeros((mesh.nCx, mesh.nCy))\n mx = np.zeros((mesh.nCx, mesh.nCy))\n my = np.zeros((mesh.nCx, mesh.nCy))\n for ii in range(mesh.nCx):\n for jj in range(mesh.nCy):\n\n zcol = actIndFull[ii, jj, :]\n model[ii, jj] = amp[ii, jj, np.where(zcol)[0][-ind]]\n\n if len(m) == 3 * nC:\n mx[ii, jj] = m_lpx[ii, jj, np.where(zcol)[0][-ind]]\n my[ii, jj] = m_lpy[ii, jj, np.where(zcol)[0][-ind]]\n\n xx = xx[:, :, ind].T\n yy = yy[:, :, ind].T\n model = model.T\n\n if len(m) == 3 * nC:\n mx = mx[::subFact, ::subFact].T\n my = my[::subFact, ::subFact].T\n\n im2, cbar = [], []\n if fill:\n if contourf:\n im2 = axs.contourf(xx, yy, amp, 10, vmin=vmin, vmax=vmax, cmap=cmap)\n else:\n if mesh.dim == 3:\n im2 = mesh.plotSlice(\n mkvc(amp),\n ind=ind,\n normal=normal.upper(),\n ax=axs,\n clim=[vmin, vmax],\n pcolorOpts={\"clim\": [vmin, vmax], \"cmap\": cmap},\n )[0]\n else:\n im2 = mesh.plotImage(\n mkvc(amp),\n ax=axs,\n clim=[vmin, vmax],\n pcolorOpts={\"clim\": [vmin, vmax], \"cmap\": cmap, \"alpha\": alpha},\n )[0]\n\n if colorbar:\n cbar = plt.colorbar(\n im2,\n orientation=orientation,\n ax=axs,\n ticks=np.linspace(vmin, vmax, 4),\n format=\"${%.3f}$\",\n shrink=0.5,\n )\n\n if contours is not None:\n axs.contour(xx, yy, model, contours, colors=\"k\")\n\n if len(m) == 3 * nC:\n\n axs.quiver(\n mkvc(xx[::subFact, ::subFact]),\n mkvc(yy[::subFact, ::subFact]),\n mkvc(mx),\n mkvc(my),\n pivot=\"mid\",\n scale_units=\"inches\",\n scale=scale,\n linewidths=(1,),\n edgecolors=(vec),\n headaxislength=0.1,\n headwidth=10,\n headlength=30,\n )\n\n axs.set_aspect(\"equal\")\n\n if xlim is not None:\n axs.set_xlim(xlim[0], xlim[1])\n\n if ylim is not None:\n axs.set_ylim(ylim[0], ylim[1])\n\n if title is not None:\n axs.set_title(title)\n\n return axs, im2, cbar\n\n\n# def vizCond(mesh, model, axs=None, normal = 'z', ind = 0, xlim=None, ylim=None, vmin=None, contours=None, fill=True, vmax=None,subFact=None, scale=1., savefig=False, cmap = 'jet_r', figname=\"Conductivity.png\"):\n\n\n# axs, im, cbar = plotModelSections(mesh, model, normal=normal,\n# ind=ind, axs=axs, cmap=cmap, subFact=subFact,\n# xlim=xlim, scale = scale, vec ='w',\n# ylim=ylim, contours=contours, fill=fill,\n# vmin=vmin, vmax=vmax)\n\n\n# if normal=='x':\n# axs.set_title(str(int(mesh.vectorCCx[ind])) + ' E')\n# # Add lakes and hydro\n# # for file in pline[:11]:\n# # trace = np.loadtxt(file, skiprows=1, delimiter=',')\n# # ax2.plot(trace[:,1], trace[:,2], 'k', ms=1)\n# # ax2.text(trace[0,1], 
trace[0,2],file[28:-4])\n\n#     elif normal=='y':\n#         axs.set_title(str(int(mesh.vectorCCy[ind])) + ' N')\n#         # Add lakes and hydro\n#         # for file in pline[11:]:\n#         #     trace = np.loadtxt(file, skiprows=1, delimiter=',')\n#         #     ax2.plot(trace[:,0], trace[:,2], 'k', ms=1)\n#         #     ax2.text(trace[0,0], trace[0,2],file[28:-4])\n\n#     else:\n#         axs.set_title('Depth: -' + str(np.sum(mesh.hz[-ind:-1])+mesh.hz[-ind]/2) + ' m')\n\n#     return axs, im, cbar\n\n\ndef plotProfile(\n    xyzd,\n    a,\n    b,\n    npts,\n    data=None,\n    fig=None,\n    ax=None,\n    plotStr=\"k\",\n    coordinate_system=\"local\",\n):\n    \"\"\"\n    Plot the data and line profile inside the specified limits\n    \"\"\"\n\n    def linefun(x1, x2, y1, y2, nx, tol=1e-3):\n        dx = x2 - x1\n        dy = y2 - y1\n\n        if np.abs(dx) <= tol:\n            y = np.linspace(y1, y2, nx)\n            x = np.ones_like(y) * x1\n        elif np.abs(dy) <= tol:\n            x = np.linspace(x1, x2, nx)\n            y = np.ones_like(x) * y1\n        else:\n            x = np.linspace(x1, x2, nx)\n            slope = (y2 - y1) / (x2 - x1)\n            y = slope * (x - x1) + y1\n        return x, y\n\n    if fig is None:\n        fig = plt.figure(figsize=(6, 9))\n\n    plt.rcParams.update({\"font.size\": 14})\n\n    if ax is None:\n        ax = plt.subplot()\n\n    x, y = linefun(a[0], b[0], a[1], b[1], npts)\n    distance = np.sqrt((x - a[0]) ** 2.0 + (y - a[1]) ** 2.0)\n    dline = griddata(xyzd[:, :2], xyzd[:, -1], (x, y), method=\"cubic\")\n\n    if coordinate_system == \"xProfile\":\n        distance += a[0]\n    elif coordinate_system == \"yProfile\":\n        distance += a[1]\n\n    ax.plot(distance, dline, plotStr)\n\n    if data is not None:\n\n        # if len(plotStr) == len(data):\n        for ii, d in enumerate(data):\n\n            dline = griddata(xyzd[:, :2], d, (x, y), method=\"cubic\")\n\n            if plotStr[ii]:\n                ax.plot(distance, dline, plotStr[ii])\n            else:\n                ax.plot(distance, dline)\n\n    ax.set_xlim(distance.min(), distance.max())\n\n    # ax.set_xlabel(\"Distance (m)\")\n    # ax.set_ylabel(\"Magnetic field (nT)\")\n\n    # ax.text(distance.min(), dline.max()*0.8, 'A', fontsize = 16)\n    # ax.text(distance.max()*0.97, out_linei.max()*0.8, 'B', fontsize = 16)\n    # ax.legend((\"Observed\", \"Simulated\"), bbox_to_anchor=(0.5, -0.3))\n    # ax.grid(True)\n\n    return ax\n","sub_path":"geoapps/simpegPF/Utils/PlotUtils.py","file_name":"PlotUtils.py","file_ext":"py","file_size_in_byte":16909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} {"seq_id":"137584897","text":"# Dijkstra's algorithm for finding the shortest path in a graph\n# Programmed 2019-01-28 \n\ndef make_graph(file):\n    try:\n        f = open(file, 'r')\n    except IOError:\n        raise IOError(\"File does not exist!\")\n\n    line_list = f.readlines()\n\n    # Found on mukeshmithrakumar github, thought this was clean..\n    # populate the graph using data from the text file via dictionary comprehensions\n    G = {int(line.split()[0]): {(int(tup.split(',')[0])): int(tup.split(',')[1])\n        for tup in line.split()[1:] if tup} for line in line_list if line}\n\n    f.close()\n    return G\n\n\ndef dijkstra(G, start, end):\n    shortest_distance = {}\n    predecessor = {}\n    unseenNodes = G\n    infinity = float('inf')\n    path = []\n\n    for node in unseenNodes:\n        shortest_distance[node] = infinity\n\n    shortest_distance[start] = 0\n\n\n    while unseenNodes:\n        minNode = None\n\n        # priority-queue? 
-> finds the best node seen so far\n        for node in unseenNodes:\n            if minNode is None:\n                minNode = node\n            elif shortest_distance[node] < shortest_distance[minNode]:\n                minNode = node\n\n\n        for childNode, weight in G[minNode].items():\n            if weight + shortest_distance[minNode] < shortest_distance[childNode]:\n                shortest_distance[childNode] = weight + shortest_distance[minNode]\n                predecessor[childNode] = minNode\n\n        unseenNodes.pop(minNode)\n\n    print(\"pred\")\n    print(predecessor)\n    currentNode = end\n\n    while currentNode != start:\n        try:\n            path.insert(0, currentNode)\n            currentNode = predecessor[currentNode]\n        except KeyError:\n            print('Path not reachable')\n            break\n    path.insert(0,start)\n\n    return path, shortest_distance[end]\n\n\n\n\n#G = make_graph('dijkstraData.txt')\n\nG = {1:{2:10, 3:20},\n     2:{4:40},\n     3:{4:5},\n     4:{}}\nprint(f'Current graph is: {G}')\npath, shortest = dijkstra(G, 1, 4)\n\nprint(path)\nprint(shortest)\n","sub_path":"Algorithms/graphtheory/dijkstra/djikstra.py","file_name":"djikstra.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} {"seq_id":"555280093","text":"def hasNumber(string):\r\n    for character in string:\r\n        if character.isdigit():\r\n            return True\r\n        else:\r\n            pass\r\n    return False\r\n\r\ndef isNumber(integer):\r\n    try:\r\n        integer = int(integer)\r\n        return True\r\n    except ValueError:\r\n        return False\r\n\r\nabilities = [\"NONE\", \"STRENGTH\", \"SPEED\", \"HP\", \"ARMOR\"]\r\ntypes = [\"ONETIME\", \"POTION\", \"ARMOR\", \"WEAPON\", \"ADD ON\"]\r\n\r\nclass item: \r\n    def fAbility(self):\r\n        print(abilities)\r\n        self.ability = input(\"Jaká je schopnost předmětu? > \")\r\n        if self.ability.upper() not in abilities:\r\n            print(\"ERROR - Schopnost není v nabídce. Prosím zkuste to znovu.\")\r\n            self.fAbility()\r\n        else:\r\n            if self.ability.upper() == \"NONE\":\r\n                self.bonus = 0\r\n                pass\r\n            else:\r\n                self.bonus = input(\"Kolik dává schopnost bonus? (+) > \")\r\n                while not isNumber(self.bonus):\r\n                    print(\"ERROR - Bonus musí být celé číslo. Prosím zkuste to znovu.\")\r\n                    self.bonus = input(\"Kolik dává schopnost bonus? (+) > \")\r\n\r\n    def createItem(self):\r\n        self.name = input(\"Jméno předmětu > \")\r\n        while hasNumber(self.name):\r\n            print(\"ERROR - Jméno předmětu nesmí obsahovat číslo. Prosím zkuste to znovu.\")\r\n            self.name = str(input(\"Jméno předmětu > \"))\r\n\r\n        print(types)\r\n        self.itemType = input(\"Typ předmětu > \")\r\n        while self.itemType.upper() not in types:\r\n            print(\"ERROR - Typ předmětu není v nabídce. Prosím zkuste to znovu.\")\r\n            print(types)\r\n            self.itemType = input(\"Typ předmětu > \") \r\n\r\n        self.level = input(\"Úroveň předmětu > \")\r\n        while not isNumber(self.level):\r\n            print(\"ERROR - Úroveň předmětu musí být reprezentována celým číslem. 
Prosím zkuste to znovu.\")\r\n self.level = input(\"Úroveň předmětu > \")\r\n\r\n self.fAbility()\r\n\r\n def saveItem(self):\r\n itemsFile = open(\"items.txt\", \"a+\")\r\n itemsFile.write(\"${};\\ntype:{};\\nlevel:{};\\nability:{} +{}\\n\".format(self.name, self.itemType.upper(), self.level, self.ability.upper(), self.bonus))\r\n itemsFile.close()\r\n \r\nclass pickItem:\r\n def findItem(self, name):\r\n file = open(\"items.txt\", \"r\")\r\n allItems = file.read()\r\n sortedItems = allItems.split(\"$\")\r\n for i in sortedItems:\r\n if i.split(\";\")[0] == name:\r\n self.name = name\r\n self.type = i.split(\";\")[1].split(\":\")[1]\r\n self.lvl = int(i.split(\";\")[2].split(\":\")[1])\r\n rawAbility = i.split(\";\")[3].split(\":\")[1]\r\n self.ability = rawAbility.split(\" \")[0]\r\n self.bonus = int(rawAbility.split(\" \")[1])\r\n file.close()\r\n return True\r\n else:\r\n file.close()\r\n return False\r\n\r\n \r\nif __name__ == \"__main__\":\r\n addItem = item()\r\n addItem.createItem()\r\n addItem.saveItem()\r\n\r\n\r\n#item = pickItem()\r\n#if item.findItem(\"dýka\"):\r\n# print(item.ability)\r\n# print(item.bonus)\r\n#else:\r\n# print(\"item not found\")\r\n \r\n \r\n","sub_path":"items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"579199153","text":"\"\"\"Utility code for to synchronize the accounting ledger with transactions.\n\nRead the transaction data from the traditional Chez Bob database tables, and\ninsert daily aggregate amounts into the main finance ledger.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport datetime\n\nfrom decimal import Decimal\n\nfrom chezbob.finance.models import Account, Split, Transaction, DepositBalances\nfrom chezbob.cashout.models import Entity, CashOut\n\ndry_run = False\n\n# Keep this synchronized with any changes to the account list in the database.\nacct_bank = Account.objects.get(id=1)\nacct_cash = Account.objects.get(id=7)\nacct_deposits = Account.objects.get(id=2)\nacct_donations = Account.objects.get(id=12)\nacct_purchases = Account.objects.get(id=4)\nacct_social_restricted = Account.objects.get(id=21)\nacct_social_donations = Account.objects.get(id=20)\nacct_writeoff = Account.objects.get(id=13)\n\ncashout_entity_soda = Entity.objects.get(id=1)\ncashout_entity_box = Entity.objects.get(id=2)\n\n# A description of the transactions that should automatically be created to\n# reflect various types of activity. 
In each list, the first item is the\n# description, followed by a list of splits to include, as (multiplier,\n# account) pairs.\nauto_transactions = {\n 'deposit': [\n \"Deposits\", (+1, acct_cash), (-1, acct_deposits)],\n 'donate': [\n \"Donations\", (+1, acct_deposits), (-1, acct_donations)],\n 'purchase': [\n \"Purchases\", (+1, acct_deposits), (-1, acct_purchases)],\n 'socialhour': [\n \"Social Hour Donations\",\n (+1, acct_deposits), (-1, acct_social_donations),\n (+1, acct_social_restricted), (-1, acct_bank)],\n 'writeoff': [\n \"Debt Written Off\", (+1, acct_writeoff), (-1, acct_deposits)],\n 'refund': [\n \"Refunds\", (+1, acct_deposits), (-1, acct_cash)],\n}\n\n\ndef insert_transaction(date, type, amount):\n info = auto_transactions[type]\n\n print(\"Insert %s, %s\" % (info[0], amount))\n\n if dry_run:\n return\n\n t = Transaction(date=date, description=info[0], auto_generated=True)\n t.save()\n for i in info[1:]:\n amt = i[0] * amount\n s = Split(transaction=t, account=i[1], amount=amt)\n s.save()\n\n\n# A running total of Bank of Bob liabilities (in dollars)\nbob_liabilities = Decimal(\"0.00\")\n\n\ndef update_day(date, amounts):\n old_transactions = list(Transaction.objects.filter(date=date,\n auto_generated=True))\n\n # Should separated Bank of Bob liabilities (positive/negative) be\n # recomputed?\n update_bob_liabilities = False\n try:\n d = DepositBalances.objects.get(date=date)\n if d.positive - d.negative != bob_liabilities:\n update_bob_liabilities = True\n except DepositBalances.DoesNotExist:\n update_bob_liabilities = True\n\n for ty in sorted(amounts.keys()):\n info = auto_transactions[ty]\n\n # Search for an existing transaction, so we can update only if needed\n old = None\n for o in old_transactions:\n if o.description == info[0]:\n old = o\n break\n\n if old is not None:\n old_transactions.remove(old)\n\n # Compare the existing transaction with the actual amount needed.\n mismatch = False\n splits = list(old.split_set.all())\n needed_splits = info[1:]\n for s in splits:\n amt = s.amount\n if amt == amounts[ty]:\n factor = +1\n elif amt == -amounts[ty]:\n factor = -1\n else:\n mismatch = True\n break\n if (factor, s.account) in needed_splits:\n needed_splits.remove((factor, s.account))\n else:\n mismatch = True\n break\n if needed_splits != []:\n mismatch = True\n\n # If the transaction doesn't match what is needed, delete it. 
It\n # will then be recreated below.\n if amounts[ty] == 0 or mismatch:\n print(\"Deleting\", old)\n if not dry_run:\n old.delete()\n old = None\n\n # If we're updating transactions for this date, recompute Bank\n # of Bob balances as well, just to be safe\n update_bob_liabilities = True\n\n if old is None and amounts[ty] != 0:\n insert_transaction(date, ty, amounts[ty])\n update_bob_liabilities = True\n\n # Recompute positive/negative Bank of Bob balances and insert if needed\n if update_bob_liabilities:\n from django.db import connection\n cursor = connection.cursor()\n cursor.execute(\"\"\"SELECT SUM(balance)\n FROM (SELECT userid, SUM(xactvalue) AS balance\n FROM transactions WHERE xacttime::date <= %s\n GROUP BY userid) AS balances\n WHERE balance > 0\"\"\", (date,))\n positive = cursor.fetchone()[0]\n\n cursor.execute(\"\"\"SELECT -SUM(balance)\n FROM (SELECT userid, SUM(xactvalue) AS balance\n FROM transactions WHERE xacttime::date <= %s\n GROUP BY userid) AS balances\n WHERE balance < 0\"\"\", (date,))\n negative = cursor.fetchone()[0]\n\n print(\"Update balances summary: +%s -%s\" % (positive, negative))\n # FIXME: Deletion through the database API didn't seem to work (problem\n # with date as a primary key?)\n cursor.execute(\"DELETE FROM finance_deposit_summary WHERE date=%s\",\n (date,))\n d = DepositBalances(date=date, positive=positive, negative=negative)\n d.save()\n\n # If there were any transactions found that weren't matched at all, report\n # them\n if old_transactions:\n print(\"Unmatched transactions:\", old_transactions)\n\n\ndef sync_day(date):\n global bob_liabilities\n\n from django.db import connection\n cursor = connection.cursor()\n\n print(date)\n cursor.execute(\"\"\"SELECT xactvalue, xacttype FROM transactions\n WHERE xacttime::date = %s\"\"\", (date,))\n\n (sum_deposit, sum_donate, sum_purchase,\n sum_socialhour, sum_writeoff, sum_refund) = tuple([Decimal(\"0.00\")] * 6)\n\n for (amt, desc) in cursor.fetchall():\n bob_liabilities += amt\n category = desc.split()[0]\n if category in (\"INIT\", \"TRANSFER\", \"REIMBURSE\"):\n continue\n elif category == \"ADD\":\n sum_deposit += amt\n elif category == \"BUY\":\n sum_purchase -= amt\n elif category == \"DONATION\":\n sum_donate -= amt\n elif category == \"SOCIAL\":\n sum_socialhour -= amt\n elif category == \"WRITEOFF\":\n sum_writeoff += amt\n elif category == \"REFUND\":\n sum_refund -= amt\n elif category == \"WITHDRAW\": # Is the right response here?\n sum_deposit -= amt\n else:\n raise ValueError(\"Unknown transaction: \" + desc)\n\n update_day(date, {'deposit': sum_deposit,\n 'donate': sum_donate,\n 'purchase': sum_purchase,\n 'socialhour': sum_socialhour,\n 'writeoff': sum_writeoff,\n 'refund': sum_refund})\n\n\ndef sync():\n global bob_liabilities\n bob_liabilities = Decimal(\"0.00\")\n\n from django.db import connection\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"SELECT MIN(xacttime::date), MAX(xacttime::date)\n FROM transactions\"\"\")\n (start_date, end_date) = cursor.fetchone()\n\n date = start_date\n while date <= end_date:\n sync_day(date)\n date += datetime.timedelta(days=1)\n\n\ndef check_cash():\n from django.db import connection\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"SELECT MIN(xacttime::date) FROM transactions\"\"\")\n (last_date,) = cursor.fetchone()\n\n cursor.execute(\"\"\"SELECT sum(amount)\n FROM finance_splits s JOIN finance_transactions t\n ON (s.transaction_id = t.id)\n WHERE account_id = %s AND date < %s\"\"\",\n [acct_cash.id, last_date])\n (balance,) = 
cursor.fetchone()\n\n cash_deltas = {'soda': Decimal(\"0.00\"), 'chezbob': Decimal(\"0.00\")}\n\n print(\"Starting cash: %s on %s\" % (balance, last_date))\n print()\n\n source_totals = {}\n for cashout in CashOut.objects.filter(\n datetime__gte=last_date).order_by('datetime'):\n print(cashout)\n cursor.execute(\"\"\"SELECT source, sum(xactvalue)\n FROM transactions\n WHERE (xacttype = 'ADD' OR xacttype = 'REFUND')\n AND xacttime >= %s AND xacttime < %s\n GROUP BY source\"\"\",\n [last_date, cashout.datetime])\n for (source, amt) in cursor.fetchall():\n print(\" Deposit: %s (%s)\" % (amt, source))\n balance += amt\n if source is not None:\n source_totals[source] = source_totals.get(\n source, Decimal(\"0.00\")) + amt\n\n cursor.execute(\"\"\"SELECT sum(amount)\n FROM finance_splits s JOIN finance_transactions t\n ON (s.transaction_id = t.id)\n WHERE account_id = %s AND NOT auto_generated\n AND date::timestamp >= %s\n AND date::timestamp < %s\"\"\",\n [acct_cash.id, last_date, cashout.datetime])\n (other,) = cursor.fetchone()\n if other is None:\n other = Decimal(\"0.00\")\n balance += other\n print(\" Other: %s\" % (other,))\n\n cashcount = False\n for c in cashout.cashcount_set.all():\n if c.entity in (\n cashout_entity_soda, cashout_entity_box) and c.total > 0:\n print(\" Cash Count: %s (%s)\" % (c.total, c.entity.name))\n cashcount = True\n if c.entity == cashout_entity_soda:\n cash_deltas['soda'] += c.total\n else:\n cash_deltas['chezbob'] += c.total\n if cashcount:\n print(\" Expected:\")\n for (s, t) in source_totals.items():\n print(\" %s %s\" % (t, s))\n if s not in cash_deltas:\n cash_deltas[s] = Decimal(\"0.00\")\n cash_deltas[s] -= t\n source_totals.clear()\n\n print(\" Cumulative Errors:\")\n for (s, t) in cash_deltas.items():\n print(\" %s %s\" % (t, s))\n\n if abs(balance) >= 20:\n print(\"**********\")\n print(\" BALANCE: %s\" % (balance,))\n\n last_date = cashout.datetime\n","sub_path":"django/chezbob/util/sync_db.py","file_name":"sync_db.py","file_ext":"py","file_size_in_byte":10980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"279639234","text":"import configparser\nimport requests\nimport json\nimport datetime\nimport time\n\n# function to send real_time data to PowerBI\ndef send_powerbi(powerbi_url, metric, value):\n data = {}\n data['datetime'] = datetime.datetime.now().isoformat(timespec='seconds')\n data['metrics'] = metric\n data['value'] = value\n response = requests.post(powerbi_url, data)\n return response\n\n\nconfig = configparser.ConfigParser(interpolation=None)\n# set interpolation to none to avoid interpreting % in URL as reference variable\n# if not set, error will be raised due to %2 and %3D in POWERBI_URL\n\nconfig.read('config.ini')\nprint('sections:', config.sections())\n\nmain_config = config['main']\nfor key in main_config:\n print(key, main_config.get(key))\n\nkafka_jmx = main_config.get('kafka_jmx')\nprint(kafka_jmx)\n\nr = requests.get(kafka_jmx)\nprint(r.status_code)\n\npowerbi_url = main_config.get('powerbi_url')\nprint(powerbi_url)\n\"\"\" # test send_powerbi() function: send 1 data points 5seconds apart to PowerBI\nfor i in range(1,11):\n send_powerbi(powerbi_url, metric='mem_used', value=i)\n time.sleep(1) \"\"\"\n\n\npayload = {}\npayload['datetime'] = datetime.datetime.now().isoformat(timespec='seconds')\npayload['metrics'] = 'mem used'\npayload['value'] = 6\n#data = json.dumps(data)\npayload = [payload]\nprint(payload)\n\nresponse = requests.post(powerbi_url, 
json=payload)\nprint(response.status_code)\nprint(response.text)\n\n\n\n","sub_path":"test-pbi-singlePOST.py","file_name":"test-pbi-singlePOST.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"363972943","text":"# Nelder Mead Algorithm - Introduction to derivative free optimisation\n# chapter 8\nimport numpy as np\nimport timeit\nimport logging\nfrom solver import Solver\nfrom evalpoint import EvalPoint\n\n\nclass NelderMead(Solver):\n\n def __init__(self, problem, simplex, **kwargs):\n \"\"\" **kwargs contains the arguments of the stopping criteria\n max fun eval\n max time\n tolerance. \"\"\"\n self.__problem = problem\n self.__nvar = problem.nvar\n self.__simplex = simplex\n self.__feval = 0\n self.__time = timeit.default_timer()\n self.__rho = simplex.volume\n super(NelderMead, self).__init__('Nelder Mead')\n logger_name = kwargs.get('logger_name', None)\n self.__logger = logging.getLogger(logger_name)\n self.__logger.addHandler(logging.NullHandler())\n\n @property\n def feval(self):\n \"Return the number of function evaluations.\"\n return self.__feval\n\n @feval.setter\n def feval(self, f):\n \"Set the number of function evaluations to f.\"\n self.__feval = f\n\n @property\n def time(self):\n \"Return the cpu time.\"\n return self.__time\n\n @time.setter\n def time(self, t):\n \"Set the cpu time to t.\"\n self.__time = t\n\n @property\n def rho(self):\n \"Return the rho of the simplex.\"\n return self.__rho\n\n @rho.setter\n def rho(self, v):\n \"Set the rho of the simplex to v.\"\n self.__rho = v\n\n @property\n def problem(self):\n \"Return the problem.\"\n return self.__problem\n\n @property\n def nvar(self):\n \"Return the number of variables. \"\n return self.problem.nvar\n\n @nvar.setter\n def nvar(self, n):\n \"Set the number of variables.\"\n self.problem.nvar = n\n\n @property\n def simplex(self):\n \"Return the simplex.\"\n return self.__simplex\n\n @simplex.setter\n def simplex(self, S):\n \"Set the simplex.\"\n self.__simplex = S\n\n def order_vertices(self):\n \"Sorts the vertices of the simplex according the their values. \"\n self.__logger.debug('--- Sort vertices ---')\n order_val = {}\n for i in xrange(self.nvar + 1):\n self.problem.eval(self.simplex.vertices[i])\n val = self.simplex.vertices[i].output\n if val in order_val.keys():\n order_val[val].append(self.simplex.vertices[i])\n else:\n order_val[val] = [self.simplex.vertices[i]]\n values = sorted(order_val)\n S = []\n for i in xrange(len(values)):\n list_temp = order_val[values[i]]\n S = S + list_temp\n self.simplex.vertices = S\n\n def reflect(self, y):\n \" Replace last point of the simplex by y. 
\"\n self.__logger.debug('--> Reflect ')\n if isinstance(y, EvalPoint):\n self.simplex.vertices[-1] = y\n return\n self.simplex.vertices[-1] = EvalPoint(np.array(y))\n return\n\n def expand(self, delta_e, y_c, y_r, y_n, f_r):\n self.__logger.debug('--> Expand ')\n y_e = y_c + delta_e*(y_c - y_n)\n evalp_e = EvalPoint(np.array(y_e))\n self.problem.eval(evalp_e)\n f_e = evalp_e.output\n self.__logger.debug('%7.4e' % f_e)\n if f_e <= f_r:\n self.reflect(y_e)\n else:\n self.reflect(y_r)\n\n def outside_contract(self, y_c, y_n, delta_oc, f_r):\n self.__logger.debug('--> Outside contraction')\n y_oc = y_c + delta_oc*(y_c - y_n)\n evalp_oc = EvalPoint(np.array(y_oc))\n self.problem.eval(evalp_oc)\n f_oc = evalp_oc.output\n if f_oc <= f_r:\n self.reflect(y_oc)\n else:\n self.shrink()\n\n def inside_contract(self, y_c, y_n, delta_ic, f_n):\n self.__logger.debug('--> Inside contraction')\n y_ic = y_c + delta_ic*(y_c - y_n)\n evalp_ic = EvalPoint(np.array(y_ic))\n self.problem.eval(evalp_ic)\n f_ic = evalp_ic.output\n if f_ic < f_n:\n self.reflect(y_ic)\n else:\n self.shrink()\n\n def shrink(self):\n self.__logger.debug('--> Shrink ')\n gamma_s = 0.5\n y_0 = self.simplex.vertices[0].input\n S = [(y_0 + gamma_s*(x.input - y_0)) for x in self.simplex.vertices]\n self.simplex.vertices = S\n\n def solve(self, **kwargs):\n\n # Initialization\n self.__logger.debug(' -----------------> Starting Nelder Mead')\n self.time = timeit.default_timer()\n t0 = self.time\n delta_ic = -0.5\n delta_oc = 0.5\n delta_r = 1\n delta_e = 2\n\n # Order the vertices\n self.order_vertices()\n\n # Check stopping criteria\n self.time = timeit.default_timer() - self.time\n (stop, flag) = super(NelderMead, self).check_termination(**kwargs)\n self.__logger.debug('stop = %s' % stop)\n self.__logger.debug('flag = %s' % flag)\n\n while not stop:\n Y = self.simplex.vertices\n y_n = Y[-1].input\n self.__logger.debug(' compute centroid ')\n y_c = self.simplex.centroid.input\n y_r = [y_c[i] + delta_r*(y_c[i] - y_n[i])\n for i in xrange(len(y_c))]\n evalp_r = EvalPoint(np.array(y_r))\n self.problem.eval(evalp_r)\n f_r = evalp_r.output\n self.problem.eval(Y[0])\n f_0 = Y[0].output\n self.problem.eval(Y[-2])\n f_n_1 = Y[-2].output\n self.feval += 3\n\n # Choose the transformation to apply to the simplex\n if (f_0 <= f_r) and (f_r < f_n_1):\n self.reflect(y_r)\n if f_r < f_0:\n self.expand(delta_e, y_c, y_r, y_n, f_r)\n self.feval += 1\n if f_n_1 <= f_r:\n self.problem.eval(Y[-1])\n f_n = Y[-1].output\n if f_r < f_n:\n self.outside_contract(y_c, y_n, delta_oc, f_r)\n else:\n self.inside_contract(y_c, y_n, delta_ic, f_n)\n self.feval += 2\n\n # Check stopping criteria\n self.rho = self.simplex.volume\n self.time = timeit.default_timer() - t0\n self.__logger.info('Feval : %d, Vol : %7.4e Func_val : %7.4e'\n % (self.feval, self.rho, f_0))\n self.order_vertices()\n (stop, flag) = super(NelderMead, self).check_termination(**kwargs)\n\n self.__logger.info(' \\n flag = %s' % flag)\n return self.simplex.vertices[0]\n","sub_path":"DFO/dfo/neldermead.py","file_name":"neldermead.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"520851817","text":"from django.http import JsonResponse\nfrom .models import Todo\nfrom .forms import TodoForm\nfrom .serializers import TodoSerializer\nfrom rest_framework.decorators import api_view\n\n@api_view(['GET'])\ndef fetchAllTodos(req):\n objs = list(Todo.objects.filter(user=req.user))\n objs.sort(key=lambda x: x.category, 
reverse=True)\n todos = []\n for obj in objs:\n todos.append(TodoSerializer(obj).data)\n return JsonResponse(todos, safe=False)\n\n@api_view(['GET'])\ndef fetchTodo(req, id):\n try:\n todo = Todo.objects.get(id=id)\n return JsonResponse(TodoSerializer(todo).data)\n except:\n return JsonResponse({'msg' : 'No data found'})\n\n@api_view(['POST'])\ndef addTodo(req):\n if(req.method == \"POST\"):\n form = TodoForm(req.POST)\n print(form.is_valid())\n if form.is_valid():\n instance = form.save(commit = False)\n instance.user = req.user\n instance.save()\n return JsonResponse({'msg' : 'Submission succeeded'})\n return JsonResponse({'msg' : 'Submission failed'})\n\n@api_view(['POST'])\ndef deleteTodos(req):\n if(req.method == \"POST\"):\n ids = req.POST.get('list', False)\n\n if not ids:\n return JsonResponse({'msg' : 'ID is not provided'})\n \n ids = ids.split(',')\n db = list(Todo.objects.values_list('id', flat=True))\n count = 0\n try:\n for id in ids:\n if int(id) in db:\n todo = Todo.objects.get(id=id)\n todo.delete()\n count += 1\n return JsonResponse({'msg' : f'{count} item(s) deleted'})\n except:\n return JsonResponse({'msg' : 'Deletion Failed'})\n\n@api_view(['POST'])\ndef updateTodo(req, id):\n try:\n if(req.method == \"POST\"):\n todo = Todo.objects.get(id=id)\n form = TodoForm(req.POST, instance = todo) \n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = req.user\n instance.save()\n return JsonResponse({'msg' : 'Updation succeeded'})\n return JsonResponse({'msg' : 'All fields are required'})\n except:\n return JsonResponse({'msg' : 'Updation failed'})\n","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"199003912","text":"from collections import deque\r\ninput = open(0).readline\r\n\r\n# dfs 를 쓸 때 필요함\r\n# sys.setrecursionlimit(10000)\r\n\r\ndx, dy = [1, 0, -1, 0], [0, 1, 0, -1]\r\n\r\ndef dfs(x, y):\r\n global graph, visited\r\n \r\n if not (0 <= x \", closeSettings)\n\n # настройки направления\n def bc(b, i):\n global r, l, u, d\n for e in b:\n e.config(relief=RAISED)\n b[i].config(relief=SUNKEN)\n if i == 0:\n self._settings[\"image\"] = r\n self._settings[\"vectors\"] = [1, 0]\n elif i == 3:\n self._settings[\"image\"] = l\n self._settings[\"vectors\"] = [-1, 0]\n elif i == 2:\n self._settings[\"image\"] = u\n self._settings[\"vectors\"] = [0, -1]\n elif i == 1:\n self._settings[\"image\"] = d\n self._settings[\"vectors\"] = [0, 1]\n self.clear()\n bf = Frame(self._settings[\"window\"])\n Label(bf, text=\"Направление: \").pack(side=LEFT)\n buts = []\n buts.append(Button(bf, image=r, relief=SUNKEN, command=lambda: bc(buts, 0)))\n buts.append(Button(bf, image=d, command=lambda: bc(buts, 1)))\n buts.append(Button(bf, image=u, command=lambda: bc(buts, 2)))\n buts.append(Button(bf, image=l, command=lambda: bc(buts, 3)))\n for b in buts:\n b.pack(side=\"left\")\n bf.pack(anchor=W)\n # координаты\n posf = Frame(self._settings[\"window\"])\n Label(posf, text=\"Позиция: (\").pack(side=LEFT)\n xie = PosEntry(posf, \"1\", width=2, from_=1, to=30)\n self._settings[\"x\"] = xie.get\n xie.pack(side=LEFT)\n Label(posf, text=\"; \").pack(side=LEFT)\n yie = PosEntry(posf, \"1\", width=2, from_=1, to=30)\n self._settings[\"y\"] = yie.get\n yie.pack(side=LEFT)\n Label(posf, text=\")\").pack(side=LEFT)\n posf.pack(anchor=W)\n # размер поля\n sizef = Frame(self._settings[\"window\"])\n Label(sizef, text=\"Размер поля: 
\").pack(side=LEFT)\n se = ValidEntry(sizef, self._settings[\"N\"](), width=2, from_=3, to=30)\n self._settings[\"N\"] = se.get\n se.pack(side=LEFT)\n sizef.pack(anchor=W)\n #-#-#-#-#-#-#-#-#-#-#-#\n self._settings[\"window\"].geometry(\"+{}+{}\". format(x, y))\n self._settings[\"window\"].deiconify()\n self._settings[\"window\"].focus_set()\n\n def execute(self, timeout=False):\n self._iter = False\n if(timeout is not False):\n self.timeout = timeout\n code.config(state=DISABLED, relief=FLAT, bg=\"gray95\")\n self.clear()\n if self._parse():\n self._iter = True\n try:\n self._mainexec(self._pcode[\"main\"])\n except RuntimeError as e:\n showerror(\"Ошибка!\", e)\n code.config(state=NORMAL, relief=SUNKEN, bg=\"white\")\n self._iter = False\n return True\n\n def __onespacing(self):\n \"delete all useless space and tabulations\"\n end=code.index(END)\n newcode=[]\n for i in range(1,int(float(end))):\n i=str(i)\n newcode.append(code.get(i+\".0\",i+\".end\").strip())\n for i in range(len(newcode)):\n if newcode[i].find(\" \")>0:\n tc=newcode[i].split(\" \")\n tc=filter(lambda x: x!=\"\",tc)\n tc=\" \".join(tc)\n newcode[i]=tc\n newcode=list(filter(lambda x: x!=\"\",newcode))\n return \"\\n\".join(newcode)\n\n def _parse(self):\n tcode=self.__onespacing()\n maincode,procedures=tcode.partition(\"кон\")[::2]\n maincode,procedures=list(filter(lambda x: x!=\"\",maincode.partition(\"нач\\n\")[2].split(\"\\n\"))),list(map(lambda x: x.split(\"\\n\"),procedures.split(\"конец\")[:-1]))\n self._pcode={'main':maincode}\n pproc={}\n for proc in procedures:\n proc=list(filter(lambda x:x!=\"\",proc))\n t=proc[0].split(\" \")\n if t[0] == \"процедура\" and t[1]!=\"\" and len(t) == 2:\n pproc[proc[0][10:]]=proc[1:]\n else:\n showerror(\"Ошибка!\", \"Неправильно определение процедуры (\"+proc[0]+\")\")\n return False\n self._pcode[\"procedures\"]=pproc\n return True\n\n def _mainexec(self,cmds):\n i=0\n while i0:\n l-=1\n tcmds.append(tmpcmd)\n else:\n cond=cmds[i][3:].rstrip()\n if cond == \"пока не стена\":\n cond=True\n j=float('Inf')\n elif cond == \"пока стена\":\n cond=False\n j=float('Inf')\n elif cond.endswith(\"раз\") or cond.endswith(\"раза\"):\n try:\n j=int(cond[:-4])\n cond=None\n except ValueError:\n raise RuntimeError(\"Неверное условие \\\"\"+cond+\"\\\"\")\n return False\n else:\n raise RuntimeError(\"Неверное условие \\\"\"+cond+\"\\\"\")\n return False\n ti=0\n while ti0:\n l-=1\n tcmds.append(tmpcmd)\n else:\n elseindex=False\n if tcmds.count(\"иначе\")>0:\n elseindex=tcmds.index(\"иначе\")\n if self.__wallcheck() == cond:\n if elseindex is not False:\n self._mainexec(tcmds[:elseindex])\n else:\n self._mainexec(tcmds)\n elif elseindex is not False:\n self._mainexec(tcmds[elseindex+1:])\n break\n else:\n tcmds.append(tmpcmd)\n i+=len(tcmds)+2\n else:\n print(cmds[i])\n if not self._cmdexec(cmds[i]):\n return False\n i+=1\n return True\n\n def __wallcheck(self):\n x,y=c.coords(\"arrow\")\n n=self._settings[\"N\"]()\n if 0<=x+CELLSIZE*vectors[0] version:\n if \"yes\" == askquestion(\"Скачать обновление?\",\"Доступна новая версия. 
Скачать?\"):\n from os import system\n t = Thread(target=system, args=('start \"updating...\" /D updater updater.exe '+version,))\n t.daemon = True\n t.start()\n win.destroy()\n else:\n showinfo(\"Обновление\",\"Вы используете последнюю версию!\");\n\n\ndef maincode():\n code.insert(\n END,\n \"\"\"алг название\nкомментарий\nнач\n...\nкон\n\nпроцедура название\n...\nконец\"\"\")\n\n# Настройка окна\nwin = Tk()\nwin.title('ГрИ \"Персонаж\" - '+version)\nwin.minsize(1000, 660)\nwin.iconbitmap(bitmap=\"resources/icon.ico\")\n# Персонаж\narrim = Image.open(\"resources/sprite.png\")\nu = ImageTk.PhotoImage(arrim.crop((0, 0, 16, 28)))\nr = ImageTk.PhotoImage(arrim.crop((0, 28, 16, 2*28)))\nd = ImageTk.PhotoImage(arrim.crop((0, 28*2, 16, 28*3)))\nl = ImageTk.PhotoImage(arrim.crop((0, 28*3, 16, 28*4)))\nctrl = CTRL()\n# Настройка меню\nmain_menu = Menu(win) # Главное меню\nfile = Menu(main_menu, tearoff=0)\nfile.add_command(label=\"Открыть\", command=lambda: ctrl.file(False))\nfile.add_command(label=\"Сохранить\", command=lambda: ctrl.file(True))\nmain_menu.add_cascade(label=\"Файл\", menu=file)\ndebug=Menu(main_menu,tearoff=0) #меню отладки\ndebug.add_command(label=\"Медленно\", command=lambda: ctrl._iter or Thread(name=\"exec\",target=ctrl.execute,kwargs={\"timeout\":0.3}).start())\ndebug.add_command(label=\"Средне\",command=lambda: ctrl._iter or Thread(name=\"exec\",target=ctrl.execute,kwargs={\"timeout\":0.15}).start())\ndebug.add_command(label=\"Быстро\",command=lambda: ctrl._iter or Thread(name=\"exec\",target=ctrl.execute,kwargs={\"timeout\":0.075}).start())\ndebug.add_command(label=\"Мгновенно\",command=lambda: ctrl._iter or Thread(name=\"exec\",target=ctrl.execute,kwargs={\"timeout\":0}).start())\ndebug.add_command(label=\"Остановить\", command=ctrl.stop)\nmain_menu.add_cascade(label=\"Отладка\", menu=debug)\nmain_menu.add_command(label=\"Настройки\", command=ctrl.settings)\nmain_menu.add_command(label=\"Что нового\", command=wnew)\nmain_menu.add_command(label=\"Обновление\", command=lambda: Thread(name=\"update\",target=checkver).start())\nthanks = Menu(main_menu, tearoff=0) #ссылки на страницы вк тестиров =D\nthanks.add_command(label=\"SerYy1\", command=lambda: webbrowser_open('http://vk.com/skryabinsergey'))\nthanks.add_command(label=\"ussay0909\", command=lambda: webbrowser_open('http://vk.com/id99365586'))\nthanks.add_command(label=\"Der Ratte\")\nthanks.add_command(label=\"Здесь можете быть и Вы\",command=lambda: showinfo(\"Информация\",\"\"\"Принимайте участие в тестировании программы, и Ваш ник и ссылка на персональные данные(по надобности) появится в этом списке. Для участия в тестировании просто отправляйте отзывы, предложения и найденые Вами баги через меню \"Обратная связь\". 
Желаем успехов!\"\"\"))\nmain_menu.add_cascade(label=\"Благодарности\", menu=thanks)\nfb = Feedback()\nmain_menu.add_command(label=\"Обратная связь\", command=fb.create_window)\nwin.config(menu=main_menu)\n# Кнопки\ncontrols = Frame(win, width=10)\nct = BooleanVar()\nCheckbutton(controls,text=\"Управление\",variable=ct).pack(anchor=\"w\")\nstep=Button(controls,text=\"шаг\",command=lambda : not ct.get() and ctrl.code(\"шаг\") or ctrl.go(True))\nskip=Button(controls,text=\"прыжок\",command=lambda : not ct.get() and ctrl.code(\"прыжок\") or ctrl.go(False))\nright=Button(controls,text=\"вправо\",command=lambda : not ct.get() and ctrl.code(\"вправо\") or ctrl.rotate(True))\nleft=Button(controls,text=\"влево\",command=lambda : not ct.get() and ctrl.code(\"влево\") or ctrl.rotate(False))\nproc=Button(controls,text=\"процедура\",command=lambda:ctrl.code(\"процедура ...\\n \\nконец\"))\ncycle=Button(controls,text=\"цикл\")\ncycle_menu=Menu(win, tearoff=0)\ncycle_menu.add_command(label=\"пока (не) стена\",command=lambda:ctrl.code(\"нц пока не стена\\n \\nкц\"))\ncycle_menu.add_command(label=\"повторить N раз\",command=lambda:ctrl.code(\"нц N раз\\n \\nкц\"))\ncycle.bind(\"\",lambda e: cycle.tk.call(\"tk_popup\", cycle_menu, e.x_root, e.y_root))\nifelse=Button(controls,text=\"условие\",command=lambda:ctrl.code(\"если не стена\\n \\nиначе\\n \\nвсё\"))\nclearAll=Button(controls,fg=\"red\",text=\"очистить всё\",command=lambda:(ctrl.clear(),code.delete(\"1.0\",\"end\"),maincode()))\nstep.pack(fill=X)\nskip.pack(fill=X)\nright.pack(fill=X)\nleft.pack(fill=X)\nifelse.pack(fill=X)\ncycle.pack(fill=X)\nproc.pack(fill=X)\nLabel(controls).pack()\nclearAll.pack(fill=X)\ncontrols.grid(row=0, column=0)\n# Поле ввода кода\ncodefield = Frame(win)\ncode = Text(\n codefield,\n width=25,\n font=\"Verdana 12\",\n foreground=\"red\",\n maxundo=10,\n undo=True\n )\nhler = Highlight(code,{\n \"алг\":(\n {\n 'post':{\n 'until':'\\n',\n 'style':{'font':'Verdana 12 underline','foreground':'brown'}\n },\n 'pre':None,\n 'style':{'font':'Verdana 12 bold','foreground':'blue'}\n },\n False\n ),\n \"нач\":(\n {\n 'post':None,\n 'pre':{\n 'until':'алг',\n 'style':{'font':'Verdana 12 italic','foreground':'gray'}\n },\n 'style':{'font':'Verdana 12 bold','foreground':'green'}\n },\n False\n ),\n \"кон\":(\n {\n 'post':None,\n 'pre':None,\n 'style':{'font':'Verdana 12 bold','foreground':'green'}\n },\n False\n ),\n (\"шаг\",\"прыжок\",\"влево\",\"вправо\"):(\n {\n 'post':None,\n 'pre':None,\n 'style':{'foreground':'#0000CD'}\n },\n True\n ),\n \"нц\":(\n {\n 'post':{\n 'until':'\\n',\n 'style':{'font':'Verdana 12 italic','foreground':'#FF00AE'}\n },\n 'pre':None,\n 'style':{'foreground':'#FF00AE'}\n },\n True\n ),\n \"кц\":(\n {\n 'post':None,\n 'pre':None,\n 'style':{'foreground':'#FF00AE'}\n },\n True\n ),\n \"если\":(\n {\n 'post':{\n 'until':'\\n',\n 'style':{'foreground':'purple','font':'Verdana 12 italic'}\n },\n 'pre':None,\n 'style':{'foreground':'purple'}\n },\n True\n ),\n (\"иначе\",\"всё\",\"все\"):(\n {\n 'post':None,\n 'pre':None,\n 'style':{'foreground':'purple'}\n },\n True\n ),\n \"процедура\":(\n {\n 'pre':None,\n 'post':{\n 'until':'\\n',\n 'style':{'font':'Verdana 12 underline','foreground':'brown'}\n },\n 'style':{'foreground':'#FF8C00','font':'Verdana 12 bold'}\n },\n True\n ),\n \"конец\":(\n {\n 'pre':None,\n 'post':None,\n 'style':{'foreground':'#FF8C00','font':'Verdana 12 bold'}\n },\n True\n )\n})\nmaincode()\nhler.startAutoHL()\nscr = Scrollbar(codefield, 
command=code.yview)\ncode.configure(yscrollcommand=scr.set)\ncode.pack(side=LEFT, fill=BOTH)\nscr.pack(side=RIGHT, fill=Y)\ncode.bind(\"\", lambda e: code.event_generate(\"<>\"))\ncode.bind(\"\", lambda e: code.event_generate(\"<>\"))\ncode.bind(\"\", lambda e: code.event_generate(\"<>\"))\ncode.bind(\"\", lambda e: code.event_generate(\"<>\"))\ncode.bind(\n \"\",\n lambda e: code.event_generate(\"<>\")\n)\n# code.bind(\"\",lambda e:hler.highlight())\n# code.bind(\"\",lambda e:hler.highlight())\ncodefield.grid(row=0, column=1, sticky=N+S)\n# Сцена для стрелки\nscene = Frame(win)\nc = Canvas(scene, bg=\"white\", cursor=\"tcross\")\nCELLSIZE = None\n\n\ndef drawGrid():\n global CELLSIZE\n n = int(ctrl._settings[\"N\"]())\n CELLSIZE = (640)//n\n size = CELLSIZE*n\n c.config(width=size, height=size)\n c.delete(\"grid\")\n for i in range(n+1):\n c.create_line(i*CELLSIZE, 0, i*CELLSIZE, size, tag=\"grid\")\n c.create_line(0, i*CELLSIZE, size, i*CELLSIZE, tag=\"grid\")\n c.move(\"grid\", 2, 2)\n c.lower(\"grid\")\n c.itemconfig(\"grid\", fill=\"black\", width=2)\ndrawGrid()\nvectors = [1, 0]\nc.create_image(CELLSIZE//2, CELLSIZE//2+2, image=r, tag=\"arrow\")\nc.pack()\nscene.grid(row=0, column=2)\n# CopyRight\ncopyright = Frame(win)\nLabel(\n copyright,\n text=\"\\u00A9 Maxsior, 2016.\",\n font=(\"Arial\", 10)\n).pack(side=LEFT)\nlink = Label(\n copyright,\n text=\"http://maxsior.e3w.biz/\",\n fg=\"blue\",\n font=(\"Arial\", 10, \"underline\"),\n cursor=\"hand2\"\n )\nlink.bind(\"\", lambda e: webbrowser_open(\"http://maxsior.e3w.biz/\"))\nlink.pack(side=LEFT)\ncopyright.grid(row=1, columnspan=3)\n# запуск\nwin.mainloop()\n","sub_path":"Pers.py","file_name":"Pers.py","file_ext":"py","file_size_in_byte":27779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"302133985","text":"#!/usr/bin/python3\nprint(\"Content-type:text/html\")\nprint()\n\nimport pymysql\nimport confighelper\n\n\nclass PyMysqlHelper:\n \"\"\"数据库查询类\"\"\"\n\n DATABASE_HOST = confighelper.get_config(\"database\", \"database_host\")\n DATABASE_NAME = confighelper.get_config(\"database\", \"database_name\")\n DATABASE_PORT = confighelper.get_config(\"database\", \"database_port\")\n DATABASE_USER = confighelper.get_config(\"database\", \"database_user\")\n DATABASE_PASSWORD = confighelper.get_config(\"database\", \"database_password\")\n DATABASE_CHARSET = confighelper.get_config(\"database\", \"database_charset\")\n\n def __init__(self, p_dbname=None, p_dbhost=None):\n if p_dbname is None:\n self._dbname = self.DATABASE_NAME\n else:\n self._dbname = p_dbname\n if p_dbhost is None:\n self._dbhost = self.DATABASE_HOST\n else:\n self._dbhost = p_dbhost\n\n self._dbuser = self.DATABASE_USER\n self._dbpawd = self.DATABASE_PASSWORD\n self._dbcharset = self.DATABASE_CHARSET\n self._dbport = int(self.DATABASE_PORT)\n self._conn = self.get_con()\n if self._conn:\n self._cursor = self._conn.cursor()\n\n def get_con(self):\n \"\"\"获取数据连接\n :return:connect -> conn\n \"\"\"\n try:\n conn = pymysql.connect(host=self._dbhost,\n user=self._dbuser,\n passwd=self._dbpawd,\n db=self._dbname,\n port=self._dbport,\n charset=self._dbcharset)\n except pymysql.Error as e:\n print(\"database error:%s\" % e)\n conn = False\n return conn\n\n def select(self, sql):\n \"\"\"\n 查询方法,使用con.cursor(MySQLdb.cursors.DictCursor),返回结果为字典\n :param sql:sql是查询语句\n :return:返回查询结果\n \"\"\"\n try:\n self._cursor.execute(sql)\n res = self._cursor.fetchall()\n except pymysql.Error as e:\n print(\"database error:%s\" % e)\n 
res = False\n        return res\n\n    def update_by_param(self, sql, params):\n        \"\"\"\n        Update method with bound parameters, e.g. sql='insert into pythontest values(%s,%s,%s,now())', params=(6,'C#','good book')\n        :param sql:\n        :param params:\n        :return:\n        \"\"\"\n        try:\n            self._cursor.execute(sql, params)\n            self._conn.commit()\n            flag = True\n        except pymysql.Error as e:\n            self._conn.rollback()\n            flag = False\n            print(\"database error:%s\" % e)\n        return flag\n\n    def update(self, sql):\n        \"\"\"\n        Update method without bound parameters\n        :param sql:\n        :return:\n        \"\"\"\n        try:\n            self._cursor.execute(sql)\n            self._conn.commit()\n            flag = True\n        except pymysql.Error as e:\n            self._conn.rollback()\n            flag = False\n            print(\"database error:%s\" % e)\n        return flag\n\n    def close(self):\n        \"\"\"\n        Close the cursor and the database connection\n        :return:\n        \"\"\"\n        if self._conn:\n            try:\n                # the old type(...) == 'object' comparisons were always False,\n                # so nothing was ever closed\n                if self._cursor is not None:\n                    self._cursor.close()\n                self._conn.close()\n            except pymysql.Error as e:\n                print(\"database error:%s\" % e)\n","sub_path":"py/pymysqlhelper.py","file_name":"pymysqlhelper.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} {"seq_id":"260956908","text":"\"\"\"tag a test file with various types of trained networks\nUsage:\n    bilstmPredict.py <Repr> <modelFile> <inputFile> [--vocab=<vocabFile>] [--score_input]\n\nOptions:\n    <Repr>        the representation of the input, one of a,b,c,d\n    <modelFile>   the base name of the file where the model is stored\n    <inputFile>   the name of the input file. if the name resolves to a directory, the file name 'test' is assumed\n    --vocab=<vocabFile>  The name of the file where vocab is stored. If omitted, it is assumed to be in the same directory as the input file and named 'trainwords.pickle'\n    --score_input  Treat input file as a dev file (with labels). instead of output, compute the classifier's score on it\n\"\"\"\n\nimport dynet as dy\nfrom os import path\nfrom docopt import docopt\nimport bilstmTrain as bt\nimport pickle\nimport network_structure as networks\n\ncorpusEncoder = bt.corpusEncoder\n\ndef read_untagged(fname, dummy_tag):\n    sent = []\n    with open(fname, 'rt') as a:\n        for line in a:\n            line = line.strip()\n            if not line:\n                if len(sent): yield sent\n                sent = []\n            else:\n                w = line\n                p = dummy_tag\n                sent.append((w,p))\n\n    \ndef tag_test_corpus(test_data, network, encoder):\n    for sentence_to_tag in test_data:\n        tagged_sent = bt.decode_tagged_sent(bt.tag_sent(sentence_to_tag, network), encoder)\n        yield tagged_sent\n\nif __name__ == \"__main__\":\n    arguments = docopt(__doc__)\n    print(arguments)\n    input_file = path.abspath(arguments[\"<inputFile>\"])\n    \n    if path.isdir(input_file):\n        input_file = path.join(input_file,'test')\n\n    print('input file: {}'.format(input_file))\n    \n    input_dir = path.dirname(input_file)\n    output_file = path.join(input_dir, 'test_tagged')\n\n    encoder_file = arguments[\"--vocab\"] or path.join(input_dir, bt.ENCODER_FILE)\n\n    with open(encoder_file, 'rb') as a:\n        encoder = pickle.load(a)\n\n    Repr = arguments[\"<Repr>\"]\n    network_class = networks.choose_network_class(Repr)\n\n    model_file = arguments[\"<modelFile>\"]\n    pc = dy.ParameterCollection()\n    params = iter(dy.load(model_file, pc))\n    \n    network = network_class.load(pc,params, encoder)\n    \n    dummy_tag = list(encoder.tag_dict.keys())[-1]\n    #print(\"test data\", test_data[:20])\n    w = s = 0\n    if arguments[\"--score_input\"]:\n        acc, acc_ex0 = bt.test_a_classifier_on_dev(network,bt.read(input_file))\n        print(\"accuracy on dev: {}, excluding most common: {}\".format(acc,acc_ex0))\n    else:\n        test_data = list(read_untagged(input_file,dummy_tag))\n        with open(output_file, 'wt') as 
a:\n for sent in tag_test_corpus(test_data, network, encoder):\n s += 1\n for word, tag in sent:\n w += 1\n a.write(\"{} {}\\n\".format(word[0],tag))\n a.write(\"\\n\") # space between sentences\n print(\"finished tagging {} word in {} sentences\".format(w,s))","sub_path":"NLP-courses/89687-DL/Assignment3/code/bilstmPredict.py","file_name":"bilstmPredict.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"156577197","text":"\"\"\"\nGiven the array of strings A,\nyou need to find the longest string S which is the prefix of ALL the strings in the array.\n\nLongest common prefix for a pair of strings S1 and S2 is the longest string S which is the prefix of both S1\nand S2.\n\nFor Example, longest common prefix of \"abcdefgh\" and \"abcefgh\" is \"abc\".\n\"\"\"\n\n\ndef get_common(s1, s2):\n m, n =len(s1), len(s2)\n i, j = 0, 0\n while i < m and j < n:\n if s1[i] != s2[j]:\n break\n i += 1\n j += 1\n return i\n\ndef longestCommonPrefix(A):\n n = len(A)\n \n for i in range(1, n):\n A[i] = A[i][:get_common(A[i-1], A[i])]\n \n return A[-1]\n\ndef main():\n A = [\"abab\", \"ab\", \"abcd\"]\n print(f\"longest commong prefix:{longestCommonPrefix(A)}\")\n\nif __name__ == '__main__':\n main()\n ","sub_path":"Interviewbit_Questions/Strings/longest_common_prefix.py","file_name":"longest_common_prefix.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"140682230","text":"import bluetooth\n\nserver_sock=bluetooth.BluetoothSocket(bluetooth.L2CAP)\n\nport = 0x1001\nserver_sock.bind((\"\",port))\nserver_sock.listen(1)\nprint(\"listening on port %d\" % port)\n\nuuid = \"1e0ca4ea-299d-4335-93eb-27fcfe7fa848\"\nbluetooth.advertise_service(\n server_sock,\n \"FooBar Service\", \n bluetooth.AV_REMOTE_CLASS)\n # bluetooth.AV_REMOTE_PROFILE )\n\nclient_sock,address = server_sock.accept()\nprint(\"Accepted connection from \",address)\n\ndata = client_sock.recv(1024)\nprint(\"received [%s]\" % data)\n\nclient_sock.close()\nserver_sock.close()\n","sub_path":"rbp_server_2.py","file_name":"rbp_server_2.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"66226841","text":"\"\"\"\n\n665. 
Non-decreasing Array\n\nGiven an array with n integers, your task is to check if it could become non-decreasing by modifying at most 1 element.\n\nWe define an array is non-decreasing if array[i] <= array[i + 1] holds for every i (1 <= i < n).\n\nExample 1:\nInput: [4,2,3]\nOutput: True\nExplanation: You could modify the first 4 to 1 to get a non-decreasing array.\nExample 2:\nInput: [4,2,1]\nOutput: False\nExplanation: You can't get a non-decreasing array by modify at most one element.\nNote: The n belongs to [1, 10,000].\n\n\n\"\"\"\n\nclass Solution:\n def checkPossibility(self, nums: 'List[int]') -> 'bool':\n\n if len(nums) <= 1:\n return True\n\n count, first = 0, -1\n for i in range(len(nums)-1):\n if nums[i+1] < nums[i]:\n if first < 0:\n first = i\n count += 1\n if count > 1:\n return False\n\n if 0 < first < len(nums)-2:\n if nums[first] > nums[first+2] and nums[first+1] < nums[first-1]:\n return False\n # elif first >= len(nums)-2:\n # if nums[first+1] < nums[first-1]:\n # return False\n return True\n\n def checkPossibility2(self, nums):\n n = len(nums)\n pos, pairs = 0, 0\n for i, num in enumerate(nums):\n if i < n - 1 and num > nums[i+1]:\n pairs += 1\n if pairs == 1:\n pos = i\n if pairs >= 2:\n return False\n\n if 0 < pos < n - 2 and nums[pos + 1] < nums[pos - 1] and nums[pos] > nums[pos+2]:\n return False\n return True\n\n\nif __name__=='__main__':\n\n sol = Solution()\n method = sol.checkPossibility2\n\n cases = [\n (method, ([4,2,3],), True),\n (method, ([4,2,1],), False),\n (method, ([1,3,2],), True),\n (method, ([1,2,4,5,3],), True),\n (method, ([3,4,2,3],), False),\n (method, ([-1,4,2,3],), True),\n (method, ([2,3,3,2,4],), True),\n\n ]\n\n for i, (func, case, expected) in enumerate(cases):\n ans = func(*case)\n if ans == expected:\n print(\"Case {:d} Passed\".format(i + 1))\n else:\n print(\"Case {:d} Failed; Expected {:s} != {:s}\".format(i+1, str(expected), str(ans)))","sub_path":"algo/array/nondecreasing_array.py","file_name":"nondecreasing_array.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"446961972","text":"import FWCore.ParameterSet.Config as cms\n\n\n\nprocess = cms.Process(\"TEST\")\n\n# --- ============== --- --- --- --- --- --- --- --- --- --- --- --- \n# --- GENERAL THINGS --- --- --- --- --- --- --- --- --- --- --- --- \n# --- ============== --- --- --- --- --- --- --- --- --- --- --- --- \n\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True))\n\nprocess.load(\"Configuration.StandardSequences.Geometry_cff\")\nprocess.load('Configuration/StandardSequences/Services_cff')\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.MessageLogger.cerr.threshold = 'INFO'\n\nprocess.TFileService = cms.Service(\n \"TFileService\",\n fileName = cms.string(\"efficiencies.root\"),\n closeFileFast = cms.untracked.bool(True)\n )\n\n\n\n\n\n\n\n# --- ====== --- --- --- --- --- --- --- --- --- --- --- \n# --- INPUT --- --- --- --- --- --- --- --- --- --- --- \n# --- ====== --- --- --- --- --- --- --- --- --- --- --- \n\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1000))\n\nprocess.source = cms.Source(\n \"PoolSource\",\n debugFlag = cms.untracked.bool(True),\n debugVebosity = cms.untracked.uint32(1),\n fileNames = cms.untracked.vstring(\n 'file:/data/NTUPLES/VBF/qqHWW_lnujj/H200/CMSSWfile_10.root',\n )\n )\n\n\n\n\n\n\n# --- ==================== --- --- --- --- --- --- --- --- --- --- --- \n# --- THE VBF 
SELECTIONS --- --- --- --- --- --- --- --- --- --- --- \n# --- ==================== --- --- --- --- --- --- --- --- --- --- --- \n\n# --- THE LEPTON FILTER --- --- --- --- --- --- --- --- --- --- --- \n\nprocess.load(\"HiggsAnalysis.VBFHiggsToVV.VBFPtMinLeptonCountFilter_cfi\")\n\n\n# --- THE LEPTON DISTRIBUTIONS --- --- --- --- --- --- --- --- --- --- --- \n\nprocess.load(\"HiggsAnalysis.VBFHiggsToVV.VBFLeptonDistributions_cfi\")\nprocess.VBFLeptonDistributions.fileName = cms.untracked.string(\"VBFPtMinLeptonCountFilterTEST_leptonDistributions.root\")\n\n\n\n\n\n\n# --- ====== --- --- --- --- --- --- --- --- --- --- --- \n# --- OUTPUT --- --- --- --- --- --- --- --- --- --- --- \n# --- ====== --- --- --- --- --- --- --- --- --- --- --- \n\nprocess.load(\"Configuration.EventContent.EventContent_cff\")\n\nprocess.out = cms.OutputModule(\n \"PoolOutputModule\",\n process.AODSIMEventContent,\n verbose = cms.untracked.bool(True),\n fileName = cms.untracked.string('VBFPtMinLeptonCountFilterTEST.root'),\n )\n\nprocess.out.outputCommands.extend(cms.untracked.vstring('keep *_*_*_TEST'))\n\n\n\n\n\n\n# --- ====== --- --- --- --- --- --- --- --- --- --- --- \n# --- PATHS --- --- --- --- --- --- --- --- --- --- --- \n# --- ====== --- --- --- --- --- --- --- --- --- --- --- \n \nprocess.p = cms.Path(\n process.VBFPtMinLeptonCountFilter +\n process.VBFLeptonDistributionsSequence\n )\n\n#process.o = cms.EndPath( process.out )\n","sub_path":"HiggsAnalysis/VBFHiggsToVV/test/test_VBFPtMinLeptonCountFilter_cfg.py","file_name":"test_VBFPtMinLeptonCountFilter_cfg.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"215774570","text":"from consolemenu import *\nfrom consolemenu.items import *\n\nfrom connections.jogoDAO import JogoDAO\n\n# region methods\nid_jogo = 23\n\n\ndef add_mecanica():\n id_mecanica = input(\"insira a id do da mecanica que deseja inserir\\n\")\n j = JogoDAO()\n resp = j.add_mecanica(id_jogo, id_mecanica)\n print(resp)\n input(\"aperte qualquer tecla para sair\\n\")\n\n\ndef remove_mecanica():\n j = JogoDAO()\n print(j.sel_mecanica_with_id(id_jogo))\n id_mecanica = input(\"insira a id do da mecanica que deseja remover\\n\")\n resp = j.remove_mecanica(id_jogo, id_mecanica)\n print(resp)\n input(\"aperte qualquer tecla para sair\\n\")\n\n\ndef add_designer():\n id_designer = input(\"insira a id do designer que deseja inserir\\n\")\n j = JogoDAO()\n resp = j.add_designer(id_jogo, id_designer)\n print(resp)\n input(\"aperte qualquer tecla para sair\\n\")\n\n\ndef remove_designer():\n j = JogoDAO()\n print(j.sel_designer_with_id(id_jogo))\n id_designer = input(\"insira a id do designer que deseja remover\\n\")\n resp = j.remove_designer(id_jogo, id_designer)\n print(resp)\n input(\"aperte qualquer tecla para sair\\n\")\n\n\ndef add_categoria():\n id_categoria = input(\"insira a id do da categoria que deseja inserir\")\n j = JogoDAO()\n resp = j.add_categoria(id_jogo, id_categoria)\n print(resp)\n input(\"aperte qualquer tecla para sair\\n\")\n\n\ndef remove_categoria():\n j = JogoDAO()\n print(j.sel_categoria_with_id(id_jogo))\n id_categoria = input(\"insira a id do da categoria que deseja remover\")\n resp = j.remove_categoria(id_jogo, id_categoria)\n print(resp)\n input(\"aperte qualquer tecla para sair\\n\")\n# endregion\n\n\njogo_update_menu = ConsoleMenu(\"Atualizar jogo\", \"\")\njogo_update_menu.append_item(FunctionItem(\"Adicionar mecanica\", 
add_mecanica))\njogo_update_menu.append_item(FunctionItem(\"Remover mecanica\", remove_mecanica))\njogo_update_menu.append_item(FunctionItem(\"Adicionar designer\", add_designer))\njogo_update_menu.append_item(FunctionItem(\"Remover designer\", remove_designer))\njogo_update_menu.append_item(FunctionItem(\n \"Adicionar categoria\", add_categoria))\njogo_update_menu.append_item(FunctionItem(\n \"Remover categoria\", remove_categoria))\n\n\ndef show_menu():\n global id_jogo\n id_jogo = input(\"insira a id do jogo que deseja modificar\\n\")\n jogo_update_menu.show()\n","sub_path":"menus/jogo_update_menu.py","file_name":"jogo_update_menu.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"535476382","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# doscaal_profile module for OpenERP, Manage line per product\n# Copyright (C) 2017 SYLEAM Info Services ()\n# Alexandre Moreau \n#\n# This file is a part of doscaal_profile\n#\n# doscaal_profile is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# doscaal_profile is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom odoo import models, fields\n\n\nclass ProductTemplate(models.Model):\n _inherit = 'product.template'\n\n complexity = fields.Selection(selection=[(1, 'easy'), (2, 'normal'), (3, 'complex'), (4, 'hard')], string='Level', help='Help note')\n meal_ok = fields.Boolean(string='Meal', help='Help note')\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"modules/customer/addons/doscaal_profile/models/product_template.py","file_name":"product_template.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"524102603","text":"\"\"\"\n@brief Compute spectrum-weighted exposure correction for counts light\ncurves prepared by gtbin.\n\n@author J. 
Chiang \n\"\"\"\n#\n# $Header: /nfs/slac/g/glast/ground/cvs/users/jchiang/pyExposure/python/flux_lc.py,v 1.1 2006/05/28 14:40:27 jchiang Exp $\n#\nimport numarray as num\nfrom FunctionWrapper import FunctionWrapper\nfrom readXml import SourceModel\nimport pyLikelihood as pyLike\nfrom FitsNTuple import FitsNTuple\nimport pyExposure\n\ndef log_array(npts, xmin, xmax):\n xstep = num.log(xmax/xmin)/(npts - 1)\n return xmin*num.exp(num.arange(npts, type=num.Float)*xstep)\n\nclass ModelFunction(object):\n _funcFactory = pyLike.SourceFactory_funcFactory()\n def __init__(self, xmlFile, srcName):\n srcModel = SourceModel(xmlFile)\n spectrum = srcModel[srcName].spectrum\n self.func = self._funcFactory.create(spectrum.type)\n pars = spectrum.parameters\n for name in pars.keys():\n self.func.setParam(name, pars[name].value)\n def __call__(self, ee):\n foo = FunctionWrapper(lambda x : self.func.value(pyLike.dArg(x)))\n return foo(ee)\n\nclass Exposure(object):\n def __init__(self, lc_file, coords=None, ft2file='DC2_FT2_v2.fits',\n energies=None, irfs='DC2'):\n self.lc = FitsNTuple(lc_file, 'RATE')\n cuts = pyExposure.Cuts(lc_file, 'RATE', False)\n emin, emax = 20, 2e5\n for i in range(cuts.size()):\n my_cut = cuts.getCut(i)\n if my_cut.type() == 'SkyCone':\n my_cut = pyExposure.Cuts_castAsSkyConeCut(my_cut)\n self.ra = my_cut.ra()\n self.dec = my_cut.dec()\n if my_cut.type() == 'range':\n my_cut = pyExposure.Cuts_castAsRangeCut(my_cut)\n if my_cut.colname() == 'ENERGY':\n emin = my_cut.minVal()\n emax = my_cut.maxVal()\n energies = log_array(21, emin, emax)\n times = list(self.lc.TIME - self.lc.TIMEDEL/2.)\n times.append(self.lc.TIME[-1] + self.lc.TIMEDEL[-1]/2.)\n self.exposure = pyExposure.Exposure(ft2file, times, energies,\n self.ra, self.dec, irfs)\n def __getattr__(self, attrname):\n return getattr(self.exposure, attrname)\n def __call__(self, time, energy):\n return self.exposure.value(time, energy)\n def weightedAvgs(self, dnde):\n energies = self.energies()\n dnde_vals = dnde(energies)\n expvals = self.values()\n avg_exps = []\n dnde_avg = 0\n for k in range(len(energies) - 1):\n dnde_avg += ((dnde_vals[k+1]+dnde_vals[k])\n *(energies[k+1]-energies[k])/2.)\n for exprow in expvals:\n avg_exps.append(0)\n ff = dnde_vals*num.array(exprow)\n for k in range(len(energies) - 1):\n avg_exps[-1] += (ff[k+1]+ff[k])*(energies[k+1]-energies[k])/2.\n return num.array(avg_exps)/dnde_avg\n\nif __name__ == '__main__':\n import hippoplotter as plot\n \n ee = log_array(100, 20, 2e5)\n bpl = ModelFunction('solar_flare_bpl_model.xml', 'Solar Flare')\n# plot.scatter(ee, bpl(ee), xlog=1, ylog=1, pointRep='Line')\n\n exposure = Exposure('flare_lc.fits')\n\n my_exp = exposure.weightedAvgs(bpl)\n times = exposure.lc.TIME\n plot.xyplot(times - times[0]-644, exposure.lc.COUNTS/(my_exp+1),\n xerr=exposure.lc.TIMEDEL/2.,\n yerr=num.sqrt(exposure.lc.COUNTS)/(my_exp+1), ylog=1,\n pointRep='Column')\n","sub_path":"python/flux_lc.py","file_name":"flux_lc.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"326906329","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2019-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n\n\"\"\"\nExample: python data/vocab.txt data/train.txt\nvocab.txt: 1stline=word, 2ndline=count\n\"\"\"\n\nimport os\nimport numpy as np\nimport sys\nimport 
argparse\nimport torch\n\nfrom src.data.dictionary import Dictionary\n\ndef print_args(args):\n print(\"table:\\t{}\".format(args.table))\n print(\"table_label:\\t{}\".format(args.table_label))\n print(\"table_vocab:\\t{}\".format(args.table_vocab))\n\ndef main(args):\n if args.table_label is None:\n args.table_label = args.table + \"_label\"\n if args.table_vocab is None:\n args.table_vocab = args.table + \"_vocab\"\n\n assert os.path.isfile(args.table)\n assert os.path.isfile(args.table_label)\n assert os.path.isfile(args.table_vocab)\n\n print_args(args)\n\n table_dico = Dictionary.read_vocab(args.table_vocab)\n \n table_data = Dictionary.index_table(args.table, args.table_label, table_dico, args.table+\".pth\")\n\nif __name__ == '__main__':\n readme = \"\"\n parser = argparse.ArgumentParser(description=readme)\n parser.add_argument('--table', help = \"table data\")\n parser.add_argument('--table_label', help = \"table label\")\n parser.add_argument('--table_vocab', help = \"table vocab\")\n args = parser.parse_args()\n main(args)\n \n\n","sub_path":"model/preprocess_table_data.py","file_name":"preprocess_table_data.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"410105968","text":"#!/usr/bin/env python\n\nimport jinja2\nimport logging\nimport smtplib\nfrom email.mime.text import MIMEText\n\nfrom hc.monitor import Monitor\n\nlogger = logging.getLogger(\"Monitor.{}\".format(__name__))\n\n\nclass email_send(Monitor):\n\n def __init__(self, config, **args):\n Monitor.__init__(self, config, **args)\n\n self.emailto = self.getconfig(\"emailto\", default=\"\")\n self.emailfrom = self.getconfig(\"emailfrom\", default=\"test\")\n self.host = self.getconfig(\"host\", default=\"127.0.0.1\")\n self.port = int(self.getconfig(\"port\", default=25))\n self.timeout = int(self.getconfig(\"timeout\", default=10))\n self.user = self.getconfig(\"user\", default=\"\")\n self.password = self.getconfig(\"password\", default=\"\")\n self.ssl = self.getconfig(\"ssl\", default=\"\")\n self.dryrun = self.getconfig(\"dryrun\", default=False)\n self.templates_dir = self.getconfig(\"templates_dir\", default='conf/templates')\n self.template = self.getconfig(\"template\", default='')\n\n self.subject = self.getconfig(\"subject\", default='', resolve=False)\n self.body = self.getconfig(\"body\", default='', resolve=False)\n\n def runit(self):\n\n status = 0\n errs = ''\n data = {}\n\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.templates_dir))\n env.filters['res'] = self.filter_res\n\n if self.subject or self.body:\n subject = self.resolve_jinja(self.subject)\n body = self.resolve_jinja(self.body)\n else:\n email = env.get_template(self.template).render()\n email_spl = email.split('\\n')\n subject = email_spl[0]\n body = '\\n'.join(email_spl[1:])\n\n msgcl = MIMEText(body)\n msgcl['To'] = self.emailto\n msgcl['From'] = self.emailfrom\n msgcl['Subject'] = subject\n\n logger.debug('Sending:\\n{}'.format(msgcl))\n\n data = {'body': body, 'subject': subject}\n\n if self.dryrun:\n return status, data, {'dryrun': self.dryrun}, errs\n\n try:\n logger.debug('Connecting to server: {}'.format(self.host))\n smtpserver = smtplib.SMTP(self.host, self.port, timeout=self.timeout)\n if self.ssl:\n logger.debug('Using ssl')\n smtpserver.starttls()\n\n if self.user and self.password:\n logger.debug('Adding login info')\n smtpserver.login(self.user, self.password)\n\n logger.debug('Sending email from: 
{}'.format(self.emailfrom))\n logger.debug('Sending email to: {}'.format(self.emailto))\n smtpserver.sendmail(self.emailfrom, self.emailto, msgcl.as_string())\n\n smtpserver.close()\n\n except Exception as error:\n logger.error(\"Error: unable to send email: {}\".format(error))\n status = 2\n errs += str(error)\n\n return status, data, {}, errs\n","sub_path":"hc/modules/email_send.py","file_name":"email_send.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"382918970","text":"'''\nThe string \"PAYPALISHIRING\" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)\n\nP A H N\nA P L S I I G\nY I R\nAnd then read line by line: \"PAHNAPLSIIGYIR\"\n\nWrite the code that will take a string and make this conversion given a number of rows:\n\nstring convert(string s, int numRows);\n \n\nExample 1:\n\nInput: s = \"PAYPALISHIRING\", numRows = 3\nOutput: \"PAHNAPLSIIGYIR\"\nExample 2:\n\nInput: s = \"PAYPALISHIRING\", numRows = 4\nOutput: \"PINALSIGYAHRPI\"\nExplanation:\nP I N\nA L S I G\nY A H R\nP I\nExample 3:\n\nInput: s = \"A\", numRows = 1\nOutput: \"A\"\n \n\nConstraints:\n\n1 <= s.length <= 1000\ns consists of English letters (lower-case and upper-case), ',' and '.'.\n1 <= numRows <= 1000\n'''\nclass Solution:\n def convert(self, s: str, numRows: int) -> str:\n if numRows == 1 or numRows >= len(s):\n return s\n rows = [''] * numRows\n row = 0\n direction = 1\n for ch in s:\n rows[row] += ch\n row += direction\n if row == numRows-1 or row == 0:\n direction *= -1\n return ''.join(rows)\n\nimport unittest\nfunctions = [Solution().__getattribute__(f) for f in dir(Solution()) if not f.startswith('__')]\nclass Test(unittest.TestCase): \n def test1(self):\n for f in functions:\n self.assertEqual(f('PAYPALISHIRING', 3), 'PAHNAPLSIIGYIR', f.__name__)\n def test2(self):\n for f in functions:\n self.assertEqual(f('PAYPALISHIRING', 4), 'PINALSIGYAHRPI', f.__name__)\nunittest.main()","sub_path":"leetcode/LC6. ZigZag Conversion.py","file_name":"LC6. 
ZigZag Conversion.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"258700801","text":"import torch\nimport triton\n\nconfs = [\n    triton.testing.Benchmark(\n        x_names = ['N'],\n        x_vals = [128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192],\n        y_name = 'provider',\n        y_vals = ['triton', 'torch'],\n        y_lines = ['Triton', 'Torch'],\n        ylabel = 'GBPS',\n        plot_name = f'{mode}-2048',\n        args = {'M': 2048, 'dtype': torch.float16, 'mode': mode}\n    )\\\n    for mode in ['forward', 'backward']\n]\n\n\n@triton.testing.perf_report(confs)\ndef bench_op(M, N, dtype, mode, provider):\n    # create inputs\n    x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True)\n    idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda')\n    num_gb = (2 * x.numel() * x.element_size() * 1e-9)\n    gbps = lambda ms: num_gb / ms * 1e3\n    # forward pass\n    op = {'torch': torch.nn.CrossEntropyLoss(reduction='none'), \\\n          'triton': triton.ops.cross_entropy}[provider]\n    if mode == 'forward':\n        mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(x, idx))\n    if mode == 'backward':\n        y = op(x, idx)\n        dy = torch.randn_like(y)\n        fn = lambda: y.backward(dy, retain_graph=True)\n        mean_ms, min_ms, max_ms = triton.testing.do_bench(fn, grad_to_none=x)\n    return gbps(mean_ms), gbps(min_ms), gbps(max_ms)\n\n\nif __name__ == '__main__':\n    bench_op.run('tmp', False)","sub_path":"python/bench/bench_cross_entropy.py","file_name":"bench_cross_entropy.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"125444972","text":"import serial\nfrom threading import Thread\nfrom time import sleep\n\nclass FacciniEEG(object):\n    \"\"\"Faccini library, to get data from FacciniEEG.\n    Initialising: object1=FacciniEEG(\"COM6\",57600) #windows\n    After initialising, if required, the callbacks must be set;\n    then, using the start method, the library will start fetching data from the device\n    i.e. object1.start()\n    similarly the stop method can be called to stop fetching the data\n    i.e. object1.stop()\n    The data from the device can be obtained using either of the following methods or both of them together:\n    \n    Obtaining value: variable1=object1.channels\n    \n    Setting callback: a callback can be associated with all the above variables so that a function is called when the variable is updated. Syntax: setCallBack(\"variable\",callback_function)\n    for eg. 
to set a callback for channels data the syntax will be setCallBack(\"channels\",callback_function)\"\"\"\n\n    __ch1 = 0 \n    __ch2 = 0\n    __ch3 = 0\n    __ch4 = 0\n    __ch5 = 0\n    __ch6 = 0\n    __ch7 = 0\n    __ch8 = 0\n    \n    callBacksDictionary={} #keep a track of all callbacks\n    def __init__(self,port,baudRate=512000):\n        self.__serialPort = port\n        self.__serialBaudRate = baudRate\n        self.__packetsReceived = 0\n        \n        self.__parserThread = None\n        self.__threadRun = False\n        self.__srl = None\n        \n    def __del__(self): \n        if self.__threadRun == True:\n            self.stop()\n        \n    def start(self):\n        # Try to connect to serial port and start a separate thread\n        # for data collection\n        if self.__threadRun == True:\n            print(\"FacciniEEG has already started!\")\n            return\n        \n        if self.__srl == None:\n            try:\n                self.__srl = serial.Serial(self.__serialPort,self.__serialBaudRate)\n            except serial.serialutil.SerialException as e:\n                raise e\n        else:\n            self.__srl.open()\n\n        self.__srl.write([0x01, 0x02, 0xD0, 0x03]) #RESET\n        sleep(0.03)\n        self.__srl.flushInput()\n        self.__srl.write([0x01, 0x02, 0x82, 0x03]) #Set rate 500 hz\n        sleep(0.03)\n        self.__srl.write([0x01, 0x02, 0x10, 0x03]) #Command that starts the board's acquisition\n        sleep(0.03)\n        self.__packetsReceived = 0\n        self.__parserThread = Thread(target=self.__packetParser, args = ())\n        self.__threadRun=True\n        self.__parserThread.start()\n        \n    def __packetParser(self):\n        \"packetParser runs continuously in a separate thread to parse packets from Faccini and update the corresponding variables\"\n        '''The packets sent have the form:\n        1 start byte - Check\n        2 header bytes\n        1 end-of-header byte - Check\n        24 data bytes (plus others, which we have disabled)\n        1 end-of-data byte - Check\n        \n        To lose as little data as possible, look for the first check byte; if it is correct, read\n        3 bytes and verify the second check; if that is correct, read 25 bytes and verify the third.\n        If all of them are correct, the data can be saved.\n        \n        The 24 data bytes correspond to 8 channels, with 3 bytes of precision per value, sent from the most\n        significant byte to the least significant. The values are reconstructed with a bit-shift operation and\n        then converted to float.\n        \n        The header part was meant to support sending additional data (a Fourier \n        transform) but it never worked and was disabled, so the headers carry nothing useful - we still\n        have to consume them anyway. The header structure is generally a mess; the part we care about\n        is the first bit of the first header. If it is 0, it means the board has not sent data and is\n        only sending headers and check bytes.\n        \n        \n        '''\n        while self.__threadRun:\n            \n            def read_hexs(num):\n                res = [ int(self.__srl.read(1).encode('hex'),16) for i in range(0,num) ]\n                return res\n            \n            packet1= read_hexs(1)[0] \n            while (packet1!=0x01) and self.__threadRun: #Keep reading until the start byte is received\n                print('Packet 1 discarded. 
Received {} instead of {}'.format(packet1, 0x01))\n                packet1= int(self.__srl.read(1).encode('hex'),16)\n            else:\n                if self.__threadRun == False:\n                    break\n                packet2= read_hexs(3) #Check that the second marker byte was received\n                if (packet2[2]!= 0x02):\n                    print('Packet 1,2 discarded')\n                    break\n\n                header1 = packet2[0]\n                \n                flagRawData = header1 >> 7; #The first bit of header1 is 1 if data was sent, 0 otherwise\n                \n                rawDataSize = 24\n                \n                dataBytesToRead = flagRawData*rawDataSize + 1;\n                \n                packet3= read_hexs(dataBytesToRead)\n                \n                if (packet3[dataBytesToRead - 1] != 0x03): #Check that the third marker byte was received\n                    print('Packet 1,2,3 discarded')\n                    break\n                \n                #valid packet received\n                self.__packetsReceived += 1\n                print(self.__packetsReceived)\n                tmp_channels = []\n                for index in range(0, rawDataSize/3):\n                    msb = packet3[3*index]\n                    lsb1 = packet3[3*index+1]\n                    lsb2 = packet3[3*index+2]\n                    \n                    reconstructedValue = (msb << 16) | (lsb1 << 8) | (lsb2)\n                    \n                    if ((msb>>7) == 1):\n                        reconstructedValue = reconstructedValue | 0xff000000 #Convert from an unsigned-only value to two's complement\n                    \n                    tmp_channels.append( float(reconstructedValue) )\n                self.ch1 = tmp_channels[0]\n                self.ch2 = tmp_channels[1]\n                self.ch3 = tmp_channels[2]\n                self.ch4 = tmp_channels[3]\n                self.ch5 = tmp_channels[4]\n                self.ch6 = tmp_channels[5]\n                self.ch7 = tmp_channels[6]\n                self.ch8 = tmp_channels[7]\n        \n    def stop(self):\n        # Stops a running parser thread\n        if self.__threadRun == True:\n            self.__threadRun=False\n            self.__srl.write([0x01, 0x02, 0x20, 0x03]) #Command that stops the board's acquisition\n            print(\"Device Stopped\")\n            self.__parserThread.join()\n            self.__srl.close()\n        \n        \n    def setCallBack(self,variable_name,callback_function):\n        \"\"\"Setting callback: a callback can be associated with all the above variables so that a function is called when the variable is updated. Syntax: setCallBack(\"variable\",callback_function)\n        for eg. 
to set a callback for attention data the syntax will be setCallBack(\"channels\",callback_function)\"\"\"\n self.callBacksDictionary[variable_name]=callback_function\n \n #setting getters and setters for all variables\n \n #packets received\n @property\n def packetsReceived(self):\n return self.__packetsReceived\n \n @property\n def bytesAvailable(self):\n if self.__threadRun:\n return self.__srl.inWaiting()\n else:\n return -1\n \n #ch1 (rawValue)\n @property\n def ch1(self):\n \"Get value for ch1\"\n return self.__ch1\n @ch1.setter\n def ch1(self,value):\n self.__ch1=value\n if self.callBacksDictionary.has_key(\"ch1\"): #if callback has been set, execute the function\n self.callBacksDictionary[\"ch1\"](self.__ch1)\n \n #ch2 (rawValue)\n @property\n def ch2(self):\n \"Get value for ch2\"\n return self.__ch2\n @ch2.setter\n def ch2(self,value):\n self.__ch2=value\n if self.callBacksDictionary.has_key(\"ch2\"): #if callback has been set, execute the function\n self.callBacksDictionary[\"ch2\"](self.__ch2)\n \n #ch3 (rawValue)\n @property\n def ch3(self):\n \"Get value for ch3\"\n return self.__ch3\n @ch3.setter\n def ch3(self,value):\n self.__ch3=value\n if self.callBacksDictionary.has_key(\"ch3\"): #if callback has been set, execute the function\n self.callBacksDictionary[\"ch3\"](self.__ch3)\n\n #ch4 (rawValue)\n @property\n def ch4(self):\n \"Get value for ch4\"\n return self.__ch4\n @ch4.setter\n def ch4(self,value):\n self.__ch4=value\n if self.callBacksDictionary.has_key(\"ch4\"): #if callback has been set, execute the function\n self.callBacksDictionary[\"ch4\"](self.__ch4)\n\n #ch5 (rawValue)\n @property\n def ch5(self):\n \"Get value for ch5\"\n return self.__ch5\n @ch5.setter\n def ch5(self,value):\n self.__ch5=value\n if self.callBacksDictionary.has_key(\"ch5\"): #if callback has been set, execute the function\n self.callBacksDictionary[\"ch5\"](self.__ch5)\n\n #ch6 (rawValue)\n @property\n def ch6(self):\n \"Get value for ch6\"\n return self.__ch6\n @ch6.setter\n def ch6(self,value):\n self.__ch6=value\n if self.callBacksDictionary.has_key(\"ch6\"): #if callback has been set, execute the function\n self.callBacksDictionary[\"ch6\"](self.__ch6)\n\n #ch7 (rawValue)\n @property\n def ch7(self):\n \"Get value for ch7\"\n return self.__ch7\n @ch7.setter\n def ch7(self,value):\n self.__ch7=value\n if self.callBacksDictionary.has_key(\"ch7\"): #if callback has been set, execute the function\n self.callBacksDictionary[\"ch7\"](self.__ch7)\n\n #ch8 (rawValue)\n @property\n def ch8(self):\n \"Get value for ch8\"\n return self.__ch8\n @ch8.setter\n def ch8(self,value):\n self.__ch8=value\n if self.callBacksDictionary.has_key(\"ch8\"): #if callback has been set, execute the function\n self.callBacksDictionary[\"ch8\"](self.__ch8)\n\n","sub_path":"Client/Devices/FacciniEEGLib.py","file_name":"FacciniEEGLib.py","file_ext":"py","file_size_in_byte":10198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"170537740","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 10 10:33:36 2017\n\n@authors: Sanjay Kottapalli, Tiezheng Yuan\n\"\"\"\nimport params\nimport phip_analysis\nimport os\nimport timeit\n\nif __name__==\"__main__\":\n\t''' Main method for exuting the AVARDA pipeline.\n\tAll necessary input goes into the variables file.\n\t'''\n\ttime1 = timeit.default_timer()\n\tpath = os.getcwd().replace('\\\\' , '/').replace('bin','input') + '/'\n\tvar_file = path+'variables_virus.txt'\n\tprint(\"Variables file: \" + var_file)\n\tpar = 
params.file_IO(var_file, '=').file_to_dict()\n\tpar = params.param_dict(par).adjust_par()\n\tprint(\"Alignment file: \" + par['dir_input'] + par['file_aln'])\n\tprint(\"Z-score file: \" + par['dir_input'] + par['zscore_file'])\n\tprint(\"---------------------------------------------------------------\")\n\tphip = phip_analysis.phip(par)\n\ttime2 = timeit.default_timer()\n\tprint(\"Total time: \" + str(time2-time1))\n\n# End","sub_path":"bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"371155899","text":"from pathlib import Path\nimport pandas as pd\nimport os\n\n\nfrom pyzbar import pyzbar\nimport argparse\nimport cv2\n\nfrom PIL import Image\n# Set where the images are stored\nDATASET_PATH = 'REG'\n# Set where the .csv file is stored\nOUTPUT_FILE = 'index.csv'\n\nroot_dir = Path(DATASET_PATH)\nitems = root_dir.iterdir()\ndir_path = []\nfilename = []\nhn = []\ndoc_type = []\nbarcode_form = []\n# Loop over the images\nfor item in items:\n    if item.is_dir():\n        # print(item.name)\n        \n        # One image may have several pages; loop one level deeper, per page\n        for sub_dir in item.iterdir():\n            for file in sub_dir.iterdir():\n                if (file.is_file()):\n                    file_name = file.name.split('.')[0]\n\n                    directory = 'REG2/'+item.name+'/'+ str(sub_dir.name)\n                    newfile = directory+'/'+str(file_name)+'.jpg'\n                    image = Image.open(file)\n                    if not os.path.exists(directory):\n                        os.makedirs(directory)\n                        print('copy To => '+directory)\n                        \n                        \n                        # newfile = image.convert('L').save(directory+'/'+str(file_name)+'.jpg')\n                        print(newfile)\n                    else:\n                        if (os.path.isfile(newfile)):\n                            print('Is File => '+newfile)\n                        else:\n                            image.convert('L').save(directory+'/'+str(file_name)+'.jpg')\n                            print('convert File => '+newfile)\n\n                    \n\n                    # Read the barcode \n                    image = cv2.imread(str(file))\n                    barcodes = pyzbar.decode(image)\n                    for barcode in barcodes:\n                        (x, y, w, h) = barcode.rect\n                        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n                        barcodeData = barcode.data.decode(\"utf-8\")\n                        barcodeType = barcode.type\n                        # text = \"{} ({})\".format(barcodeData, barcodeType)\n                        text = \"{}\".format(barcodeData)\n                        # print(sub_dir.name)\n\n                        dir_path.append(sub_dir)\n                        filename.append(file_name)\n                        hn.append(item.name)\n                        doc_type.append(sub_dir.name)\n                        barcode_form.append(text)\n                        \n\n    raw_data = {'hn': hn, 'dir_path': dir_path, 'type':doc_type, 'filename': filename,'barcode':barcode_form}\n    df = pd.DataFrame(raw_data, columns = ['hn','dir_path', 'type', 'filename','barcode'])\n    df.to_csv(OUTPUT_FILE)\n","sub_path":"web/to_csv.py","file_name":"to_csv.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"421944141","text":"import web3\nimport time\nimport eth_account.messages\nimport web3.contract\nimport sys\nimport socket\nfrom threading import Thread, Lock\nfrom lib import *\nfrom ChunkedFiles import *\nfrom ChunkedFilesValidator import *\nimport json\nfrom lib import w3\nimport traceback\n\nHOST = '127.0.0.1' # Standard loopback interface address (localhost)\nPORT = 29290 # Port to listen on (non-privileged ports are > 1023)\n\nclass CustomerInterface: # This class is intended for the customer to interact with the blockchain. 
It doesn't generate any questions, and doesn't interact with the provider.\n def __init__(self, address):\n self.customer = Customer(address)\n self.qas = {}\n self.subscription_address = None\n\n def is_subscription_active(self):\n return self.customer.is_subscription_active()\n\n\n def join_subscription(self, subscription_address):\n self.customer.join_subscription(subscription_address)\n self.subscription_address = subscription_address\n\n def register_question(self, question):\n if not self.customer.get_validator().is_valid_question(question):\n print(\"Question invalid!!!\")\n return False\n q_hash = Signer.hash(question)\n if q_hash not in self.qas:\n self.qas[q_hash] = QA(question)\n #print(\"registerd:\",q_hash)\n return True\n\n def register_answer(self, question, answer):\n if not self.customer.validator.is_answer_correct(question, answer):\n print(\"Tried to register incorrect answer!\")\n return\n q_hash = Signer.hash(question)\n done = False\n if q_hash not in self.qas:\n self.qas[q_hash] = QA(question, answer=answer)\n else:\n self.qas[q_hash].set_answer(answer)\n\n def get_all_hashes(self):\n answered = []\n unanswered = []\n for qa in self.qas.values():\n if qa.is_answered():\n answered.append(qa.get_hash())\n else:\n unanswered.append(qa.get_hash())\n return answered + unanswered, len(unanswered)\n\n def get_signed_hashes(self):\n hashes, unanswered = self.get_all_hashes()\n return hashes, unanswered, self.sign_hashes(hashes, unanswered)\n\n def sign_hashes(self, hashes, unanswered):\n return Signer.sign(hashes, unanswered, self.subscription_address, self.customer.address)\n\n def get_answer(self, question):\n q_hash = Signer.hash(question)\n if q_hash in self.qas:\n return self.qas[q_hash].get_answer()\n return None\n\n def check_demand(self):\n ret = self.customer.check_demand()\n if ret is not None:\n question, answer = ret\n self.register_answer(question, answer)\n return ret\n\n def resolve_demand(self):\n demand = self.check_demand()\n if demand is not None:\n hashes, unanswered, signature = self.get_signed_hashes()\n print(\"Providing: \", hashes, unanswered)\n self.provide_signature(hashes, unanswered, signature)\n return demand\n\n def provide_signature(self, hashes, unanswered, signature=None):\n if signature is None:\n signature = self.sign_hashes(hashes, unanswered)\n try:\n self.customer.provide_signature(hashes, unanswered, signature)\n except Exception as e:\n print(\"Coudn't provide signature:\", e)\n traceback.print_tb(e.__traceback__)\n\n def get_all_answers(self):\n questions = []\n answers = []\n for qa in self.qas.values():\n if qa.is_answered():\n questions.append(qa.get_question())\n answers.append(qa.get_answer())\n return questions, answers\n\n def appeal(self, question, hashes=None, unanswered=None):\n if not self.register_question(question):\n print(\"Couldn't appeal: question not registered\")\n return False\n signature = None\n if hashes is None:\n hashes, unanswered, signature = self.get_signed_hashes()\n else:\n signature = self.sign_hashes(hashes, unanswered)\n try:\n #print(\"Appealing with:\", Coder.str_question(question), hashes, unanswered, signature)\n #print(QA(question).get_hash())\n if not self.customer.appeal(question, hashes, unanswered, signature):\n raise \"\"\n except Exception as e:\n print(\"Couldn't appeal:\", e)\n traceback.print_tb(e.__traceback__)\n return False\n return True\n\n def check_appeal(self):\n answer = self.customer.check_appeal()\n if answer is not None:\n answer = answer\n 
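# NOTE: the 'answer = answer' assignment above is a no-op, apparently left over from an earlier decoding step.\n            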
self.register_answer(self.customer.get_question_appealed(), answer)\n return answer\n\n def withdraw(self):\n return self.customer.withdraw()\n\n# Class for interaction between threads and socket thread\nclass CommandsList():\n def __init__(self):\n self.commands = []\n self.inputs = []\n \n def insert_command(self, msg):\n self.commands.append(msg)\n\n def get_last_input(self):\n if len(self.inputs) < 1:\n return None\n ret, self.inputs = self.inputs[-1], self.inputs[:-1]\n return ret\n\n def insert_input(self, inp):\n self.inputs.append(inp)\n\n def next(self):\n if len(self.commands) < 1:\n return None\n ret, self.commands = self.commands[0], self.commands[1:]\n return ret\n\ndef handle_socket_customer(host, port, cmd_list, lock, QUIT_MSG):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((host, port))\n s.settimeout(0.2)\n while True:\n time.sleep(0.1)\n inp = None\n try:\n inp = receive_dict(s)\n if inp == None:\n #print(\"CLOSING...\")\n print(\"\\nSocket connection was closed by provider.\")\n s.close()\n break\n except socket.timeout:\n pass\n except Exception as e:\n print(e)\n #print(\"CLOSING...\")\n s.close()\n break\n lock.acquire()\n try:\n if inp is not None:\n cmd_list.insert_input(inp)\n #print(\"GOT MSG: \", inp)\n msg = cmd_list.next()\n finally:\n lock.release()\n if msg is None:\n time.sleep(0.5)\n else:\n if msg == QUIT_MSG:\n print(\"\\nSocket connection was closed by you.\")\n s.close()\n break\n #print(\"SENDING MSG: \", msg)\n send_dict(s, msg)\n \n\ndef init_customer(address, host, port):\n #provider_int = ProviderInterface(address)\n #provider_lock = Lock()\n #x = Thread(target=handle_provider, args=(provider_lock, provider_int))\n #x.start()\n to_join = []\n customer_int = CustomerInterface(address)\n cmd_list = CommandsList()\n lock = Lock()\n customer_lock = Lock()\n QUIT_MSG = {\"close\":True}\n x = Thread(target=handle_socket_customer, args=(host, port, cmd_list, lock, QUIT_MSG))\n x.start()\n to_join.append(x)\n print(\"Sending address...\")\n msg = {\"type\": \"address\", \"address\": str(address)}\n lock.acquire()\n try:\n cmd_list.insert_command(msg)\n finally:\n lock.release()\n print(\"Waiting for subscription address...\")\n while True:\n lock.acquire()\n try:\n msg = cmd_list.get_last_input()\n finally:\n lock.release()\n if msg is not None and \"type\" in msg and msg[\"type\"] == \"subscription\" and \"address\" in msg:\n customer_int.join_subscription(msg[\"address\"])\n break\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n #------------------------------------------------\n x = Thread(target=auto_customer_background, args=(customer_int, customer_lock))\n x.start()\n to_join.append(x)\n auto_customer(customer_int, customer_lock, cmd_list, lock, user_input=True, only_appeals=False, sending_ack=True, auto_file=True, num_of_questions=10)\n #------------------------------------------------\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''\n lock.acquire()\n try:\n cmd_list.insert_command(QUIT_MSG)\n finally:\n lock.release()\n for x in to_join:\n x.join()\n\n\ndef user_customer(customer_int, cmd_list, lock):\n print(\"Joined Subscription!\\n\")\n print(\"Commands:\")\n print(\"q - exit\")\n print(\"new - register new question and send to provider\")\n print(\"check - check for new answers from provider\")\n print(\"get - get specific answer if submitted by provider\")\n print(\"ackall - sign all answers submitted by provider\")\n print(\"appeal - appeal a question\")\n print(\"status - check appeal and active\")\n 
print(\"demand - check if provider demanded signature\")\n print(\"resolve - resolve provider's signature demand\")\n print(\"withdraw - withdraw funds from contract\")\n while(True):\n value = input(\"$>> \")\n if value == \"q\":\n break\n elif value == \"new\":\n # register new question and send to provider\n print(\"Input new question:\")\n try:\n question = Solver.input()\n except Exception as e:\n traceback.print_tb(e.__traceback__)\n continue\n #print(\"Got new question:\",question)\n if not customer_int.register_question(question):\n print(\"Warning: invalid question\")\n hashes, unanswered, signature = customer_int.get_signed_hashes()\n #print(\"providing: \", hashes, unanswered)\n msg = {\n \"type\": \"new_question\", \n \"question\": Coder.encoded_to_stream(question),\n \"hashes\": bytes_to_str(hashes),\n \"unanswered\": unanswered,\n \"signature\": bytes_to_str(signature)}\n lock.acquire()\n try:\n cmd_list.insert_command(msg)\n finally:\n lock.release()\n print(\"Sent question:\", Coder.str_question(question))\n elif value == \"check\":\n msg = {\"type\": \"send_answer\"}\n lock.acquire()\n try:\n cmd_list.insert_command(msg)\n finally:\n lock.release()\n while True:\n time.sleep(0.2)\n lock.acquire()\n try:\n msg = cmd_list.get_last_input()\n finally:\n lock.release()\n if msg is not None:\n break\n answers = []\n questions = []\n answers_stream = []\n questions_stream = []\n if \"type\" in msg and msg[\"type\"] == \"answer\" and \"answers\" in msg and \"questions\" in msg:\n answers_stream = msg[\"answers\"]\n questions_stream = msg[\"questions\"]\n\n for i in range(min(len(questions_stream), len(answers_stream))):\n answers.append(Coder.stream_to_encoded(answers_stream[i]))\n questions.append(Coder.stream_to_encoded(questions_stream[i]))\n print(\"Got answers and questions:\")\n for i in range(min(len(answers), len(questions))):\n answer = answers[i]\n question = questions[i]\n customer_int.register_answer(question, answer)\n print(Coder.str_question(question), \"->\", Coder.str_answer(answer))\n # get next answer from provider\n pass\n elif value == \"get\":\n # get specific answer\n try:\n question = Solver.input()\n except Exception as e:\n traceback.print_tb(e.__traceback__)\n continue\n answer = customer_int.get_answer(question)\n if answer is None:\n print(\"Got no answer yet.\")\n else:\n print(\"Answer: \", answer)\n elif value == \"ackall\":\n # sign all answers submitted by provider\n #questions, answers = customer_int.get_all_answers()\n hashes, unanswered, signature = customer_int.get_signed_hashes()\n msg = {\n \"type\": \"ack\",\n \"hashes\": bytes_to_str(hashes),\n \"unanswered\": unanswered,\n \"signature\": bytes_to_str(signature)\n }\n lock.acquire()\n try:\n cmd_list.insert_command(msg)\n finally:\n lock.release()\n print(\"Sent ack for all answers\")\n elif value == \"appeal\":\n try:\n question = Solver.input()\n except Exception as e:\n traceback.print_tb(e.__traceback__)\n continue\n if not customer_int.appeal(question):\n #print(\"Couldn't appeal - invalid question.\")\n pass\n elif value == \"status\":\n print(\"Check appeal status: \", customer_int.check_appeal())\n elif value == \"demand\":\n ret = customer_int.check_demand()\n if ret is None:\n print(\"No demand from provider.\")\n else:\n print(\"Provider demanded signature for: \")\n question, answer = ret\n print(Coder.str_question(question), \"->\", Coder.str_answer(answer))\n elif value == \"resolve\":\n ret = customer_int.resolve_demand()\n if ret is None:\n print(\"No demand from 
provider.\")\n else:\n print(\"Resolved demand for: \")\n question, answer = ret\n print(Coder.str_question(question), \"->\", Coder.str_answer(answer))\n elif value == \"withdraw\":\n amount = customer_int.withdraw()\n if amount > 0:\n print(\"Withdrew:\", amount)\n break\n else:\n print(\"No funds to withdraw\")\n else:\n print(\"[x] Unknown command:\", value)\n\n\ndef auto_customer(customer_int, customer_lock, cmd_list, lock, user_input=False, only_appeals=False, sending_ack=False, auto_file=False, num_of_questions=3):\n # Generate all Questions\n questions = []\n answers = []\n if auto_file:\n filename, questions = generate_file_questions(customer_int, customer_lock)\n else:\n for x in range(num_of_questions):\n question = None\n if user_input:\n print(\"Input next question:\")\n try:\n question = Solver.input()\n except Exception as e:\n traceback.print_tb(e.__traceback__)\n continue\n else:\n question = Solver.generate()\n questions.append(question)\n\n # Send Questions\n for question in questions:\n # Register Question\n customer_lock.acquire()\n try:\n customer_int.register_question(question)\n #print(\"registed question\")\n except Exception as e:\n traceback.print_tb(e.__traceback__)\n print(e)\n finally:\n customer_lock.release() \n\n # Announce Question to Provider(?)\n if only_appeals:\n print(\"Generated question secretly:\", Coder.str_question(question))\n else:\n customer_lock.acquire()\n try:\n hashes, unanswered, signature = customer_int.get_signed_hashes()\n finally:\n customer_lock.release() \n msg = {\n \"type\": \"new_question\", \n \"question\": Coder.encoded_to_stream(question),\n \"hashes\": bytes_to_str(hashes),\n \"unanswered\": unanswered,\n \"signature\": bytes_to_str(signature)}\n lock.acquire()\n try:\n cmd_list.insert_command(msg)\n finally:\n lock.release()\n print(\"Generated and sent question:\", Coder.str_question(question))\n\n # Wait for Answer from Provider\n t = 0\n T = 50\n if only_appeals:\n T = 1\n appealed_block = 0\n answer = None\n ask_again = True\n printed_waiting = False\n while True:\n #print(\".\")\n # Break if inactive\n customer_lock.acquire()\n try:\n active = customer_int.is_subscription_active()\n finally:\n customer_lock.release() \n if not active:\n print(\"Subscription ended, closing main thread.\")\n break\n\n t += 1\n time.sleep(0.1)\n\n answer = None\n customer_lock.acquire()\n try:\n answer = customer_int.get_answer(question)\n finally:\n customer_lock.release() \n if answer is not None:\n print(\"Got answer from demand!\")\n break\n\n # Appeal Question\n if t == T:\n print(\"Appealing question - took too long for provider to respond.\")\n customer_lock.acquire()\n try:\n customer_int.appeal(question)\n finally:\n customer_lock.release() \n appealed_block = w3.eth.blockNumber\n printed_waiting = False\n\n # Check if Appeal Resolved\n if t > T:\n customer_lock.acquire()\n try:\n answer = customer_int.check_appeal()\n finally:\n customer_lock.release() \n \n if answer is not None:\n customer_lock.acquire()\n try:\n customer_int.register_answer(question, answer)\n finally:\n customer_lock.release()\n print(\"Appeal resolved by provider!\")\n break \n \n if only_appeals:\n print(\"Only appeals\")\n continue\n \n # Ask for Answers from Provider\n if ask_again:\n #print(\"asking again\")\n msg = {\"type\": \"send_answer\"}\n lock.acquire()\n try:\n cmd_list.insert_command(msg)\n finally:\n lock.release()\n ask_again = False\n\n # Receive Answers\n\n msg = None\n lock.acquire()\n try:\n msg = cmd_list.get_last_input()\n finally:\n 
lock.release()\n if msg is not None:\n ask_again = True\n else:\n continue\n answers_ = []\n questions_ = []\n answers_stream = []\n questions_stream = []\n if \"type\" in msg and msg[\"type\"] == \"answer\" and \"answers\" in msg and \"questions\" in msg:\n answers_stream = msg[\"answers\"]\n questions_stream = msg[\"questions\"]\n\n for i in range(min(len(answers_stream), len(questions_stream))):\n answer_ = Coder.stream_to_encoded(answers_stream[i])\n question_ = Coder.stream_to_encoded(questions_stream[i])\n answers_.append(answer_)\n questions_.append(question_)\n customer_lock.acquire()\n try:\n customer_int.register_answer(question_, answer_)\n finally:\n customer_lock.release()\n\n # Send Ack for Answers\n if sending_ack:\n # sign all answers submitted by provider\n #questions, answers = customer_int.get_all_answers()\n customer_lock.acquire()\n try:\n hashes, unanswered, signature = customer_int.get_signed_hashes()\n finally:\n customer_lock.release()\n \n msg = {\n \"type\": \"ack\",\n \"hashes\": bytes_to_str(hashes),\n \"unanswered\": unanswered,\n \"signature\": bytes_to_str(signature)\n }\n lock.acquire()\n try:\n cmd_list.insert_command(msg)\n finally:\n lock.release()\n print(\"Sent ack for all answers\")\n\n if question not in questions_:\n if not printed_waiting:\n print(\"question not answered - waiting...\")\n printed_waiting = True\n time.sleep(0.1)\n continue\n\n print(\"Received answer from provider.\")\n\n got_correct = False\n for i in range(len(questions_)):\n if questions_[i] == question:\n answer = answers_[i]\n ret = False\n customer_lock.acquire()\n try:\n ret = customer_int.customer.validator.is_answer_correct(question, answer)\n finally:\n customer_lock.release()\n if not ret:\n if t < T:\n print(\"Answer incorrect!\")\n t = T-1\n else:\n got_correct = True\n if got_correct:\n break\n if answer is not None:\n print(\"Got answer: \", Coder.str_answer(answer))\n answers.append(answer)\n else:\n print(\"Got no answer.\")\n customer_lock.acquire()\n try:\n active = customer_int.is_subscription_active()\n finally:\n customer_lock.release() \n if not active:\n break\n if auto_file:\n file = open('./FilesReceived/' + filename, 'wb')\n for answer in answers:\n answer = Coder.decode_answer(answer)\n print(answer[1])\n file.write(answer[1])\n file.close()\n print(\"Saved file to ./FilesReceived/\" + filename)\n if len(answers) < len(questions):\n print(\"File saved is partial - not all answers recevied.\")\n\n# Resolve Demands\ndef auto_customer_background(customer_int, customer_lock):\n while True:\n active = False\n customer_lock.acquire()\n try:\n active = customer_int.is_subscription_active()\n finally:\n customer_lock.release() \n if not active:\n print(\"Subscription ended, closing background thread.\")\n break\n time.sleep(0.2)\n customer_lock.acquire()\n try:\n ret = customer_int.resolve_demand()\n finally:\n customer_lock.release() \n if not (ret is None):\n print(\"Resolved demand for: \")\n question, answer = ret\n print(Coder.str_question(question), \"->\", Coder.str_answer(answer))\n customer_lock.acquire()\n try:\n amount = customer_int.withdraw()\n finally:\n customer_lock.release()\n print(\"Withdrew funds:\", amount)\n return\n\n# Generate Queries for all chunks of a File\ndef generate_file_questions(customer_int, customer_lock):\n filename = input(\"File name:\")\n customer_lock.acquire()\n try:\n chunks = customer_int.customer.validator.contract.functions.get_chunks_num(filename).call()\n finally:\n customer_lock.release()\n questions = []\n for x 
in range(chunks):\n        question = Coder.encode_question([filename, x])\n        questions.append(question)\n    return filename, questions\n\n\nif __name__ == '__main__':\n    #print(sys.argv)\n    #print(len(sys.argv))\n    if(len(sys.argv) < 2):\n        print(\"USAGE: address [port]\")\n        sys.exit()\n    address = sys.argv[1]\n    port = PORT\n    if(len(sys.argv) > 2):\n        port = int(sys.argv[2])\n    from main import HOST\n    init_customer(address, HOST, port)\n\n\n    \n\n    \n\n\n\n","sub_path":"Chunked Files Demo/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":24166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"459850164","text":"import cv2\r\n\r\nimg = cv2.imread('futbol.jpg')\r\n\r\n\r\n\r\nprint(img[80,80]) # shows the BGR value at pixel (80, 80)\r\nimg[80,80]=[0,0,0] # paints the point at pixel (80, 80) with the given BGR value\r\n\r\nbolge = img[30:120 , 100:200] # extract a region (ROI)\r\nimg[30:120, 100:200] = [0,0,0] # fills the selected region with black\r\ncv2.rectangle(img,(100,30),(200,120),(0,100,175),3) # draw a frame around the region (frame color, frame thickness)\r\n\r\ncv2.imshow('Futbol Topu' , img) # displays the image\r\ncv2.imshow('Futbol Topu 2' , bolge) # displays only the region\r\n\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()","sub_path":"resimde_aritmetik_islemler/aritmetik_islemler_1.py","file_name":"aritmetik_islemler_1.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"291273176","text":"import numpy as np\nimport pandas as pd\nimport os\nfrom datetime import datetime\nimport requests\nimport json\n\nCOUNTER = 0\n\ndef add_or_append(dictionary, key, value):\n    if key not in dictionary:\n        dictionary[key] = []\n    dictionary[key].append(value)\n\n\ndef days_between(d1, d2):\n    d1 = datetime.strptime(d1, \"%Y-%m-%d\")\n    d2 = datetime.strptime(d2, \"%Y-%m-%d\")\n    return abs((d2 - d1).days)\n\n\ndef company_in_hubspot(domain):\n    global COUNTER\n    print('Doing hubspot lookup: %i'%COUNTER)\n    COUNTER +=1\n    company_name = None\n\n    main_api = \"https://api.hubapi.com/companies/v2/companies/domain/\" + domain + \"?hapikey=278855c0-ac23-4228-bd34-4f4b554512b2\"\n    # TODO get proper API, call\n    try:\n        json_data = requests.get(main_api).json()\n    except:\n        return company_name\n\n    if 'status' not in json_data or json_data['status'] != 'error':\n        company_name = json_data[0]['properties']['name']['value']\n\n    return company_name\n\n\ndef average_days(dict_company):\n    days_list = []\n    for comp in dict_company:\n        days = dict_company[comp][\"day difference between visits\"]\n        if dict_company[comp][\"Visit\"] == 1:\n            pass\n        elif dict_company[comp][\"Visit\"] == 2:\n            days_list.append(days)\n        elif dict_company[comp][\"Visit\"] == 3:\n            days_list.append(days[0])\n            days_list.append(days[1])\n\n    average_days = format(sum(days_list) / float(len(days_list)), '.2f')\n\n    return float(average_days)\n\n\ndef make_analytics(csv_df):\n    # Check the Data set dimensions. 
How many rows and columns?\n    print(\"The Data set dimensions: (rows, columns):\", csv_df.shape)\n    amount_companys = csv_df.shape[0]\n\n    #print('***')\n    # How often companies occur?\n    #print(csv_df[\"Company Name\"].value_counts())\n    #print(\"***\")\n\n    company_list = csv_df[\"Company Name\"].unique().tolist()\n\n    dict_visit_of_each_company = csv_df[\"Company Name\"].value_counts().to_dict()\n    dict_amount_with_companies = {}\n    amount_of_visit = set(csv_df[\"Company Name\"].value_counts())\n\n    dict_company = {}\n    for i in company_list:\n        dict_company[i] = {}\n\n        # How often visited?\n        dict_company[i][\"Visit\"] = dict_visit_of_each_company[i]\n        occurance = dict_company[i][\"Visit\"]\n        # How many days in between?\n        if occurance == 1:\n            dict_company[i][\"day difference between visits\"] = 0\n\n            dict_company[i][\"Company Domain\"] = csv_df[\"Company Domain\"][csv_df[\"Company Name\"] == i]\n\n        elif occurance == 2:\n\n            days_series = csv_df.Timestamp[csv_df[\"Company Name\"] == i]\n            index_list = days_series.index.values.tolist()\n\n            d1 = days_series[index_list[0]]\n            d2 = days_series[index_list[1]]\n\n            dict_company[i][\"day difference between visits\"] = days_between(d1, d2)\n            dict_company[i][\"Company Domain\"] = csv_df[\"Company Domain\"][csv_df[\"Company Name\"] == i].iloc[0]\n\n        elif occurance == 3:\n\n            days_series = csv_df.Timestamp[csv_df[\"Company Name\"] == i]\n            index_list = days_series.index.values.tolist()\n\n            d1 = days_series[index_list[0]]\n            d2 = days_series[index_list[1]]\n            d3 = days_series[index_list[2]]\n\n            diff1 = days_between(d1, d2)\n            diff2 = days_between(d2, d3)\n\n            dict_company[i][\"day difference between visits\"] = [diff1, diff2]\n\n            dict_company[i][\"Company Domain\"] = csv_df[\"Company Domain\"][csv_df[\"Company Name\"] == i].iloc[0]\n\n        add_or_append(dict_amount_with_companies, occurance, i)\n\n        # Alexa rank?\n        dict_company[i][\"Alexa Rank\"] = csv_df[\"Alexa Rank\"][csv_df[\"Company Name\"] == i].iloc[0]\n\n        domain = csv_df[\"Company Domain\"][csv_df[\"Company Name\"] == i].iloc[0]\n        dict_company[i][\"Hubspot Name\"] = company_in_hubspot(domain)\n\n    return amount_companys, dict_company, dict_amount_with_companies, company_list\n\n\ndef read_in_csv(input_path):\n    \"\"\"\n    Reads in a csv file\n    :param input_path: str\n        path to input csv\n    :return: pandas df\n        pandas dataframe lying at input_path\n    \"\"\"\n    if not os.path.exists(input_path) or not \".csv\" in input_path:\n        raise RuntimeError(\"Not a valid path to a csv file given\")\n    csv_df = pd.read_csv(input_path, encoding='utf-8', converters={i: str for i in range(1000)}, sep=\",\")\n\n    for i in range(len(csv_df.index)):\n        # Define variables for iterating\n        row = csv_df.iloc[i]\n        row[\"Timestamp\"] = row[\"Timestamp\"][:10]\n\n    return csv_df\n\n\ndef main(input_path_1, input_path_2, output_path):\n    \"\"\"\n    This function splits a csv file into a cleaned one and an exception one in a form almost ready to be uploaded to Hubspot (only email is missing).\n    Then it assigns the persona to each contact.\n\n    :param input_path: str\n        path to my input csv file\n    :param output_path: str\n        path to output folder where results are saved to\n    :return: None\n    \"\"\"\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    # read in csv file\n    csv_df_1 = read_in_csv(input_path_1)\n    csv_df_2 = read_in_csv(input_path_2)\n\n    amount_companys_1, dict_company_1, dict_amount_visits_with_companies_1, company_list_1 = make_analytics(csv_df_1)\n    amount_companys_2, dict_company_2, dict_amount_visits_with_companies_2, company_list_2 = 
make_analytics(csv_df_2)\n\n columns_of_sheet = [\"Week 1 (25 ^Jan-1Feb)\", \"Week 2 (4-11Feb)\", \"Conclusions\"]\n index_of_sheet = [\"Amount Of Unique Companies\", \"'Best' Companies Due To Alexa Ranking\",\n \"Average Days Between Visits\", \"Maximum Visits\", \"Companies With (This) Maximum Visit\"]\n result_df = pd.DataFrame()\n\n result_df.at[index_of_sheet[0], columns_of_sheet[0]] = amount_companys_1\n result_df.at[index_of_sheet[0], columns_of_sheet[1]] = amount_companys_2\n\n new_in_scnd_week = []\n for company in company_list_2:\n if company not in company_list_1:\n new_in_scnd_week.append(company)\n\n amount_new_in_scnd_week = len(new_in_scnd_week)\n\n result_df.at[index_of_sheet[0], columns_of_sheet[2]] = \"Number of new companies in 2nd week: \" + str(\n amount_new_in_scnd_week)\n # result_df.at[index_of_sheet[1], columns_of_sheet[1]] =\n\n result_df.at[index_of_sheet[2], columns_of_sheet[0]] = average_days(dict_company_1)\n result_df.at[index_of_sheet[2], columns_of_sheet[1]] = average_days(dict_company_2)\n average_both = (average_days(dict_company_1) + average_days(dict_company_2)) / float(2)\n result_df.at[index_of_sheet[2], columns_of_sheet[2]] = \"Average in both weeks: \" + str(average_both)\n\n max_visit_comp_1 = max(dict_amount_visits_with_companies_1)\n max_visit_comp_2 = max(dict_amount_visits_with_companies_2)\n\n max_value_1 = dict_amount_visits_with_companies_1[max_visit_comp_1]\n max_value_2 = dict_amount_visits_with_companies_2[max_visit_comp_2]\n\n dic_append = {}\n values = [[max_visit_comp_1, max_value_1], [max_visit_comp_2, max_value_2] , ['','']]\n for i, k in enumerate(result_df.keys()):\n dic_append[k] = values[i]\n\n df_append = pd.DataFrame(dic_append, index =['Maximum Visits', 'Maximum Visiting Companies'])\n\n result_df = result_df.append(df_append)\n\n dic_append = {}\n values = [np.sum([1 for i in dict_company_1 if dict_company_1[i]['Hubspot Name'] is None]),\n np.sum([1 for i in dict_company_2 if dict_company_2[i]['Hubspot Name'] is None]),\n '']\n for i, k in enumerate(result_df.keys()):\n dic_append[k] = values[i]\n df_append = pd.DataFrame(dic_append, index=['Number of Companies not in Hubspot'])\n\n result_df = result_df.append(df_append)\n\n overview_df_1 = pd.DataFrame.from_dict(dict_company_1, orient='index')\n overview_df_1 = overview_df_1.sort_values(by=['Visit'], ascending=False)\n overview_df_2 = pd.DataFrame.from_dict(dict_company_2, orient='index')\n overview_df_2 = overview_df_2.sort_values(by=['Visit'], ascending=False)\n\n result_df.to_csv(os.path.join(output_path, \"Result.csv\"))\n overview_df_1.to_csv(os.path.join(output_path, \"About_week_1.csv\"))\n overview_df_2.to_csv(os.path.join(output_path, \"About_week_2.csv\"))\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n\n \"\"\"\n Every parameter which has to be handed in manually must be defined here and edited\n in the configuration settings as well\n\n Per Default: Input path, Output path, market, language, plan (Agency or Enterprise), contact_owner and hunter_request\n \"\"\"\n\n parser.add_argument(\"--input_csv_path_1\", help=\"Path to the CSV file to be used as input.\", default='./VisitorReport(25Jan-1Feb).csv',\n type=str)\n parser.add_argument(\"--input_csv_path_2\", help=\"Path to the CSV file to be used as input.\", default = 'VisitorReport(4-11Feb).csv',\n type=str)\n parser.add_argument(\"--output_path\", help=\"Folder where result csv files are saved.\", default = './', type=str)\n\n args = parser.parse_args()\n\n 
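# Illustrative invocation from a shell, relying on the default CSV paths defined above:\n    #   python WebsiteAnalytics2.py --output_path ./results\n    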
main(args.input_csv_path_1, args.input_csv_path_2, args.output_path)\n    print(\"The files are in:\", args.output_path)\n\n\n\n\n","sub_path":"WebsiteAnalytics2.py","file_name":"WebsiteAnalytics2.py","file_ext":"py","file_size_in_byte":9229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"393387841","text":"import tensorflow as tf\r\ngpus = tf.config.experimental.list_physical_devices('GPU')\r\ntf.config.experimental.set_memory_growth(gpus[0], True)\r\n\r\nfrom Data import Data\r\nfrom Model import Model\r\nfrom Train import Train\r\nfrom Test import Test\r\nfrom Utils import Drawer\r\n\r\n\r\ndef train(train_gen, valid_gen, df_train, df_val, batch_size, target_size):\r\n    epochs = 150\r\n    Drawer.draw_data_samples(df_train)\r\n    unet_model = Model.create_model(input_size=target_size + (3,))\r\n    Train.run_train(unet_model, train_gen=train_gen, valid_gen=valid_gen, batch_size=batch_size,\r\n                    df_train=df_train, df_val=df_val, epochs=epochs)\r\n\r\n\r\ndef test(test_gen, model_path):\r\n    test_result = Test.run_test(test_gen, model_path, 10)\r\n    Drawer.draw_test_result(test_result)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    target_size = (256, 256)\r\n    batch_size = 2 # tune depending on graphics card memory\r\n    path = r\"D:\\Dataset\\MRI\\lgg-mri-segmentation\\kaggle_3m\"\r\n    train_gen, valid_gen, test_gen, df_train, df_val = Data.get_train_generator(batch_size=batch_size,\r\n                                                 target_size=target_size, path=path)\r\n\r\n    mode = \"train\"\r\n\r\n    if mode == \"train\":\r\n        train(train_gen, valid_gen, df_train, df_val, batch_size, target_size)\r\n    else:\r\n        test(test_gen, \"./unet_brain_mri_seg2.hdf5\")\r\n\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"13333344","text":"class Solution:\n    def subarraySum(self, nums: List[int], k: int) -> int:\n        \n        prevSums = {0:1}\n        runningSum = 0\n        count = 0\n        \n        for i in range(len(nums)):\n            runningSum += nums[i]\n            prevSumToFind = runningSum - k\n            \n            if prevSumToFind in prevSums:\n                count += prevSums[prevSumToFind]\n            \n            prevSums[runningSum] = prevSums.get(runningSum, 0) + 1\n        \n        return count\n","sub_path":"Power25/6-subarray-sum-equals-k.py","file_name":"6-subarray-sum-equals-k.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"155793059","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import print_function\r\nfrom flask import Flask, session, redirect, url_for, request, render_template, jsonify\r\nfrom collections import OrderedDict\r\n\r\n#from flask_mqtt import Mqtt\r\nimport pymysql\r\nimport datetime\r\nimport subprocess\r\nimport json\r\n\r\n\r\nimport paho.mqtt.client as mqtt\r\nimport mysql.connector\r\ncount = 0\r\nice = 0\r\nweather = \"weather\"\r\nval_distance =0\r\nval_speed_avg =0\r\nval_heartbeat = 0\r\ncnt = 0\r\nroro = \"s1\"\r\nMQTT_SERVER = \"test.mosquitto.org\"\r\nMQTT_PATH = \"hoo/#\"\r\ndef on_connect(client, userdata, flags, rc):\r\n    print(\"connect result\" + str(rc))\r\n    client.subscribe(MQTT_PATH)\r\n\r\ndef on_message(client, userdata, msg):\r\n    global count\r\n    global roro\r\n    global ice\r\n    global cnt\r\n    global val_distance\r\n    global val_speed_avg\r\n    global val_heartbeat\r\n    if count<36:\r\n        # print(msg.topic + \" \" + str(msg.payload))\r\n\r\n\r\n        ## 2018.05.22 (Tue)\r\n        ## INSERT the average speed into the DB\r\n        if (msg.topic == 
\"hoo/avg\"):\r\n val_speed_avg = float(str(msg.payload.decode(\"utf-8\")))\r\n\r\n # print(\"speed : \", val_speed)\r\n # sql0 = \"SELECT * FROM speedtest order by id desc limit 1\"\r\n\r\n # cursor.execute(sql0)\r\n # data = cursor.fetchall()\r\n conn = mysql.connector.connect(user=\"raspberrypi\", password=\"raspberrypi\",\r\n host=\"raspberrydb.cvlmaax7vr80.ap-northeast-2.rds.amazonaws.com\",\r\n database=\"raspberrypi\", port=3306)\r\n\r\n cursor = conn.cursor(buffered=True)\r\n query = \"SELECT nickname, you_nickname FROM customer ORDER BY id DESC LIMIT 1\"\r\n cursor.execute(query)\r\n rows = cursor.fetchall()\r\n for row in rows:\r\n row1 = row[0]\r\n row2 = row[1]\r\n # if row1:\r\n # sqlsql = \"INSERT INTO speed3(%s) WHERE nickname = %s VALUES(%s)\"\r\n cursor.execute(\"UPDATE speed3 SET {} = {} WHERE nickname = '{}'\".format(roro, val_speed_avg, row1))\r\n # print(roro, row1, val_)\r\n # cursor.execute(sqlsql,(row1,val_speed_avg))\r\n count += 1\r\n conn.commit()\r\n\r\n\r\n\r\n if row2:\r\n cursor.execute(\"SELECT {} FROM speed3 where nickname = '{}'\".format(roro, row2))\r\n # cursor.execute(query1)\r\n row3 = cursor.fetchall()\r\n # row = row3[0]\r\n # if(row3):\r\n # print(row3)\r\n # else:\r\n # print(\"no\")\r\n # apple = []\r\n # apple.append(row[0])\r\n # for index in range(len(row3)):\r\n # apple.append(row3[index])\r\n # print(cnt)\r\n\r\n row = float(str(row3[0][0]))\r\n # row = apple(0)\r\n # row = row3[cnt]\r\n\r\n roro2 = int(roro[1])\r\n roro2 += 1\r\n roro = roro[0] + str(roro2)\r\n\r\n # for row in list:\r\n #\r\n # if row[cnt] != None:\r\n # row = row[cnt]\r\n\r\n if (val_speed_avg - float(row)) > 50.0:\r\n ice = 5\r\n elif (val_speed_avg - float(row)) > 40.0:\r\n ice = 4\r\n elif (val_speed_avg - float(row)) > 30.0:\r\n ice = 3\r\n elif (val_speed_avg - float(row)) > 20.0:\r\n ice = 2\r\n elif (val_speed_avg - float(row)) > 10.0:\r\n ice = 1\r\n elif (val_speed_avg - float(row)) > 0.0:\r\n ice = 0\r\n elif (val_speed_avg - float(row)) > -10.0:\r\n ice = -1\r\n elif (val_speed_avg - float(row)) > -20.0:\r\n ice = -2\r\n elif (val_speed_avg - float(row)) > -30.0:\r\n ice = -3\r\n elif (val_speed_avg - float(row)) > -40.0:\r\n ice = -4\r\n elif (val_speed_avg - float(row)) > -50.0:\r\n ice = -5\r\n # cnt+=1\r\n\r\n print(ice)\r\n if (msg.topic == \"hoo/distance\"):\r\n val_distance = float(str(msg.payload.decode(utf-8)))\r\n print(\"val_distance : {}\".format(val_distance))\r\n if (msg.topic == \"hoo/heartbeat\"):\r\n val_heartbeat = float(str(msg.payload.decode(utf-8)))\r\n print(\"heartbeat : {}\".format(val_heartbeat))\r\n else:\r\n roro = \"s1\"\r\n print(\"close\")\r\n\r\n## 2018.05.16(수)\r\n## RDS 접속 확인\r\n\r\n\r\n\r\n # sql = \"INSERT INTO speedtest(speed) VALUE(%s)\"\r\n\r\n # cursor.execute(sql,data)\r\n # cursor.execute(\"\"\"INSERT INTO speedtest (speed) VALUES (%f)\"\"\" % val_speed)\r\n # cursor.execute(\"\"\"INSERT INTO speedtest WHERE rownum1 ORDER BY your_auto_increment_field DESC\"\"\" % val_speed)\r\n # conn.commit()\r\n\r\n\r\n# conn.close()\r\n\r\n\r\napp = Flask(__name__)\r\n#mqtt = Mqtt(app)\r\n# subprocess.Popen(\r\n# [\"C:/Program Files/Anaconda3/python/bike/mqtt_Test2.py\"],\r\n# stdout=subprocess.PIPE\r\n# )\r\n@app.route('/back')\r\ndef back() :\r\n return render_template('back.html')\r\n\r\n@app.route('/user')\r\ndef showUserName():\r\n return render_template('user.html',\r\n myteam = session['myteam'],\r\n name = session['userName'],\r\n gender = session['gender'],\r\n age = session['age'],\r\n competitor = session['competitor']\r\n 
)\r\n\r\n\r\n@app.route('/')\r\ndef Mode() :\r\n return render_template('Mode.html')\r\n\r\n@app.route('/resister', methods=['POST' ,'GET'])\r\ndef resister():\r\n if request.method =='POST':\r\n if request.form[\"Mode\"]==\"one\":\r\n return render_template('login_one.html')\r\n else:\r\n return render_template('login_two.html')\r\n\r\n@app.route('/loginone', methods=['POST','GET'])\r\ndef loginone():\r\n global count\r\n global cnt\r\n global weather\r\n if request.form['play'] == 'back':\r\n return redirect(url_for('Mode'))\r\n else:\r\n\r\n if request.method == 'POST':\r\n session['userName'] = request.form['userName']\r\n session['gender'] = request.form['gender']\r\n session['age'] = request.form['age']\r\n session['myteam'] = request.form['myteam']\r\n session['competitor'] = request.form['competitor']\r\n # weather = request.form['weather']\r\n\r\n try:\r\n conn = pymysql.connect(host='raspberrydb.cvlmaax7vr80.ap-northeast-2.rds.amazonaws.com',\r\n user='raspberrypi',\r\n password='raspberrypi',\r\n db='raspberrypi',\r\n charset='utf8mb4'\r\n )\r\n curs = conn.cursor()\r\n sql0 = \"SELECT id FROM customer WHERE nickname=%s\"\r\n curs.execute(sql0,request.form['competitor'])\r\n conn.commit()\r\n\r\n data = curs.fetchall()\r\n\r\n for row in data:\r\n data = row[0]\r\n if data:\r\n sql=\"INSERT INTO customer(nickname,age,gender,myteam,yourteam,you_nickname) VALUE(%s,%s,%s,%s,%s,%s)\"\r\n curs.execute(sql, (\r\n session['userName'],\r\n session['age'],\r\n session['gender'],\r\n session['myteam'],\r\n 'yourteam',\r\n session['competitor']\r\n )\r\n )\r\n\r\n\r\n ##sql2 = \"ALTER TABLE ADD speedtest abcde VARCHAR(100)\"\r\n # sql2 = \"INSERT INTO speed3(nickname) VALUES({}) \".format(session['userName'])\r\n sql2 = \"INSERT INTO speed3(nickname) VALUES(%s)\"\r\n curs.execute(sql2, session['userName'])\r\n conn.commit()\r\n\r\n count =0\r\n cnt = 3\r\n else:\r\n\r\n return redirect(url_for('back'))\r\n conn.close()\r\n except:\r\n return redirect(url_for('back'))\r\n return redirect(url_for('showUserName'))\r\n else:\r\n return 'login failed'\r\n\r\n@app.route('/logintwo', methods=['POST','GET'])\r\ndef logintwo():\r\n global count\r\n global cnt\r\n global weather\r\n if request.form['play'] == 'back':\r\n return redirect(url_for('Mode'))\r\n else:\r\n\r\n if request.method == 'POST':\r\n session['userName'] = request.form['userName']\r\n session['gender'] = request.form['gender']\r\n session['age'] = request.form['age']\r\n session['myteam'] = request.form['myteam']\r\n session['competitor'] = request.form['competitor']\r\n # weather = request.form['weather']\r\n try:\r\n conn = pymysql.connect(host='raspberrydb.cvlmaax7vr80.ap-northeast-2.rds.amazonaws.com',\r\n user='raspberrypi',\r\n password='raspberrypi',\r\n db='raspberrypi',\r\n charset='utf8mb4'\r\n )\r\n curs = conn.cursor()\r\n sql0 = \"SELECT id FROM customer WHERE myteam=%s\"\r\n curs.execute(sql0,request.form['competitor'])\r\n data = curs.fetchall()\r\n\r\n for row in data:\r\n data = row[0]\r\n if data:\r\n sql=\"INSERT INTO customer(nickname,age,gender,myteam,yourteam,you_nickname) VALUE(%s,%s,%s,%s,%s,%s)\"\r\n curs.execute(sql, (\r\n session['userName'],\r\n session['age'],\r\n session['gender'],\r\n session['myteam'],\r\n session['competitor'],\r\n 'yournickname'\r\n )\r\n )\r\n ##sql2 = \"ALTER TABLE ADD speedtest abcde VARCHAR(100)\"\r\n # sql2 = \"INSERT INTO speed3 (nickname) VALUE ({}) \".format(session['userName'])\r\n # curs.execute(sql2)\r\n sql2 = \"INSERT INTO speed3(nickname) VALUES(%s)\"\r\n 
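# The %s placeholder is filled in by execute(), letting the driver escape the value (avoids SQL injection).\r\n                    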
curs.execute(sql2, session['userName'])\r\n                conn.commit()\r\n                count = 0\r\n                cnt = 3\r\n            else:\r\n\r\n                return redirect(url_for('back'))\r\n            conn.close()\r\n        except:\r\n            return redirect(url_for('back'))\r\n        return redirect(url_for('showUserName'))\r\n    else:\r\n        return 'login failed'\r\n\r\n@app.route('/start', methods=['POST' ,'GET'])\r\ndef start():\r\n    print(\"check!!\")\r\n    # if request.method == 'POST':\r\n    #     if request.form['val'] == 'start':\r\n    # cnt = 0\r\n    # while cnt<20:\r\n    global count\r\n    global ice\r\n    global val_distance\r\n    # global val_heartbeat, val_distance, ice, count, weather\r\n    global val_heartbeat\r\n    client = mqtt.Client()\r\n\r\n    client.on_connect = on_connect\r\n    client.on_message = on_message\r\n    client.connect(MQTT_SERVER, 1883, 60)\r\n    client.loop_start()\r\n\r\n    # print(val_speed_avg)\r\n    sensor = {}\r\n    sensor[\"position\"] = ice # position value\r\n    sensor[\"distance\"] = val_distance\r\n    sensor[\"speed\"] = val_speed_avg\r\n    sensor[\"heartbeat\"] = val_heartbeat\r\n    # sensor[\"weather\"] = weather\r\n    if count==36:\r\n        sensor[\"count\"] = \"stop\"\r\n    data = json.dumps(sensor)\r\n    # print(data)\r\n\r\n    # cnt+=1\r\n    # print(cnt)\r\n    # client.loop_stop()\r\n    # return render_template(\"start.html\", candy = data)\r\n    # return(redirect(url_for('showUserName')),json.dumps(sensor, ensure_ascii = False, indent =\"\\t\"))\r\n    return render_template('user.html',\r\n                           myteam=session['myteam'],\r\n                           name=session['userName'],\r\n                           gender=session['gender'],\r\n                           age=session['age'],\r\n                           competitor=session['competitor']\r\n                           ,string=data)\r\n\r\n\r\n\r\napp.secret_key = 'abcdefgadsjflkjsdljjdlsjfkja'\r\n\r\nif __name__ == \"__main__\":\r\n    app.run()\r\n\r\n## host='0.0.0.0', port=5002, debug=True\r\n","sub_path":"testtest2.py","file_name":"testtest2.py","file_ext":"py","file_size_in_byte":12735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"67463894","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport pytest\n\nfrom poetry.packages import Package\n\n\ndef test_package_authors():\n    package = Package(\"foo\", \"0.1.0\")\n\n    package.authors.append(\"Sébastien Eustace <sebastien@eustace.io>\")\n    assert package.author_name == \"Sébastien Eustace\"\n    assert package.author_email == \"sebastien@eustace.io\"\n\n    package.authors.insert(\n        0, \"Raphaël Yancey <raphael@badfile.net>\"\n    ) # With combining diacritics (ë = e + ¨ = e\\u0308)\n    assert package.author_name == \"Raphaël Yancey\" # Is normalized into \\u00EB\n    assert package.author_email == \"raphael@badfile.net\"\n\n    package.authors.insert(\n        0, \"Raphaël Yancey <raphael@badfile.net>\"\n    ) # Without (ë = \\u00EB)\n    assert package.author_name == \"Raphaël Yancey\"\n    assert package.author_email == \"raphael@badfile.net\"\n\n    package.authors.insert(0, \"John Doe\")\n    assert package.author_name == \"John Doe\"\n    assert package.author_email is None\n\n\n@pytest.mark.parametrize(\"category\", [\"main\", \"dev\"])\ndef test_package_add_dependency_vcs_category(category):\n    package = Package(\"foo\", \"0.1.0\")\n\n    dependency = package.add_dependency(\n        \"poetry\",\n        constraint={\"git\": \"https://github.com/python-poetry/poetry.git\"},\n        category=category,\n    )\n    assert dependency.category == category\n\n\ndef test_package_add_dependency_vcs_category_default_main():\n    package = Package(\"foo\", \"0.1.0\")\n\n    dependency = package.add_dependency(\n        \"poetry\", constraint={\"git\": \"https://github.com/python-poetry/poetry.git\"}\n    )\n    assert dependency.category == \"main\"\n\n\n@pytest.mark.parametrize(\"category\", [\"main\", 
\"dev\"])\n@pytest.mark.parametrize(\"optional\", [True, False])\ndef test_package_url_category_optional(category, optional):\n package = Package(\"foo\", \"0.1.0\")\n\n dependency = package.add_dependency(\n \"poetry\",\n constraint={\n \"url\": \"https://github.com/python-poetry/poetry/releases/download/1.0.5/poetry-1.0.5-linux.tar.gz\",\n \"optional\": optional,\n },\n category=category,\n )\n assert dependency.category == category\n assert dependency.is_optional() == optional\n","sub_path":"tests/packages/test_package.py","file_name":"test_package.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"191359679","text":"#!/usr/bin/env python3\n\nimport argparse, os, sys\n\nimport check\nfrom utils import colour\n\ndef checkSingleLine(line):\n check.doubleDollars(line)\n check.tooLong(line)\n check.trailingWhitespace(line)\n\ndef checkMarkdownFile(file):\n ok = True\n\n try:\n check.encoding(file.name)\n except check.EncodingError as e:\n print(\"File {name} is not valid: {message}\".format(\n name = colour(file.name, 'name'),\n message = colour(e.message, 'error'),\n ))\n return False\n\n for number, line in enumerate(file):\n try:\n checkSingleLine(line)\n except check.SingleLineError as e:\n print(\"File {name} line {num}: {message}\".format(\n name = colour(file.name, 'name'),\n message = colour(e.message, 'error'),\n num = colour(number, 'no')\n ))\n print(colour(line, 'no', 'hv'), end = '')\n print('-' * (e.column - 2) + '^')\n ok = False\n\n return ok\n\ndef main():\n parser = argparse.ArgumentParser(\n description = \"DeGeŠ Markdown style checker\",\n )\n parser.add_argument('infiles', nargs = '+', type = argparse.FileType('r'), default = [sys.stdin])\n parser.add_argument('-v', '--verbose', action = 'store_true')\n args = parser.parse_args()\n \n for filename in args.infiles:\n if checkMarkdownFile(filename):\n if args.verbose:\n print(\"File {name} {ok}\".format(\n name = colour(filename.name, 'name'),\n ok = colour('OK', 'ok'),\n ))\n\nmain()\n","sub_path":"dgs-mdcheck.py","file_name":"dgs-mdcheck.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"106467227","text":"# ##### BEGIN GPL LICENSE BLOCK #####\r\n#\r\n# This program is free software; you can redistribute it and/or\r\n# modify it under the terms of the GNU General Public License\r\n# as published by the Free Software Foundation; either version 2\r\n# of the License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software Foundation,\r\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\r\n#\r\n# ##### END GPL LICENSE BLOCK #####\r\n\r\nbl_info = {\r\n \"name\": \"Rotate around active and cursor\",\r\n \"author\": \"1COD\",\r\n \"version\": (1, 2, 0),\r\n \"blender\": (2, 83, 0),\r\n \"location\": \"View3D\",\r\n \"description\": \"activ object as new space referential. ctrl+. 
numpad\", \r\n \"warning\": \"\",\r\n \"wiki_url\": \"\",\r\n \"category\": \"3D View\"\r\n}\r\n\r\nimport bpy\r\nfrom mathutils import Matrix, Euler, Vector\r\nfrom bpy.types import Operator, Panel\r\nfrom bpy.props import FloatProperty, BoolProperty\r\nfrom bpy_extras import view3d_utils\r\n\r\ndef remove_prop (self,context): \r\n \r\n del bpy.types.Scene.rot_x \r\n del bpy.types.Scene.rot_y \r\n del bpy.types.Scene.rot_z \r\n del bpy.types.Scene.loc_x \r\n del bpy.types.Scene.loc_y\r\n del bpy.types.Scene.loc_z\r\n del bpy.types.Scene.cur_rot_x\r\n del bpy.types.Scene.cur_rot_y\r\n del bpy.types.Scene.cur_rot_z\r\n del bpy.types.Scene.cur_loc_x\r\n del bpy.types.Scene.cur_loc_y\r\n del bpy.types.Scene.cur_loc_z\r\n \r\n\r\nScn = bpy.types.Scene\r\nScn.whole_scene=BoolProperty(default=False)\r\nScn.around_cursor=BoolProperty(default=False)\r\nON=False\r\nr=0\r\n\r\ndef loc_rot_props(self,context):\r\n \r\n Scn = bpy.types.Scene \r\n context = bpy.context \r\n scn = context.scene \r\n cao=context.active_object\r\n\r\n Scn.rot_x=FloatProperty() #for active object\r\n Scn.rot_y=FloatProperty() \r\n Scn.rot_z=FloatProperty() \r\n Scn.loc_x=FloatProperty() \r\n Scn.loc_y=FloatProperty() \r\n Scn.loc_z=FloatProperty() \r\n \r\n scn.rot_x = cao.rotation_euler.x\r\n scn.rot_y = cao.rotation_euler.y\r\n scn.rot_z = cao.rotation_euler.z \r\n scn.loc_x = cao.location.x\r\n scn.loc_y = cao.location.y \r\n scn.loc_z = cao.location.z \r\n \r\n Scn.cur_rot_x=FloatProperty() #for cursor\r\n Scn.cur_rot_y=FloatProperty() \r\n Scn.cur_rot_z=FloatProperty() \r\n Scn.cur_loc_x=FloatProperty() \r\n Scn.cur_loc_y=FloatProperty() \r\n Scn.cur_loc_z=FloatProperty() \r\n\r\n scn.cur_rot_x = scn.cursor.rotation_euler.x\r\n scn.cur_rot_y = scn.cursor.rotation_euler.y\r\n scn.cur_rot_z = scn.cursor.rotation_euler.z \r\n scn.cur_loc_x = scn.cursor.location.x\r\n scn.cur_loc_y = scn.cursor.location.y \r\n scn.cur_loc_z = scn.cursor.location.z \r\n\r\n \r\ndef call_props(self,context):\r\n \r\n context = bpy.context \r\n scn = context.scene \r\n cao=context.active_object\r\n \r\n rot_cur = context.scene.cursor.rotation_euler.copy()\r\n loc_cur = context.scene.cursor.location.copy() \r\n rot = cao.rotation_euler.copy()\r\n loc = cao.location.copy() \r\n\r\n self.rot = Euler((rot.x, rot.y, rot.z), 'XYZ') \r\n self.loc = loc \r\n\r\n rot_to_curs = Euler((\r\n -rot_cur.x + rot.x,\r\n -rot_cur.y + rot.y,\r\n -rot_cur.z + rot.z\r\n ), 'XYZ')\r\n \r\n if scn.around_cursor: \r\n self.rot = rot_to_curs\r\n self.loc_cur = loc_cur\r\n \r\ndef call_props_back(self,context):\r\n\r\n context = bpy.context \r\n scn = context.scene \r\n \r\n self.rot = Euler((\r\n scn.rot_x,\r\n scn.rot_y,\r\n scn.rot_z), \r\n 'XYZ') \r\n \r\n self.loc = Vector((scn.loc_x, scn.loc_y, scn.loc_z))\r\n \r\n rot_curs = Euler((\r\n -scn.cur_rot_x + scn.rot_x,\r\n -scn.cur_rot_y + scn.rot_y,\r\n -scn.cur_rot_z + scn.rot_z\r\n ))\r\n\r\n loc_curs = Vector((\r\n scn.cur_loc_x,\r\n scn.cur_loc_y,\r\n scn.cur_loc_z\r\n ))\r\n\r\n if scn.around_cursor: \r\n self.rot = rot_curs\r\n self.loc_curs= loc_curs\r\n \r\ndef face_to_cursor(self,context): \r\n \r\n ob=context.object\r\n scn = context.scene\r\n vert=ob.data.vertices\r\n\r\n if scn.geo: \r\n \r\n global r \r\n r=ob.matrix_world @ (\r\n sum((i.co for i in vert if i.select),Vector())/\r\n sum(int(i.select) for i in vert)) \r\n\r\n if scn.whole_scene:\r\n override = bpy.context.copy()\r\n override['selected_objects'] = list(bpy.context.scene.objects)\r\n bpy.ops.transform.translate(override, value=(\r\n 
scn.cursor.location.x-r.x, \r\n scn.cursor.location.y-r.y, \r\n scn.cursor.location.z-r.z))\r\n \r\n else: \r\n bpy.ops.transform.translate(value=(\r\n scn.cursor.location.x-r.x, \r\n scn.cursor.location.y-r.y, \r\n scn.cursor.location.z-r.z)) \r\n \r\n else:\r\n\r\n if scn.whole_scene:\r\n override = bpy.context.copy()\r\n override['selected_objects'] = list(bpy.context.scene.objects)\r\n bpy.ops.transform.translate(override, value=(\r\n -scn.cursor.location.x+r.x, \r\n -scn.cursor.location.y+r.y, \r\n -scn.cursor.location.z+r.z))\r\n \r\n else: \r\n bpy.ops.transform.translate(value=(\r\n -scn.cursor.location.x+r.x, \r\n -scn.cursor.location.y+r.y, \r\n -scn.cursor.location.z+r.z)) \r\n \r\nScn.geo=BoolProperty(default=False, update= face_to_cursor) \r\n\r\nclass OBJ_OT_rot_loc (Operator): \r\n bl_idname = \"obj.rot_loc\"\r\n bl_label = \"matrice loc rot from active\"\r\n bl_options = {\"REGISTER\", \"UNDO\"}\r\n \r\n @classmethod\r\n def poll (cls, context):\r\n return context.object and not ON and not context.object.parent \r\n \r\n def execute(self, context): \r\n \r\n global ON\r\n ON=True \r\n \r\n context = bpy.context \r\n scn = context.scene \r\n loc_rot_props(self,context)\r\n call_props(self,context)\r\n \r\n rot=self.rot\r\n loc=self.loc\r\n \r\n to_qt = rot.to_quaternion()\r\n to_qt.invert()\r\n R = to_qt.to_matrix().to_4x4()\r\n T = Matrix.Translation(loc)\r\n M = T @ R @ T.inverted()\r\n \r\n if scn.whole_scene:\r\n obj = scn.objects\r\n else:\r\n obj = context.selected_objects\r\n \r\n for ob in obj:\r\n if ob.parent:\r\n continue\r\n ob.location = M @ ob.location -loc\r\n ob.rotation_euler.rotate(M) \r\n \r\n if scn.around_cursor:\r\n ob.location += self.loc_cur\r\n \r\n \r\n return {'FINISHED'}\r\n\r\n\r\nclass OBJ_OT_rot_loc_cancel (Operator): \r\n bl_idname = \"obj.rot_loc_cancel\"\r\n bl_label = \"cancel rot loc\"\r\n bl_options = {\"REGISTER\", \"UNDO\"}\r\n\r\n @classmethod\r\n def poll (cls, context):\r\n return context.object and ON\r\n def execute(self, context):\r\n \r\n global ON\r\n ON=False \r\n context = bpy.context \r\n scn = context.scene \r\n scn.geo=False \r\n call_props_back(self,context)\r\n \r\n loc= self.loc\r\n rot= self.rot\r\n \r\n to_qt = rot.to_quaternion()\r\n to_qt.invert()\r\n\r\n R = to_qt.to_matrix().to_4x4()\r\n T = Matrix.Translation(loc)\r\n M = T @ R @ T.inverted()\r\n M = M.inverted() #rotation inverted\r\n\r\n if scn.whole_scene:\r\n obj = scn.objects\r\n else:\r\n obj = context.selected_objects\r\n \r\n for ob in obj: \r\n if ob.parent:\r\n continue \r\n \r\n if scn.around_cursor: \r\n ob.location = M @ (ob.location + loc- self.loc_curs) \r\n \r\n else:\r\n ob.location = M @ (ob.location + loc)\r\n \r\n ob.rotation_euler.rotate(M)\r\n \r\n if scn.geo:\r\n ob=bpy.context.object\r\n vert=ob.data.vertices\r\n r=ob.matrix_world @ (\r\n sum((i.co for i in vert if i.select),Vector())/\r\n sum(int(i.select) for i in vert)) \r\n bpy.ops.transform.translate(value=(\r\n -scn.cursor.location.x + r.x,\r\n -scn.cursor.location.y - r.y,\r\n -scn.cursor.location.z - r.z)) \r\n \r\n# remove_prop(self,context)\r\n\r\n \r\n return {'FINISHED'}\r\n \r\n \r\nclass OBJ_OT_rot_loc_confirm (Operator): \r\n bl_idname = \"obj.rot_loc_confirm\"\r\n bl_label = \"matrice loc rot from active\"\r\n bl_options = {\"REGISTER\", \"UNDO\"}\r\n\r\n @classmethod\r\n def poll (cls, context):\r\n return context.object and ON\r\n \r\n def execute(self, context): \r\n \r\n global ON\r\n ON=False \r\n\r\n remove_prop(self,context) \r\n \r\n return {'FINISHED'}\r\n 
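# confirming keeps the current (rotated) placement: remove_prop() above deletes\r\n    # the Scene properties holding the original transforms, so Cancel is no longer possible\r\n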
\r\n\r\nclass OBJ_PT_loc_rot_menu(Panel):\r\n bl_label = \"Global rotation\"\r\n bl_space_type = 'PROPERTIES'\r\n bl_region_type = 'WINDOW'\r\n bl_context = \"0data\" # not valid name to hide it\r\n bl_options = {'DEFAULT_CLOSED'}\r\n \r\n def draw(self, context):\r\n \r\n scn = context.scene\r\n layout = self.layout\r\n\r\n row = layout.row(align=True)\r\n row.alignment = 'LEFT' \r\n row.prop(scn,'whole_scene',text='Scene')\r\n row = layout.row(align=True)\r\n row.alignment = 'LEFT' \r\n row.prop(scn,'around_cursor',text='Cursor') \r\n if ON and scn.around_cursor: \r\n row.prop(scn,'geo',text='From selected Geo')\r\n row=layout.row()\r\n label='Cursor'if scn.around_cursor else 'W Center'\r\n row.operator(\"obj.rot_loc\",text=label) \r\n row.operator(\"obj.rot_loc_confirm\", text='Confirm')\r\n row.operator(\"obj.rot_loc_cancel\", text='Cancel')\r\n layout.separator_spacer\r\n layout.operator(\"view3d.face_center\",text=\"snap Cursor 2 face center\")\r\n\r\n#-----------------------------snap to face center\r\n\r\n\r\ndef main(context, event):\r\n \r\n scene = context.scene\r\n region = context.region\r\n rv3d = context.region_data\r\n coord = event.mouse_region_x, event.mouse_region_y\r\n viewlayer = context.view_layer\r\n depsgraph = context.evaluated_depsgraph_get()\r\n\r\n # get the ray from the viewport and mouse\r\n view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)\r\n ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)\r\n \r\n result, location, normal, index, object, matrix = scene.ray_cast(viewlayer, ray_origin, view_vector) \r\n\r\n if object:\r\n wmtx = object.matrix_world \r\n object_eval = object.evaluated_get(depsgraph) \r\n \r\n if context.mode=='OBJECT': \r\n \r\n face=object_eval.data.polygons[index] \r\n loc = wmtx @ face.center \r\n \r\n else: \r\n mesh_from_eval = bpy.data.meshes.new_from_object(object_eval)\r\n \r\n face=mesh_from_eval.polygons[index]\r\n loc = wmtx @ face.center \r\n \r\n bpy.context.scene.cursor.location=loc \r\n\r\n return {'FINISHED'}\r\n\r\nclass FACE_OT_center(bpy.types.Operator):\r\n bl_idname = \"view3d.face_center\"\r\n bl_label = \"snap cursor 2 face center-sfc\" \r\n \r\n @classmethod\r\n def poll(cls, context):\r\n return context.mode in {'OBJECT','EDIT_MESH'}\r\n\r\n def modal(self, context, event): \r\n \r\n bpy.ops.view3d.cursor3d('INVOKE_DEFAULT',use_depth=False, orientation='GEOM')\r\n main(context, event)\r\n \r\n if event.type == 'LEFTMOUSE':\r\n return {'FINISHED'}\r\n\r\n elif event.type in {'RIGHTMOUSE', 'ESC'}:\r\n bpy.context.scene.cursor.location=self.cursor_loc\r\n bpy.context.scene.cursor.rotation_euler=self.cursor_rot\r\n return {'FINISHED'}\r\n \r\n elif event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE','TAB'}:\r\n # allow navigation\r\n return {'PASS_THROUGH'}\r\n\r\n return {'RUNNING_MODAL'}\r\n\r\n def invoke(self, context, event): \r\n \r\n self.cursor_loc=bpy.context.scene.cursor.location.copy()\r\n self.cursor_rot=bpy.context.scene.cursor.rotation_euler.copy()\r\n context.window_manager.modal_handler_add(self)\r\n return {'RUNNING_MODAL'} \r\n\r\n\r\n\r\n\r\n\r\n\r\n#------------------------------ \r\n\r\naddon_keymaps = []\r\n\r\ndef register():\r\n bpy.utils.register_class(OBJ_OT_rot_loc)\r\n bpy.utils.register_class(OBJ_OT_rot_loc_cancel)\r\n bpy.utils.register_class(OBJ_PT_loc_rot_menu)\r\n bpy.utils.register_class(OBJ_OT_rot_loc_confirm)\r\n bpy.utils.register_class(FACE_OT_center)\r\n \r\n wm = bpy.context.window_manager\r\n\r\n if wm.keyconfigs.addon:\r\n\r\n 
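# keymap: Shift + Numpad-Period opens the panel via wm.call_panel\r\n        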
km = wm.keyconfigs.addon.keymaps.new(name = '3D View Generic', space_type = 'VIEW_3D')\r\n kmi = km.keymap_items.new(idname='wm.call_panel', type='NUMPAD_PERIOD', value='PRESS',shift=True)\r\n kmi.properties.name = \"OBJ_PT_loc_rot_menu\"\r\n addon_keymaps.append((km, kmi)) \r\n\r\ndef unregister():\r\n bpy.utils.unregister_class(OBJ_OT_rot_loc)\r\n bpy.utils.unregister_class(OBJ_OT_rot_loc_cancel)\r\n bpy.utils.unregister_class(OBJ_PT_loc_rot_menu)\r\n bpy.utils.unregister_class(OBJ_OT_rot_loc_confirm)\r\n bpy.utils.unregister_class(FACE_OT_center)\r\n\r\n wm = bpy.context.window_manager\r\n kc = wm.keyconfigs.addon\r\n if kc:\r\n for km, kmi in addon_keymaps:\r\n km.keymap_items.remove(kmi)\r\n addon_keymaps.clear() \r\n\r\nif __name__ == \"__main__\":\r\n register()\r\n \r\n\r\n","sub_path":"rotate_around_active_and _cursor.py","file_name":"rotate_around_active_and _cursor.py","file_ext":"py","file_size_in_byte":14353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"376944626","text":"# ----------------------------------------------------------------#\n# Title: String Formatting Exercise Task 3\n# Change Log: (Who, When, What)\n# D. Rodriguez, 2019-01-26, Initial release\n# ----------------------------------------------------------------#\n\n\n# -- Processing --#\ndef format_string(numbers):\n form_string = ''\n for x in range(len(numbers)-1):\n form_string += '{:d}, '\n form_string += '{:d}'\n return len(numbers), form_string.format(*numbers)\n\n\n# -- Data --#\ntplNumbers = (2, 1, 10, 212, 754, 567)\n\n\n# -- Presentation (Input/Output) --#\nintNumCount, strNumbers = format_string(tplNumbers)\nprint('The ', intNumCount, ' formatted numbers are: ', strNumbers, sep='')\n","sub_path":"students/DanielRodriguez/Lesson03Assignment/String Formatting/strformat_lab03.py","file_name":"strformat_lab03.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"65044485","text":"import numpy as np\nimport obspy\nimport obspyh5\nfrom energyDetectorUtils import removeDoubleCounting\nfrom energyDetectorUtils import getFiles\nfrom energyDetectorUtils import getTriggers\nfrom energyDetectorUtils import getEnergyPeaks\nfrom energyDetectorUtils import saveWaveforms\nfrom energyDetectorUtils import saveDetections\nfrom energyDetectorUtils import testPlot\n\npath = \"/media/Data/Data/PIG/MSEED/noIR/\"\noutPath = \"/home/setholinger/Documents/Projects/PIG/detections/energy/run3/\"\nstat = \"PIG2\"\nchan = \"all\"\nfileType = \"MSEED\"\nfs = 100\n\n# specify two frequency bands, prominence, and allowable number of seconds between low and high frequency detections\nfreqLow = [0.01,1]\nfreqHigh = [1,10]\nprominence = 0.1\ntolerance = 120\nmultiplier = 10\n\n# specify window to pull template around detection in seconds\nbuffer = [2*60,3*60]\n\n# get all files of desired station and channel\nfiles = getFiles(chan,path,stat)\n\n# first day is garbage, so remove it\nfiles = files[1:]\n\n# scan a specific day (for testing)\n#day = \"2012-05-09\"\n#dayFile = path + stat + \"/HH*/\" + day + \".\" + stat + \".HH*.noIR.MSEED\"\n#files = [dayFile]\n\n# make empty arrays to store detection times\ndetShort = []\ndetLong = []\n\n# iterate through all filestrings\nfor f in files:\n\n # make empty arrays to store detection times\n detShortTemp = []\n detLongTemp = []\n\n # give some output\n print(\"Scanning \" + f + \"...\")\n\n\t# read data files for all channels into one stream 
object\n st = obspy.read(f)\n\n # basic preprocessing\n st.detrend(\"demean\")\n st.detrend(\"linear\")\n st.taper(max_percentage=0.01, max_length=10.)\n\n # copy for other bands\n stLow = st.copy()\n stHigh = st.copy()\n\n # filter the data\n stLow.filter(\"bandpass\",freqmin=freqLow[0],freqmax=freqLow[1])\n stHigh.filter(\"bandpass\",freqmin=freqHigh[0],freqmax=freqHigh[1])\n\n # run trigger-finding algorithm for each channel\n for s in range(len(st)):\n\n # make empty arrays to store detections from current channel\n detShortChan = []\n detLongChan = []\n detShortDay = []\n detLongDay = []\n\n # calculate kinetic energy and find peaks\n peaksLow,energyLow = getEnergyPeaks(stLow[s],prominence,tolerance,fs)\n peaksHigh,energyHigh = getEnergyPeaks(stHigh[s],prominence,tolerance,fs)\n\n # plot trace and energy peaks (for testing)\n #testPlot(energyHigh,peaksHigh,energyLow,peaksLow)\n\n # check if peaks are concurrent in each band\n for h in range(len(peaksHigh)):\n for l in range(len(peaksLow)):\n\n # skip to next iteration if low frequency detection is first\n if peaksLow[l] - peaksHigh[h] < 0:\n continue\n\n # get triggers when peaks are sufficiently close to each other\n detShortChan,detLongChan = getTriggers(st[s],energyLow,peaksLow[l:l+2],peaksHigh[h],tolerance,buffer[0],fs,detShortChan,detLongChan,multiplier*0.75)\n\n # remove double counting within current channel\n detShortChan = removeDoubleCounting(detShortChan,tolerance)\n detLongChan = removeDoubleCounting(detLongChan,tolerance*multiplier)\n\n # append to list for current channel\n detShortTemp.extend(detShortChan)\n detLongTemp.extend(detLongChan)\n\n # sort detections\n detShortTemp.sort()\n detLongTemp.sort()\n\n # if a detection is repeated 2 times, save it\n for d in range(len(detShortTemp)-1):\n if detShortTemp[d+1] - detShortTemp[d] < tolerance:\n detShortDay.append(detShortTemp[d])\n for d in range(len(detLongTemp)-1):\n if detLongTemp[d+1] - detLongTemp[d] < tolerance:\n detLongDay.append(detLongTemp[d])\n\n # remove double counting from daily list\n detShortDay = removeDoubleCounting(detShortDay,tolerance)\n detLongDay = removeDoubleCounting(detLongDay,tolerance*multiplier)\n\n # append to final list of detections\n detShort.extend(detShortDay)\n detLong.extend(detLongDay)\n\n # save waveform snippets of detections from current day\n saveWaveforms(detShortDay,st,buffer,outPath,'short')\n saveWaveforms(detLongDay,st,[buffer[0]*multiplier*0.75,buffer[1]*multiplier*1.5],outPath,'long')\n\n# save list of final detections\nsaveDetections(detShort,outPath,'short')\nsaveDetections(detLong,outPath,'long')\n","sub_path":"python/detection/energyDetector.py","file_name":"energyDetector.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"521183433","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 25 14:18:26 2021\n\n@author: nenad\n\"\"\"\n\n\n\"\"\"\nURL: https://leetcode.com/problems/climbing-stairs/\nDescription: \nYou are climbing a staircase. It takes n steps to reach the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n\n \n\nExample 1:\n\nInput: n = 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n1. 1 step + 1 step\n2. 2 steps\nExample 2:\n\nInput: n = 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 
2 steps + 1 step\n \n\nConstraints:\n\n1 <= n <= 45\n\"\"\"\nclass Solution:\n    def __init__(self):\n        self.memo = {1:1, 2:2}\n    # naive recursion, O(2^n)\n    def climbStairsNaive(self, n: int) -> int:\n        if n <= 2:\n            return n\n        return self.climbStairsNaive(n-1) + self.climbStairsNaive(n-2)\n    # top-down recursion with memoization, O(n)\n    def climbStairsMemo(self, n: int) -> int:\n        if n in self.memo:\n            return self.memo[n]\n        result = self.climbStairsMemo(n-1) + self.climbStairsMemo(n-2)\n        self.memo[n] = result\n        return result\n\n    # bottom-up DP, O(n)\n    def climbStairs(self, n: int) -> int:\n        if n <= 2:\n            return n\n        dp = [1] * (n+1)\n        dp[2] = 2\n        for i in range(3, n+1):\n            dp[i] = dp[i-1] + dp[i-2]\n        return dp[-1]\n","sub_path":"70.ClimbingStairs.py","file_name":"70.ClimbingStairs.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"346398535","text":"from django.conf.urls import url, include\n# from rest_framework.urlpatterns import format_suffix_patterns\nfrom . import views\n\nurlpatterns = [\n    # url(r'^$', 'api_root'),\n    url(r'^users/$', views.UserList.as_view(), name='user-list'),\n    url(r'^users/(?P<pk>\\d+)/$', views.UserDetail.as_view(), name='user-detail'),\n    url(r'^groups/$', views.GroupList.as_view(), name='group-list'),\n    url(r'^groups/(?P<pk>\\d+)/$', views.GroupDetail.as_view(), name='group-detail'),\n]\n\n# # Format suffixes\n# urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'api'])\n#\n# # Default login/logout views\n# urlpatterns += [\n#     url(r'^rest_auth/', include('rest_framework.urls', namespace='rest_framework'))\n# ]","sub_path":"backend/usersinfo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"203327168","text":"Import('*')\n\nINC = [ '#/vm/inc', '/usr/include/SDL2' ]\nLIBS = [ 'miniat', 'SDL2', 'SDL2_image' ]\nbuildProgram('MiniAT_Racer', ['Racer.cpp', 'Texture.cpp', 'main.cpp', \n\t'Bus_scanner.cpp', 'wp_collision.cpp'], \n\tCPPPATH=INC, LIBS=LIBS) \n\nbuildProgram('Track_editor', ['Editor.cpp', 'Texture.cpp'], CPPPATH=INC, LIBS=LIBS)\n\nenv.Install('cycle1_track.track')\n\n","sub_path":"cs370-Software-Engineering-as-a-Group/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"294790797","text":"import matplotlib.pyplot as plt\r\nfrom pandas import read_csv\r\nfrom pandas.plotting import scatter_matrix\r\n\r\n# Data_Analysis_Libraries\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\ndf_train = read_csv('D:\\ML-Program\\Titanic/train.csv')\r\n# shape, description, scatter matrix plot for the training data\r\nprint(' Shape = ', df_train.shape)\r\n\r\nprint('\\t',df_train.head())\r\n\r\n#Descriptive Statistics for a Dataset.\r\ndescription = df_train.describe()\r\nprint(description)\r\n\r\n\r\n#Create and Display a Scatter Plot Matrix.\r\nscatter_matrix(df_train)\r\nplt.show()\r\ndf_test = read_csv('D:\\ML-Program\\Titanic/test.csv')\r\n#draw a bar plot of survival by sex\r\nsns.barplot(x='Sex', y='Survived', data=df_train)\r\nplt.show()\r\n#print percentages of females vs. 
males that survive\r\nprint('Percentage of females_survived:', df_train['Survived'][df_train['Sex'] == 'female'].value_counts(normalize = True)[1]*100)\r\n\r\nprint('Percentage of males_survived:', df_train['Survived'][df_train['Sex'] == 'male'].value_counts(normalize = True)[1]*100)\r\n#draw a bar plot of survival by Pclass\r\nsns.barplot(x='Pclass', y='Survived', data=df_train)\r\nplt.show()\r\n\r\n#print percentage of people by Pclass that survived\r\nprint('Percentage of Pclass = 1_survived:', df_train['Survived'][df_train['Pclass'] == 1].value_counts(normalize = True)[1]*100)\r\n\r\nprint('Percentage of Pclass = 2_survived:', df_train['Survived'][df_train['Pclass'] == 2].value_counts(normalize = True)[1]*100)\r\n\r\nprint('Percentage of Pclass = 3_survived:', df_train['Survived'][df_train['Pclass'] == 3].value_counts(normalize = True)[1]*100)\r\n#draw a bar plot for SibSp vs. survival\r\nsns.barplot(x='SibSp', y='Survived', data=df_train)\r\nplt.show()\r\n# printing individual percent values for all of these.\r\nprint('Percentage of SibSp = 0_survived:', df_train['Survived'][df_train['SibSp'] == 0].value_counts(normalize = True)[1]*100)\r\n\r\nprint('Percentage of SibSp = 1_survived:', df_train['Survived'][df_train['SibSp'] == 1].value_counts(normalize = True)[1]*100)\r\n\r\nprint('Percentage of SibSp = 2_survived:', df_train['Survived'][df_train['SibSp'] == 2].value_counts(normalize = True)[1]*100)\r\n\r\n#draw a bar plot for Parch vs.survival\r\nsns.barplot(x='Parch', y='Survived', data=df_train)\r\nplt.show()\r\ndf_train['Age'] = df_train['Age'].fillna(-0.5)\r\ndf_test['Age'] = df_test['Age'].fillna(-0.5)\r\nbins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]\r\nlabels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']\r\ndf_train['AgeGroup'] = pd.cut(df_train['Age'], bins, labels = labels)\r\ndf_test['AgeGroup'] = pd.cut(df_test['Age'], bins, labels = labels)\r\n\r\n#draw a bar plot of Age vs. survival\r\nsns.barplot(x='AgeGroup', y='Survived', data=df_train)\r\nplt.show()\r\ndf_train['CabinBool'] = (df_train['Cabin'].notnull().astype('int'))\r\ndf_test['CabinBool'] = (df_test['Cabin'].notnull().astype('int'))\r\n\r\n#calculate percentages of CabinBool vs. survived\r\nprint('Percentage of CabinBool = 1_survived:', df_train['Survived'][df_train['CabinBool'] == 1].value_counts(normalize = True)[1]*100)\r\n\r\nprint('Percentage of CabinBool = 0_survived:', df_train['Survived'][df_train['CabinBool'] == 0].value_counts(normalize = True)[1]*100)\r\n#draw a bar plot of CabinBool vs. 
survival\r\nsns.barplot(x='CabinBool', y='Survived', data=df_train)\r\nplt.show()\r\nsns.pointplot(x='Pclass', y='Survived', hue='Sex', data=df_train,\r\n palette={'male': 'blue', 'female': 'pink'},\r\n markers=['*', 'o'], linestyles=['-', '--']);\r\nplt.show()\r\nprint(df_test.describe())\r\n# By dropping the Cabin feature since not a lot more useful information can be extracted from it.\r\ndf_train = df_train.drop(['Cabin'], axis = 1)\r\ndf_test = df_test.drop(['Cabin'], axis = 1)\r\n#Drop the Ticket feature since it's unlikely to yield any useful information\r\ndf_train = df_train.drop(['Ticket'], axis = 1)\r\ndf_test = df_test.drop(['Ticket'], axis = 1)\r\n\r\n# Fill the missing values in the Embarked feature\r\nprint('Number of people embarking in Southampton (S):')\r\nsouthampton = df_train[df_train['Embarked'] == 'S'].shape[0]\r\nprint(southampton)\r\n\r\nprint('Number of people embarking in Cherbourg (C):')\r\ncherbourg = df_train[df_train['Embarked'] == 'C'].shape[0]\r\nprint(cherbourg)\r\n\r\nprint('Number of people embarking in Queenstown (Q):')\r\nqueenstown = df_train[df_train['Embarked'] == 'Q'].shape[0]\r\nprint(queenstown)\r\n#replacing the missing values in the Embarked feature with S\r\ndf_train = df_train.fillna({'Embarked': 'S'})\r\n\r\n##create a combined group of both datasets\r\ncombine = [df_train, df_test]\r\nfor dataset in combine:\r\n dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)\r\n\r\nprint(pd.crosstab(df_train['Title'], df_train['Sex']))\r\n#replace various titles with more common names\r\nfor dataset in combine:\r\n dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col',\r\n 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')\r\n \r\n dataset['Title'] = dataset['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal')\r\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\r\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\r\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\r\n\r\nprint(df_train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean())\r\n#map each of the title groups to a numerical value\r\ntitle_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Royal\": 5, \"Rare\": 6}\r\nfor dataset in combine:\r\n dataset['Title'] = dataset['Title'].map(title_mapping)\r\n dataset['Title'] = dataset['Title'].fillna(0)\r\n\r\nprint(df_train.head())\r\n# fill missing age with mode age group for each title\r\nmr_age = df_train[df_train['Title'] == 1]['AgeGroup'].mode() #Young Adult\r\nmiss_age = df_train[df_train['Title'] == 2]['AgeGroup'].mode() #Student\r\nmrs_age = df_train[df_train['Title'] == 3]['AgeGroup'].mode() #Adult\r\nmaster_age = df_train[df_train['Title'] == 4]['AgeGroup'].mode() #Baby\r\nroyal_age = df_train[df_train['Title'] == 5]['AgeGroup'].mode() #Adult\r\nrare_age = df_train[df_train['Title'] == 6]['AgeGroup'].mode() #Adult\r\n\r\nage_title_mapping = {1: 'Young Adult', 2: 'Student', 3: 'Adult', 4: 'Baby', 5: 'Adult', 6: 'Adult'}\r\n\r\nfor x in range(len(df_train['AgeGroup'])):\r\n if df_train['AgeGroup'][x] == 'Unknown':\r\n df_train['AgeGroup'][x] = age_title_mapping[df_train['Title'][x]]\r\n \r\nfor x in range(len(df_test['AgeGroup'])):\r\n if df_test['AgeGroup'][x] == 'Unknown':\r\n df_test['AgeGroup'][x] = age_title_mapping[df_test['Title'][x]]\r\n#map each Age value to a numerical value\r\nage_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}\r\ndf_train['AgeGroup'] = 
df_train['AgeGroup'].map(age_mapping)\r\ndf_test['AgeGroup'] = df_test['AgeGroup'].map(age_mapping)\r\n\r\nprint(df_train.head())\r\n\r\n#dropping the Age feature for now, might change\r\ndf_train = df_train.drop(['Age'], axis = 1)\r\ndf_test = df_test.drop(['Age'], axis = 1)\r\n#drop the name feature since it contains no more useful information.\r\ndf_train = df_train.drop(['Name'], axis = 1)\r\ndf_test = df_test.drop(['Name'], axis = 1)\r\n#map each Sex value to a numerical value\r\nsex_mapping = {\"male\": 0, \"female\": 1}\r\ndf_train['Sex'] = df_train['Sex'].map(sex_mapping)\r\ndf_test['Sex'] = df_test['Sex'].map(sex_mapping)\r\n\r\nprint(df_train.head())\r\n\r\n\r\n\r\n\r\n#map each Embarked value to a numerical value\r\nembarked_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\r\ndf_train['Embarked'] = df_train['Embarked'].map(embarked_mapping)\r\ndf_test['Embarked'] = df_test['Embarked'].map(embarked_mapping)\r\n\r\ndf_train.head()\r\n\r\n#fill in missing Fare value in test set based on mean fare for that Pclass \r\nfor x in range(len(df_test['Fare'])):\r\n if pd.isnull(df_test['Fare'][x]):\r\n pclass = df_test['Pclass'][x] #Pclass = 3\r\n df_test['Fare'][x] = round(df_train[df_train['Pclass'] == pclass]['Fare'].mean(), 4)\r\n#map Fare values into groups of numerical values\r\ndf_train['FareBand'] = pd.qcut(df_train['Fare'], 4, labels = [1, 2, 3, 4])\r\ndf_test['FareBand'] = pd.qcut(df_test['Fare'], 4, labels = [1, 2, 3, 4])\r\n#drop Fare values\r\ndf_train = df_train.drop(['Fare'], axis = 1)\r\ndf_test = df_test.drop(['Fare'], axis = 1)\r\n#check train data\r\nprint(df_train.head())\r\n#check test data\r\ndf_test.head()\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import train_test_split\r\n\r\npredictors = df_train.drop(['Survived', 'PassengerId'], axis=1)\r\ntarget = df_train['Survived']\r\nx_train, x_val, y_train, y_val = train_test_split(predictors, target, test_size = 0.22, random_state = 0)\r\nlogreg = LogisticRegression()\r\nlogreg.fit(x_train, y_train)\r\ny_pred = logreg.predict(x_val)\r\nacc_logreg = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via logistic regression =', acc_logreg)\r\n\r\n# Gaussian Naive Bayes\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ngaussian = GaussianNB()\r\ngaussian.fit(x_train, y_train)\r\ny_pred = gaussian.predict(x_val)\r\nacc_gaussian = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via Gaussian Naive Bayes =',acc_gaussian)\r\n# Support Vector Machines\r\nfrom sklearn.svm import SVC\r\n\r\nsvc = SVC()\r\nsvc.fit(x_train, y_train)\r\ny_pred = svc.predict(x_val)\r\nacc_svc = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via Support Vector Machines =',acc_svc)\r\n# Linear SVC\r\nfrom sklearn.svm import LinearSVC\r\n\r\nlinear_svc = LinearSVC()\r\nlinear_svc.fit(x_train, y_train)\r\ny_pred = linear_svc.predict(x_val)\r\nacc_linear_svc = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via Linear SVC =',acc_linear_svc)\r\n# Perceptron\r\nfrom sklearn.linear_model import Perceptron\r\n\r\nperceptron = Perceptron()\r\nperceptron.fit(x_train, y_train)\r\ny_pred = perceptron.predict(x_val)\r\nacc_perceptron = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via Perceptron =',acc_perceptron)\r\n#Decision Tree\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\ndecisiontree = 
DecisionTreeClassifier()\r\ndecisiontree.fit(x_train, y_train)\r\ny_pred = decisiontree.predict(x_val)\r\nacc_decisiontree = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via Decision Tree =',acc_decisiontree)\r\n# Random Forest\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nrandomforest = RandomForestClassifier()\r\nrandomforest.fit(x_train, y_train)\r\ny_pred = randomforest.predict(x_val)\r\nacc_randomforest = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via Random Forest =',acc_randomforest)\r\n# KNN or k-Nearest Neighbors\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nknn = KNeighborsClassifier()\r\nknn.fit(x_train, y_train)\r\ny_pred = knn.predict(x_val)\r\nacc_knn = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via k-Nearest Neighbors =',acc_knn)\r\n# Stochastic Gradient Descent\r\nfrom sklearn.linear_model import SGDClassifier\r\n\r\nsgd = SGDClassifier()\r\nsgd.fit(x_train, y_train)\r\ny_pred = sgd.predict(x_val)\r\nacc_sgd = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via Stochastic Gradient Descent =',acc_sgd)\r\n #Gradient Boosting Classifier\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\n\r\ngbk = GradientBoostingClassifier()\r\ngbk.fit(x_train, y_train)\r\ny_pred = gbk.predict(x_val)\r\nacc_gbk = round(accuracy_score(y_pred, y_val) * 100, 2)\r\nprint('Accuracy via Gradient Boosting Classifier =',acc_gbk)\r\nmodels = pd.DataFrame({\r\n 'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', \r\n 'Random Forest', 'Naive Bayes', 'Perceptron', 'Linear SVC', \r\n 'Decision Tree', 'Stochastic Gradient Descent', 'Gradient Boosting Classifier'],\r\n 'Score': [acc_svc, acc_knn, acc_logreg, \r\n acc_randomforest, acc_gaussian, acc_perceptron,acc_linear_svc, acc_decisiontree,\r\n acc_sgd, acc_gbk]})\r\nprint(models.sort_values(by='Score', ascending=False))\r\na = models.sort_values(by='Score', ascending=False)\r\nids = df_test['PassengerId']\r\ngi = df_test.drop('PassengerId', axis=1)\r\ny_pred_new = randomforest.predict(gi)\r\n#set the output as a dataframe and convert to csv file named submission.csv\r\noutput = pd.DataFrame({ 'PassengerId' : ids, 'Survived': y_pred_new })\r\noutput.to_csv('D:\\ML-Program\\Titanic/submission.csv', index=False)\r\n'''\r\n#set ids as PassengerId and predict survival \r\nids = df_test['PassengerId']\r\ngi = df_test.drop('PassengerId', axis=1)\r\ny_pred_new = randomforest.predict(gi)\r\n#set the output as a dataframe and convert to csv file named submission.csv\r\noutput = pd.DataFrame({ 'PassengerId' : ids, 'Survived': y_pred_new })\r\noutput.to_csv('D:\\ML-Program\\Titanic/submission.csv', index=False)'''\r\n","sub_path":"ti.py","file_name":"ti.py","file_ext":"py","file_size_in_byte":12727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"23343852","text":"#!/usr/bin/env python3\n\n# Python requirements: pytest requests\n# install curieconfctl:\n# (cd ../curiefense/curieconf/utils ; pip3 install .)\n# (cd ../curiefense/curieconf/client ; pip3 install .)\n#\n# To run this with minikube (does not support IPv6):\n#\n# pytest --base-protected-url http://$(minikube ip):30081 --base-conf-url http://$(minikube ip):30000/api/v2/ --base-ui-url http://$(minikube ip):30080 --elasticsearch-url http://$IP:30200 . 
# pylint: disable=line-too-long\n#\n# To run this with docker-compose:\n# pytest --base-protected-url http://localhost:30081/ --base-conf-url http://localhost:30000/api/v2/ --base-ui-url http://localhost:30080 --elasticsearch-url http://localhost:9200 . # pylint: disable=line-too-long\n\n# pylint: disable=too-many-lines,too-many-public-methods\n# pylint: disable=too-many-arguments,too-few-public-methods,too-many-statements\n# pylint: disable=missing-function-docstring,missing-module-docstring\n# pylint: disable=missing-class-docstring\n\n# This is not really a problem for fixtures\n# pylint: disable=redefined-outer-name\n\n# This is often wrong: fixtures are not mentioned in the function, but they\n# define the required test environment\n# pylint: disable=unused-argument\n\n# This follows examples from the pytest doc: tests are class methods, even\n# though they don't use self\n# pylint: disable=no-self-use\n\n\nfrom typing import List, Optional\nfrom urllib.parse import urlparse\nimport json\nimport logging\nimport random\nimport string\nimport subprocess\nimport time\nimport pytest\nimport requests\n\nlog = logging.getLogger(\"e2e\")\n\n# --- Helpers ---\nTEST_CONFIG_NAME = \"master\"\n\n\nclass CliHelper:\n def __init__(self, base_url):\n self._base_url = base_url\n self._initial_version_cache = None\n\n def call(self, args, inputjson=None):\n logging.info(\"Calling CLI with arguments: %s\", args)\n cmd = [\"curieconfctl\", \"-u\", self._base_url, \"-o\", \"json\"]\n cmd += args.split(\" \")\n indata = None\n if inputjson:\n indata = json.dumps(inputjson).encode(\"utf-8\")\n\n process = subprocess.run(\n cmd,\n shell=False,\n input=indata,\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n if process.stdout:\n logging.debug(\"CLI output: %s\", process.stdout)\n\n try:\n return json.loads(process.stdout.decode(\"utf-8\"))\n except json.JSONDecodeError:\n return process.stdout.decode(\"utf-8\")\n else:\n return []\n\n def delete_test_config(self):\n self.call(\"conf delete test\")\n\n def initial_version(self):\n if not self._initial_version_cache:\n versions = self.call(\"conf list-versions master\")\n if \"version\" not in versions[-3]:\n print(\"Unsupported curieconfctl output\", versions)\n raise TypeError(\"Unsupported curieconfctl output\")\n self._initial_version_cache = versions[-3][\"version\"]\n return self._initial_version_cache\n\n def empty_acl(self):\n version = self.initial_version()\n return self.call(f\"doc get master aclprofiles --version {version}\")\n\n def revert_and_enable(self, acl=True, waf=True):\n version = self.initial_version()\n self.call(f\"conf revert {TEST_CONFIG_NAME} {version}\")\n securitypolicy = self.call(f\"doc get {TEST_CONFIG_NAME} securitypolicies\")\n securitypolicy[0][\"map\"][0][\"acl_active\"] = acl\n securitypolicy[0][\"map\"][0][\"waf_active\"] = waf\n self.call(\n f\"doc update {TEST_CONFIG_NAME} securitypolicies /dev/stdin\",\n inputjson=securitypolicy,\n )\n\n def publish_and_apply(self):\n buckets = self.call(\"key get system publishinfo\")\n\n for bucket in buckets[\"buckets\"]:\n if bucket[\"name\"] == \"prod\":\n url = bucket[\"url\"]\n self.call(f\"tool publish master {url}\")\n time.sleep(20)\n\n\n@pytest.fixture(scope=\"session\")\ndef cli(request):\n return CliHelper(request.config.getoption(\"--base-conf-url\"))\n\n\nclass TargetHelper:\n def __init__(self, base_url):\n self._base_url = base_url\n\n def query(\n self, path=\"/\", suffix=\"\", method=\"GET\", headers=None, srcip=None, **kwargs\n ):\n # 
specifying a path helps spot tests easily in the access log\n if headers is None:\n headers = {}\n if srcip is not None:\n headers[\"X-Forwarded-For\"] = srcip\n res = requests.request(\n method=method, url=self._base_url + path + suffix, headers=headers, **kwargs\n )\n return res\n\n def is_reachable(self, *args, **kwargs):\n res = self.query(*args, **kwargs)\n return res.status_code in [200, 404]\n\n def authority(self) -> str:\n return urlparse(self._base_url).netloc\n\n\n@pytest.fixture(scope=\"session\")\ndef target(request):\n url = request.config.getoption(\"--base-protected-url\").rstrip(\"/\")\n return TargetHelper(url)\n\n\n# geo=US, company=SPRINTLINK, asn=1239\nIP4_US = \"199.0.0.1\"\n\n# geo=JP, company=Softbank BB Corp., asn=17676\nIP4_JP = \"126.0.0.1\"\n\n# geo=AU, company=CLOUDFLARENET, asn=13335\nIP4_CLOUDFLARE = \"1.0.0.0\"\n\n# geo=FR, company=Orange, asn=3215\nIP4_ORANGE = \"2.0.0.0\"\n\nIP6_1 = \"0000:0000:0000:0000:0000:0000:0000:0001\"\nIP6_2 = \"0000:0000:0000:0000:0000:0000:0000:0002\"\n\n\nclass LogHelper:\n def __init__(self, base_url, es_url):\n self._base_url = base_url\n self._es_url = es_url + \"/_search\"\n\n def check_log_pattern(self, pattern):\n data = {\n \"query\": {\"bool\": {\"must\": {\"match\": {\"request.attributes.uri\": pattern}}}}\n }\n res = requests.get(self._es_url, json=data)\n nbhits = res.json()[\"hits\"][\"total\"][\"value\"]\n if nbhits == 1:\n return True\n else:\n print(\"Pattern %r\" % (pattern,))\n print(\"Request result %r\" % (res,))\n return False\n\n\n@pytest.fixture(scope=\"session\")\ndef log_fixture(request):\n url = request.config.getoption(\"--base-ui-url\").rstrip(\"/\")\n es_url = request.config.getoption(\"--elasticsearch-url\").rstrip(\"/\")\n return LogHelper(url, es_url)\n\n\nclass ACLHelper:\n def __init__(self, cli):\n self._cli = cli\n\n def set_acl(self, updates: dict):\n acl = self._cli.empty_acl()\n # update acl\n for key, value in updates.items():\n acl[0][key].append(value)\n self._cli.call(\n f\"doc update {TEST_CONFIG_NAME} aclprofiles /dev/stdin\", inputjson=acl\n )\n\n def reset_and_set_acl(self, updates: dict):\n self._cli.revert_and_enable()\n self.set_acl(updates)\n self._cli.publish_and_apply()\n\n\n@pytest.fixture(scope=\"session\")\ndef acl(cli):\n return ACLHelper(cli)\n\n\n@pytest.fixture(scope=\"class\")\ndef default_config(cli):\n cli.revert_and_enable()\n cli.publish_and_apply()\n\n\n@pytest.fixture(scope=\"function\", params=[\"headers\", \"cookies\", \"params\"])\ndef section(request):\n return request.param\n\n\n# --- Tests ---\n\n\nclass TestLogs:\n def test_logs(self, default_config, cli, target, log_fixture):\n test_pattern = \"/test\" + \"\".join(\n [random.choice(string.ascii_lowercase) for i in range(20)]\n )\n assert target.is_reachable(test_pattern)\n time.sleep(10)\n assert log_fixture.check_log_pattern(test_pattern)\n\n\nclass TestACL:\n def test_enforce_deny_all(self, acl, target):\n acl.reset_and_set_acl({\"force_deny\": \"all\"})\n assert not target.is_reachable(\"/deny-all\")\n\n def test_passthrough_all(self, acl, target):\n acl.reset_and_set_acl({\"deny\": \"all\", \"passthrough\": \"all\"})\n assert target.is_reachable(\"/deny-passthrough-all\")\n\n def test_allow_bot_all(self, acl, target):\n acl.reset_and_set_acl({\"allow_bot\": \"all\"})\n assert not target.is_reachable(\n \"/allow_bot-all\", headers={\"Long-Header\": \"not_alphanum\" * 1500}\n )\n assert target.is_reachable()\n\n def test_deny_bot_all(self, acl, target):\n acl.reset_and_set_acl({\"deny_bot\": \"all\"})\n 
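# per the assertions below, deny_bot serves the bot-challenge page:\n        # status code 247 with the rbzns bootstrap script injected\n        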
res = target.query(path=\"/deny_bot-all\")\n assert res.status_code == 247\n assert \";;window.rbzns={bereshit:\" in res.text\n\n def test_allow_all(self, acl, target):\n acl.reset_and_set_acl({\"allow\": \"all\", \"deny\": \"all\"})\n assert not target.is_reachable(\n \"/allow-deny-all\", headers={\"Long-Header\": \"not_alphanum\" * 1500}\n )\n assert target.is_reachable()\n\n def test_deny_all(self, acl, target):\n acl.reset_and_set_acl({\"deny\": \"all\"})\n assert not target.is_reachable(\"/deny-all\")\n\n def test_ip_asn(self, acl, target):\n acl.reset_and_set_acl({\"deny\": \"asn:1239\"})\n assert not target.is_reachable(\"/acl-asn\", srcip=IP4_US)\n assert target.is_reachable(\"/\")\n\n def test_ipv4(self, acl, target):\n acl.reset_and_set_acl({\"deny\": \"ip:199-0-0-1\"})\n assert not target.is_reachable(\"/acl-ipv4\", srcip=IP4_US)\n assert target.is_reachable(\"/\")\n\n def test_geo(self, acl, target):\n acl.reset_and_set_acl({\"deny\": \"geo:united-states\"})\n assert not target.is_reachable(\"/acl-geo\", srcip=IP4_US)\n assert target.is_reachable(\"/acl-geo\", srcip=IP4_JP)\n assert target.is_reachable(\"/\")\n\n def test_ipv6(self, acl, target):\n acl.reset_and_set_acl({\"deny\": \"ip:0000:0000:0000:0000:0000:0000:0000:0001\"})\n assert not target.is_reachable(\"/acl-ipv6\", srcip=IP6_1)\n assert target.is_reachable(\"/\")\n\n\n# --- Rate limit tests ---\n\n\ndef gen_rl_rules(authority):\n rl_rules = []\n prof_rules = []\n map_path = {}\n\n def build_profiling_rule(id: str, name: str, prefix: str, **kwargs) -> List[str]:\n for n in [\"cookies\", \"headers\", \"args\", \"attrs\"]:\n r: Optional[str] = kwargs.get(\"%s_%s\" % (prefix, n))\n if r is None:\n continue\n if isinstance(r, dict):\n (k, v) = list(r.items())[0]\n if n == \"attrs\":\n if k == \"tags\":\n return [v]\n entry = [k, v, \"annotation\"]\n else:\n entry = [n, [k, v], \"annotation\"]\n else:\n entry = [n, r, \"annotation\"]\n prof_rules.append(\n {\n \"id\": id,\n \"name\": name,\n \"source\": \"self-managed\",\n \"mdate\": \"2020-11-22T00:00:00.000Z\",\n \"description\": \"E2E test tag rules\",\n \"entries_relation\": \"OR\",\n \"active\": True,\n \"tags\": [id],\n \"rule\": {\n \"relation\": \"OR\",\n \"sections\": [\n {\n \"relation\": \"OR\",\n \"entries\": [entry],\n },\n ],\n },\n }\n )\n return [id]\n return []\n\n def add_rl_rule(\n path, action_ext=None, subaction_ext=None, param_ext=None, **kwargs\n ):\n rule_id = f\"e2e1{len(rl_rules):0>9}\"\n incl_id = f\"incl{len(rl_rules):0>9}\"\n excl_id = f\"excl{len(rl_rules):0>9}\"\n\n if subaction_ext is None:\n subaction_ext = {}\n if action_ext is None:\n action_ext = {}\n if param_ext is None:\n param_ext = {}\n map_path[path] = rule_id\n incl = build_profiling_rule(incl_id, incl_id, \"incl\", **kwargs)\n excl = build_profiling_rule(excl_id, excl_id, \"excl\", **kwargs)\n rl_rules.append(\n {\n \"id\": rule_id,\n \"name\": \"Rate Limit Rule 3/10 \" + path,\n \"description\": \"3 requests per 10 seconds\",\n \"timeframe\": \"10\",\n \"limit\": \"3\",\n \"action\": {\n \"type\": kwargs.get(\"action\", \"default\"),\n \"params\": {\n \"action\": {\n \"type\": kwargs.get(\"subaction\", \"default\"),\n \"params\": kwargs.get(\"subaction_params\", {}),\n **subaction_ext,\n },\n **param_ext,\n },\n **action_ext,\n },\n \"include\": incl,\n \"exclude\": excl,\n \"key\": kwargs.get(\"key\", [{\"attrs\": \"ip\"}]),\n \"pairwith\": kwargs.get(\"pairwith\", {\"self\": \"self\"}),\n }\n )\n\n # RL scope\n add_rl_rule(\n \"scope-cookies\",\n incl_cookies={\"include\": 
\"true\"},\n excl_cookies={\"exclude\": \"true\"},\n )\n add_rl_rule(\n \"scope-headers\",\n incl_headers={\"include\": \"true\"},\n excl_headers={\"exclude\": \"true\"},\n )\n add_rl_rule(\n \"scope-params\", incl_args={\"include\": \"true\"}, excl_args={\"exclude\": \"true\"}\n )\n add_rl_rule(\n \"scope-path\",\n incl_attrs={\"path\": \"/scope-path/include/\"},\n excl_attrs={\"path\": \"/scope-path/include/exclude/\"},\n )\n add_rl_rule(\n \"scope-uri\",\n incl_attrs={\"uri\": \"/scope-uri/include/\"},\n excl_attrs={\"uri\": \"/scope-uri/include/exclude/\"},\n )\n add_rl_rule(\"scope-ipv4-include\", incl_attrs={\"ip\": IP4_US})\n add_rl_rule(\"scope-ipv4-exclude\", excl_attrs={\"ip\": IP4_US})\n add_rl_rule(\"scope-country-include\", incl_attrs={\"country\": \"us\"})\n add_rl_rule(\"scope-country-exclude\", excl_attrs={\"country\": \"us\"})\n add_rl_rule(\"scope-company-include\", incl_attrs={\"company\": \"CLOUDFLARENET\"})\n add_rl_rule(\"scope-company-exclude\", excl_attrs={\"company\": \"CLOUDFLARENET\"})\n add_rl_rule(\"scope-provider-include\", incl_attrs={\"asn\": \"1239\"})\n add_rl_rule(\"scope-provider-exclude\", excl_attrs={\"asn\": \"1239\"})\n add_rl_rule(\"scope-method-include\", incl_attrs={\"method\": \"GET\"})\n add_rl_rule(\"scope-method-exclude\", excl_attrs={\"method\": \"GET\"})\n add_rl_rule(\"scope-query-include\", incl_attrs={\"query\": \"QUERY\"})\n add_rl_rule(\"scope-query-exclude\", excl_attrs={\"query\": \"QUERY\"})\n add_rl_rule(\"scope-authority-include\", incl_attrs={\"authority\": authority})\n add_rl_rule(\"scope-authority-exclude\", excl_attrs={\"authority\": authority})\n add_rl_rule(\n \"scope-other-authority-include\", incl_attrs={\"authority\": \"doesnotmatch\"}\n )\n add_rl_rule(\n \"scope-other-authority-exclude\", excl_attrs={\"authority\": \"doesnotmatch\"}\n )\n\n # RL count by 1 value\n add_rl_rule(\"countby-cookies\", key=[{\"cookies\": \"countby\"}])\n add_rl_rule(\"countby-headers\", key=[{\"headers\": \"countby\"}])\n add_rl_rule(\"countby-params\", key=[{\"args\": \"countby\"}])\n add_rl_rule(\"countby-ipv4\", key=[{\"attrs\": \"ip\"}])\n add_rl_rule(\"countby-ipv6\", key=[{\"attrs\": \"ip\"}])\n # \"Provider\" in the UI maps to \"asn\"\n add_rl_rule(\"countby-provider\", key=[{\"attrs\": \"asn\"}])\n add_rl_rule(\"countby-uri\", key=[{\"attrs\": \"uri\"}])\n add_rl_rule(\"countby-path\", key=[{\"attrs\": \"path\"}])\n add_rl_rule(\"countby-query\", key=[{\"attrs\": \"query\"}])\n add_rl_rule(\"countby-method\", key=[{\"attrs\": \"method\"}])\n add_rl_rule(\"countby-company\", key=[{\"attrs\": \"company\"}])\n add_rl_rule(\"countby-country\", key=[{\"attrs\": \"country\"}])\n add_rl_rule(\"countby-authority\", key=[{\"attrs\": \"authority\"}])\n # RL count by 2 value (same type)\n add_rl_rule(\n \"countby2-cookies\", key=[{\"cookies\": \"countby1\"}, {\"cookies\": \"countby2\"}]\n )\n add_rl_rule(\n \"countby2-headers\", key=[{\"headers\": \"countby1\"}, {\"headers\": \"countby2\"}]\n )\n add_rl_rule(\"countby2-params\", key=[{\"args\": \"countby1\"}, {\"args\": \"countby2\"}])\n # RL count by 2 value (different type)\n add_rl_rule(\n \"countby-cookies-headers\", key=[{\"cookies\": \"countby\"}, {\"headers\": \"countby\"}]\n )\n add_rl_rule(\n \"countby-headers-params\", key=[{\"headers\": \"countby\"}, {\"args\": \"countby\"}]\n )\n add_rl_rule(\n \"countby-params-cookies\", key=[{\"args\": \"countby\"}, {\"cookies\": \"countby\"}]\n )\n # RL Event condition\n add_rl_rule(\"event-cookies\", pairwith={\"cookies\": \"event\"})\n 
add_rl_rule(\"event-headers\", pairwith={\"headers\": \"event\"})\n add_rl_rule(\"event-params\", pairwith={\"args\": \"event\"})\n add_rl_rule(\"event-ipv4\", key=[{\"attrs\": \"path\"}], pairwith={\"attrs\": \"ip\"})\n add_rl_rule(\"event-ipv6\", key=[{\"attrs\": \"path\"}], pairwith={\"attrs\": \"ip\"})\n # \"Provider\" in the UI maps to \"asn\"\n add_rl_rule(\"event-provider\", key=[{\"attrs\": \"path\"}], pairwith={\"attrs\": \"asn\"})\n add_rl_rule(\"event-uri\", pairwith={\"attrs\": \"uri\"})\n add_rl_rule(\"event-path\", pairwith={\"attrs\": \"path\"})\n add_rl_rule(\"event-query\", pairwith={\"attrs\": \"query\"})\n add_rl_rule(\"event-method\", pairwith={\"attrs\": \"method\"})\n add_rl_rule(\"event-company\", key=[{\"attrs\": \"path\"}], pairwith={\"attrs\": \"company\"})\n add_rl_rule(\"event-country\", key=[{\"attrs\": \"path\"}], pairwith={\"attrs\": \"country\"})\n add_rl_rule(\"event-authority\", pairwith={\"attrs\": \"authority\"})\n # action\n add_rl_rule(\"action-challenge\", action=\"challenge\")\n add_rl_rule(\"action-monitor\", action=\"monitor\")\n add_rl_rule(\n \"action-response\",\n action=\"response\",\n param_ext={\"status\": \"123\", \"content\": \"Response body\"},\n )\n add_rl_rule(\n \"action-redirect\",\n action=\"redirect\",\n param_ext={\"status\": \"124\", \"location\": \"/redirect/\"},\n )\n add_rl_rule(\n \"action-ban-503\",\n action=\"ban\",\n subaction=\"default\",\n param_ext={\"duration\": \"10\"},\n excl_attrs={\"tags\": \"allowlist\"},\n incl_attrs={\"tags\": \"blocklist\"},\n )\n add_rl_rule(\n \"action-ban-challenge\",\n action=\"ban\",\n subaction=\"challenge\",\n param_ext={\"duration\": \"10\"},\n subaction_params={\"action\": {\"type\": \"default\", \"params\": {}}},\n )\n add_rl_rule(\n \"action-ban-tagonly\",\n action=\"ban\",\n subaction=\"monitor\",\n param_ext={\"duration\": \"10\"},\n subaction_params={\"action\": {\"type\": \"default\", \"params\": {}}},\n )\n add_rl_rule(\n \"action-ban-response\",\n action=\"ban\",\n subaction=\"response\",\n param_ext={\"status\": \"123\", \"duration\": \"10\", \"content\": \"Content\"},\n subaction_params={\"content\": \"Response body\", \"status\": \"123\"},\n )\n add_rl_rule(\n \"action-ban-redirect\",\n action=\"ban\",\n subaction=\"redirect\",\n param_ext={\"duration\": \"10\"},\n subaction_ext={\"status\": \"124\", \"duration\": \"10\", \"location\": \"/redirect/\"},\n subaction_params={\n \"location\": \"/redirect\",\n \"status\": \"301\",\n \"action\": {\"type\": \"default\", \"params\": {}},\n },\n )\n add_rl_rule(\n \"action-ban-header\",\n action=\"ban\",\n subaction=\"request_header\",\n param_ext={\"duration\": \"10\"},\n subaction_ext={\"headers\": \"Header-Name\"},\n subaction_params={\n \"headers\": {\"foo\": \"bar\"},\n \"action\": {\"type\": \"default\", \"params\": {}},\n },\n )\n add_rl_rule(\n \"action-header\",\n action=\"request_header\",\n action_ext={\"headers\": \"Header-Name\"},\n param_ext={\"headers\": {\"foo\": \"bar\"}},\n )\n\n rl_securitypolicy = [\n {\n \"id\": \"__default__\",\n \"name\": \"default entry\",\n \"match\": \"__default__\",\n \"map\": [\n {\n \"name\": \"default\",\n \"match\": \"/\",\n \"acl_profile\": \"__default__\",\n \"acl_active\": True,\n \"waf_profile\": \"__default__\",\n \"waf_active\": True,\n \"limit_ids\": [],\n }\n ]\n + [\n {\n \"name\": k,\n \"match\": f\"/{k}/\",\n \"acl_profile\": \"__default__\",\n \"acl_active\": True,\n \"waf_profile\": \"__default__\",\n \"waf_active\": True,\n \"limit_ids\": [v],\n }\n for k, v in 
map_path.items()\n ],\n }\n ]\n return (rl_rules, rl_securitypolicy, prof_rules)\n\n\n@pytest.fixture(scope=\"class\")\ndef ratelimit_config(cli, target):\n cli.revert_and_enable()\n # Add new RL rules\n rl_rules = cli.call(f\"doc get {TEST_CONFIG_NAME} ratelimits\")\n (new_rules, new_securitypolicy, new_profiling) = gen_rl_rules(target.authority())\n rl_rules.extend(new_rules)\n # Apply new profiling\n cli.call(\n f\"doc update {TEST_CONFIG_NAME} globalfilters /dev/stdin\",\n inputjson=new_profiling,\n )\n # Apply rl_rules\n cli.call(f\"doc update {TEST_CONFIG_NAME} ratelimits /dev/stdin\", inputjson=rl_rules)\n # Apply new_securitypolicy\n cli.call(\n f\"doc update {TEST_CONFIG_NAME} securitypolicies /dev/stdin\",\n inputjson=new_securitypolicy,\n )\n cli.publish_and_apply()\n\n\nclass TestRateLimit:\n def test_ratelimit_scope_include(self, target, ratelimit_config, section):\n # rate limit: max 3 requests within 10 seconds\n param = {section: {\"include\": \"true\"}}\n for i in range(1, 4):\n assert target.is_reachable(\n f\"/scope-{section}/include/{i}\", **param\n ), f\"Request #{i} for {section} should be allowed\"\n assert not target.is_reachable(\n f\"/scope-{section}/include/4\", **param\n ), f\"Request #4 for {section} should be blocked by the rate limit\"\n time.sleep(10)\n assert target.is_reachable(\n f\"/scope-{section}/include/5\", **param\n ), f\"Request #5 for {section} should be allowed\"\n\n def test_ratelimit_scope_include_exclude(self, target, ratelimit_config, section):\n # rate limit: max 3 requests within 10 seconds\n param = {section: {\"include\": \"true\", \"exclude\": \"true\"}}\n for i in range(1, 5):\n assert target.is_reachable(\n f\"/scope-{section}/include-exclude/{i}\", **param\n ), f\"Request #{i} for {section} should be allowed\"\n\n def test_ratelimit_scope_exclude(self, target, ratelimit_config, section):\n # rate limit: max 3 requests within 10 seconds\n param = {section: {\"exclude\": \"true\"}}\n for i in range(1, 5):\n assert target.is_reachable(\n f\"/scope-{section}/exclude/{i}\", **param\n ), f\"Request #{i} for {section} should be allowed\"\n\n def test_ratelimit_scope_path_include(self, target, ratelimit_config):\n # rate limit: max 3 requests within 10 seconds\n for i in range(1, 4):\n assert target.is_reachable(\n f\"/scope-path/include/{i}\"\n ), f\"Request #{i} for path should be allowed\"\n assert not target.is_reachable(\n \"/scope-path/include/4\"\n ), \"Request #4 for path should be blocked by the rate limit\"\n time.sleep(10)\n assert target.is_reachable(\n \"/scope-path/include/5\"\n ), \"Request #5 for path should be allowed\"\n\n def test_ratelimit_scope_path_include_exclude(self, target, ratelimit_config):\n # rate limit: max 3 requests within 10 seconds\n for i in range(1, 5):\n assert target.is_reachable(\n f\"/scope-path/include/exclude/{i}\"\n ), f\"Request #{i} for path should be allowed\"\n\n def test_ratelimit_scope_uri_include(self, target, ratelimit_config):\n # rate limit: max 3 requests within 10 seconds\n for i in range(1, 4):\n assert target.is_reachable(\n f\"/scope-uri/include/{i}\"\n ), f\"Request #{i} for uri should be allowed\"\n assert not target.is_reachable(\n \"/scope-uri/include/4\"\n ), \"Request #4 for uri should be blocked by the rate limit\"\n time.sleep(10)\n assert target.is_reachable(\n \"/scope-uri/include/5\"\n ), \"Request #5 for uri should be allowed\"\n\n def test_ratelimit_scope_uri_include_exclude(self, target, ratelimit_config):\n # rate limit: max 3 requests within 10 seconds\n for i in 
range(1, 5):\n assert target.is_reachable(\n f\"/scope-uri/include/exclude/{i}\"\n ), f\"Request #{i} for uri should be allowed\"\n\n def test_ratelimit_scope_ipv4_include(self, target, ratelimit_config):\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-ipv4-include/included\", srcip=IP4_US\n ), f\"Request #{i} for included ipv4 should be allowed\"\n assert not target.is_reachable(\n \"/scope-ipv4-include/included\", srcip=IP4_US\n ), \"Request #4 for included ipv4 should be denied\"\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-ipv4-include/not-included\", srcip=IP4_JP\n ), f\"Request #{i} for non included ipv4 should be allowed\"\n\n def test_ratelimit_scope_ipv4_exclude(self, target, ratelimit_config):\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-ipv4-exclude/excluded\", srcip=IP4_US\n ), f\"Request #{i} for excluded ipv4 should be allowed\"\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-ipv4-exclude/not-excluded\", srcip=IP4_JP\n ), f\"Request #{i} for non excluded ipv4 should be allowed\"\n assert not target.is_reachable(\n \"/scope-ipv4-exclude/not-excluded\", srcip=IP4_JP\n ), \"Request #4 for non excluded ipv4 should be denied\"\n\n def test_ratelimit_scope_country_include(self, target, ratelimit_config):\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-country-include/included\", srcip=IP4_US\n ), f\"Request #{i} for included country should be allowed\"\n assert not target.is_reachable(\n \"/scope-country-include/included\", srcip=IP4_US\n ), \"Request #4 for included country should be denied\"\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-country-include/not-included\", srcip=IP4_JP\n ), f\"Request #{i} for non included country should be allowed\"\n\n def test_ratelimit_scope_country_exclude(self, target, ratelimit_config):\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-country-exclude/excluded\", srcip=IP4_US\n ), f\"Request #{i} for excluded country should be allowed\"\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-country-exclude/not-excluded\", srcip=IP4_JP\n ), f\"Request #{i} for non excluded country should be allowed\"\n assert not target.is_reachable(\n \"/scope-country-exclude/not-excluded\", srcip=IP4_JP\n ), \"Request #4 for non excluded country should be denied\"\n\n def test_ratelimit_scope_company_include(self, target, ratelimit_config):\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-company-include/included\", srcip=IP4_CLOUDFLARE\n ), f\"Request #{i} for included company should be allowed\"\n assert not target.is_reachable(\n \"/scope-company-include/included\", srcip=IP4_CLOUDFLARE\n ), \"Request #4 for included company should be denied\"\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-company-include/not-included\", srcip=IP4_US\n ), f\"Request #{i} for non included company should be allowed\"\n\n def test_ratelimit_scope_company_exclude(self, target, ratelimit_config):\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-company-exclude/excluded\", srcip=IP4_CLOUDFLARE\n ), f\"Request #{i} for excluded company should be allowed\"\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-company-exclude/not-excluded\", srcip=IP4_US\n ), f\"Request #{i} for non excluded company should be allowed\"\n assert not target.is_reachable(\n \"/scope-company-exclude/not-excluded\", srcip=IP4_US\n ), \"Request #4 for non excluded company should be denied\"\n\n def 
test_ratelimit_scope_provider_include(self, target, ratelimit_config):\n # \"provider\" means \"asn\"\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-provider-include/included\", srcip=IP4_US\n ), f\"Request #{i} for included provider should be allowed\"\n assert not target.is_reachable(\n \"/scope-provider-include/included\", srcip=IP4_US\n ), \"Request #4 for included provider should be denied\"\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-provider-include/not-included\", srcip=IP4_JP\n ), f\"Request #{i} for non included provider should be allowed\"\n\n def test_ratelimit_scope_provider_exclude(self, target, ratelimit_config):\n # \"provider\" means \"asn\"\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-provider-exclude/excluded\", srcip=IP4_US\n ), f\"Request #{i} for excluded provider should be allowed\"\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-provider-exclude/not-excluded\", srcip=IP4_JP\n ), f\"Request #{i} for non excluded provider should be allowed\"\n assert not target.is_reachable(\n \"/scope-provider-exclude/not-excluded\", srcip=IP4_JP\n ), \"Request #4 for non excluded provider should be denied\"\n\n def test_ratelimit_scope_method_include(self, target, ratelimit_config):\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-method-include/included\"\n ), f\"Request #{i} for included method should be allowed\"\n assert not target.is_reachable(\n \"/scope-method-include/included\"\n ), \"Request #4 for included method should be denied\"\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-method-include/not-included\", method=\"HEAD\"\n ), f\"Request #{i} for non included method should be allowed\"\n\n def test_ratelimit_scope_method_exclude(self, target, ratelimit_config):\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-method-exclude/excluded\"\n ), f\"Request #{i} for excluded method should be allowed\"\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-method-exclude/not-excluded\", method=\"HEAD\"\n ), f\"Request #{i} for non excluded method should be allowed\"\n assert not target.is_reachable(\n \"/scope-method-exclude/not-excluded\", method=\"HEAD\"\n ), \"Request #4 for non excluded method should be denied\"\n\n def test_ratelimit_scope_query_include(self, target, ratelimit_config):\n # if \"QUERY\" is a substring of the query, rate limiting applies\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-query-include/included?QUERY\"\n ), f\"Request #{i} for included query should be allowed\"\n assert not target.is_reachable(\n \"/scope-query-include/included?QUERY\"\n ), \"Request #4 for included query should be denied\"\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-query-include/not-included?SOMETHINGELSE\"\n ), f\"Request #{i} for non included query should be allowed\"\n\n def test_ratelimit_scope_query_exclude(self, target, ratelimit_config):\n # if \"QUERY\" is a substring of the query, rate limiting does not apply\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-query-exclude/excluded?QUERY\"\n ), f\"Request #{i} for excluded query should be allowed\"\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-query-exclude/not-excluded?SOMETHINGELSE\"\n ), f\"Request #{i} for non excluded query should be allowed\"\n assert not target.is_reachable(\n \"/scope-query-exclude/not-excluded?SOMETHINGELSE\"\n ), \"Request #4 for non excluded query should be denied\"\n\n def 
test_ratelimit_scope_authority_include(self, target, ratelimit_config):\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-authority-include/included\"\n ), f\"Request #{i} for included authority should be allowed\"\n assert not target.is_reachable(\n \"/scope-authority-include/included\"\n ), \"Request #4 for included authority should be denied\"\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-other-authority-include/not-included\"\n ), f\"Request #{i} for non included authority should be allowed\"\n\n def test_ratelimit_scope_authority_exclude(self, target, ratelimit_config):\n for i in range(1, 5):\n assert target.is_reachable(\n \"/scope-authority-exclude/excluded\"\n ), f\"Request #{i} for excluded authority should be allowed\"\n for i in range(1, 4):\n assert target.is_reachable(\n \"/scope-other-authority-exclude/not-excluded\"\n ), f\"Request #{i} for non excluded authority should be allowed\"\n assert not target.is_reachable(\n \"/scope-other-authority-exclude/not-excluded\"\n ), \"Request #4 for non excluded authority should be denied\"\n\n def ratelimit_countby_helper(self, target, name, param1, param2, nocount=False):\n def disp(i):\n # do not change URLs when countby is set to uri or path\n if nocount:\n return \"\"\n return i\n\n for i in range(1, 4):\n assert target.is_reachable(\n f\"/countby-{name}/1/{disp(i)}\", **param1\n ), f\"Request #{i} with {name} countby 1 should be allowed\"\n assert target.is_reachable(\n f\"/countby-{name}/2/{disp(i)}\", **param2\n ), f\"Request #{i} with {name} countby 2 should be allowed\"\n # empty {name} -> not counted\n # assert target.is_reachable(f\"/countby-{name}/3/{disp(i)}\"), \\\n # f\"Request #{i} with no {name} should be allowed\"\n assert not target.is_reachable(\n f\"/countby-{name}/2/{disp(4)}\", **param1\n ), f\"Request #4 with {name} countby 1 should be blocked\"\n assert not target.is_reachable(\n f\"/countby-{name}/2/{disp(4)}\", **param2\n ), f\"Request #4 with {name} countby 2 should be blocked\"\n # assert not target.is_reachable(f\"/countby-{name}/3/{disp(4)}\"), \\\n # f\"Request #{i} with no {name} should be denied\"\n time.sleep(10)\n assert target.is_reachable(\n f\"/countby-{name}/2/{disp(5)}\", **param1\n ), f\"Request #5 with {name} countby 1 should be allowed\"\n assert target.is_reachable(\n f\"/countby-{name}/2/{disp(5)}\", **param2\n ), f\"Request #5 with {name} countby 2 should be allowed\"\n # assert target.is_reachable(f\"/countby-{name}/3/{disp(5)}\"), \\\n # f\"Request #{i} with no {name} should be denied\"\n\n def test_ratelimit_countby_section(self, target, ratelimit_config, section):\n param1 = {section: {\"countby\": \"1\"}}\n param2 = {section: {\"countby\": \"2\"}}\n self.ratelimit_countby_helper(target, section, param1, param2)\n\n def test_ratelimit_countby_ipv4(self, target, ratelimit_config):\n param1 = {\"srcip\": IP4_US}\n param2 = {\"srcip\": IP4_JP}\n self.ratelimit_countby_helper(target, \"ipv4\", param1, param2)\n\n def test_ratelimit_countby_ipv6(self, target, ratelimit_config):\n param1 = {\"srcip\": IP6_1}\n param2 = {\"srcip\": IP6_2}\n self.ratelimit_countby_helper(target, \"ipv6\", param1, param2)\n\n def test_ratelimit_countby_provider(self, target, ratelimit_config):\n # \"provider\" means \"asn\"\n param1 = {\"srcip\": IP4_US}\n param2 = {\"srcip\": IP4_JP}\n self.ratelimit_countby_helper(target, \"provider\", param1, param2)\n\n def test_ratelimit_countby_uri(self, target, ratelimit_config):\n param1 = {}\n param2 = {}\n 
self.ratelimit_countby_helper(target, \"uri\", param1, param2, nocount=True)\n\n def test_ratelimit_countby_path(self, target, ratelimit_config):\n param1 = {}\n param2 = {}\n self.ratelimit_countby_helper(target, \"path\", param1, param2, nocount=True)\n\n def test_ratelimit_countby_query(self, target, ratelimit_config):\n param1 = {\"suffix\": \"?QUERY-1\"}\n param2 = {\"suffix\": \"?QUERY-2\"}\n self.ratelimit_countby_helper(target, \"query\", param1, param2)\n\n def test_ratelimit_countby_method(self, target, ratelimit_config):\n param1 = {\"method\": \"HEAD\"}\n param2 = {\"method\": \"GET\"}\n self.ratelimit_countby_helper(target, \"method\", param1, param2)\n\n def test_ratelimit_countby_company(self, target, ratelimit_config):\n param1 = {\"srcip\": IP4_US}\n param2 = {\"srcip\": IP4_JP}\n self.ratelimit_countby_helper(target, \"company\", param1, param2)\n\n def test_ratelimit_countby_country(self, target, ratelimit_config):\n param1 = {\"srcip\": IP4_US}\n param2 = {\"srcip\": IP4_JP}\n self.ratelimit_countby_helper(target, \"country\", param1, param2)\n\n def test_ratelimit_countby_authority(self, target, ratelimit_config):\n param1 = {\"headers\": {\"Host\": \"authority-1\"}}\n param2 = {\"headers\": {\"Host\": \"authority-2\"}}\n self.ratelimit_countby_helper(target, \"authority\", param1, param2)\n\n def test_ratelimit_countby2_section(self, target, ratelimit_config, section):\n param1 = {section: {\"countby1\": \"1\"}}\n param2 = {section: {\"countby2\": \"1\"}}\n param12 = {section: {\"countby1\": \"1\", \"countby2\": \"1\"}}\n for i in range(1, 4):\n assert target.is_reachable(\n f\"/countby2-{section}/1/{i}\", **param1\n ), f\"Request #{i} with {section} countby 1 should be allowed\"\n assert target.is_reachable(\n f\"/countby2-{section}/2/{i}\", **param2\n ), f\"Request #{i} with {section} countby 2 should be allowed\"\n assert target.is_reachable(\n f\"/countby2-{section}/2/{i}\", **param12\n ), f\"Request #{i} with {section} countby 1&2 should be allowed\"\n assert target.is_reachable(\n f\"/countby2-{section}/2/4\", **param1\n ), f\"Request #4 with {section} countby 1 should not be blocked\"\n assert target.is_reachable(\n f\"/countby2-{section}/2/4\", **param2\n ), f\"Request #4 with {section} countby 2 should not be blocked\"\n assert not target.is_reachable(\n f\"/countby2-{section}/2/4\", **param12\n ), f\"Request #4 with {section} countby 1&2 should be blocked\"\n time.sleep(10)\n assert target.is_reachable(\n f\"/countby2-{section}/2/5\", **param1\n ), f\"Request #5 with {section} countby 1 should be allowed\"\n assert target.is_reachable(\n f\"/countby2-{section}/2/5\", **param2\n ), f\"Request #5 with {section} countby 2 should be allowed\"\n assert target.is_reachable(\n f\"/countby2-{section}/2/5\", **param12\n ), f\"Request #5 with {section} countby 1&2 should be allowed\"\n\n def test_ratelimit_countby_2sections(self, target, ratelimit_config, section):\n # condition: have countby set for 2 sections\n othersection = {\"headers\": \"params\", \"cookies\": \"headers\", \"params\": \"cookies\"}[\n section\n ]\n param1 = {section: {\"countby\": \"1\"}}\n param2 = {othersection: {\"countby\": \"1\"}}\n param12 = {section: {\"countby\": \"1\"}, othersection: {\"countby\": \"1\"}}\n for i in range(1, 4):\n assert target.is_reachable(\n f\"/countby-{section}-{othersection}/1/{i}\", **param1\n ), f\"Request #{i} with {section} countby 1 should be allowed\"\n assert target.is_reachable(\n f\"/countby-{section}-{othersection}/2/{i}\", **param2\n ), f\"Request #{i} 
with {section} countby 2 should be allowed\"\n assert target.is_reachable(\n f\"/countby-{section}-{othersection}/2/{i}\", **param12\n ), f\"Request #{i} with {section} countby 1&2 should be allowed\"\n assert target.is_reachable(\n f\"/countby-{section}-{othersection}/2/4\", **param1\n ), f\"Request #4 with {section} countby 1 should not be blocked\"\n assert target.is_reachable(\n f\"/countby-{section}-{othersection}/2/4\", **param2\n ), f\"Request #4 with {section} countby 2 should not be blocked\"\n assert not target.is_reachable(\n f\"/countby-{section}-{othersection}/2/4\", **param12\n ), f\"Request #4 with {section} countby 1&2 should be blocked\"\n time.sleep(10)\n assert target.is_reachable(\n f\"/countby-{section}-{othersection}/2/5\", **param1\n ), f\"Request #5 with {section} countby 1 should be allowed\"\n assert target.is_reachable(\n f\"/countby-{section}-{othersection}/2/5\", **param2\n ), f\"Request #5 with {section} countby 2 should be allowed\"\n assert target.is_reachable(\n f\"/countby-{section}-{othersection}/2/5\", **param12\n ), f\"Request #5 with {section} countby 1&2 should be allowed\"\n\n def ratelimit_event_param_helper(self, target, name, params):\n limit = len(params)\n for i in range(limit - 1):\n assert target.is_reachable(\n f\"/event-{name}/1/\", **params[i]\n ), f\"Request for value #{i+1} with {name} event should be allowed\"\n assert not target.is_reachable(\n f\"/event-{name}/1/\", **params[limit - 1]\n ), f\"Request for value #{limit} with {name} event should be denied\"\n for i in range(limit):\n assert not target.is_reachable(\n f\"/event-{name}/1/\", **params[i]\n ), f\"Request for value #{i+1} with {name} event should be denied\"\n time.sleep(10)\n for i in range(limit - 1):\n assert target.is_reachable(\n f\"/event-{name}/1/\", **params[i]\n ), f\"Request for value #{i+1} with {name} event should be allowed\"\n\n def test_ratelimit_event_section(self, target, ratelimit_config, section):\n params = [{section: {\"event\": f\"{i}\"}} for i in range(1, 5)]\n self.ratelimit_event_param_helper(target, section, params)\n\n def test_ratelimit_event_ipv4(self, target, ratelimit_config):\n params = [{\"srcip\": f\"199.0.0.{i}\"} for i in range(1, 5)]\n self.ratelimit_event_param_helper(target, \"ipv4\", params)\n\n def test_ratelimit_event_ipv6(self, target, ratelimit_config):\n params = [\n {\"srcip\": f\"0000:0000:0000:0000:0000:0000:0000:000{i}\"} for i in range(1, 5)\n ]\n self.ratelimit_event_param_helper(target, \"ipv6\", params)\n\n def test_ratelimit_event_provider(self, target, ratelimit_config):\n # \"provider\" means \"asn\"\n params = [{\"srcip\": ip} for ip in (IP4_US, IP4_JP, IP4_CLOUDFLARE, IP4_ORANGE)]\n self.ratelimit_event_param_helper(target, \"provider\", params)\n\n def test_ratelimit_event_uri(self, target, ratelimit_config):\n # URI is different for each query, nothing more needs changing\n params = [{\"suffix\": f\"{i}\"} for i in range(1, 5)]\n self.ratelimit_event_param_helper(target, \"uri\", params)\n\n def test_ratelimit_event_path(self, target, ratelimit_config):\n # Path is different for each query, nothing more needs changing\n params = [{\"suffix\": f\"{i}\"} for i in range(1, 5)]\n self.ratelimit_event_param_helper(target, \"path\", params)\n\n def test_ratelimit_event_query(self, target, ratelimit_config):\n params = [{\"suffix\": f\"?QUERY-{i}\"} for i in range(1, 5)]\n self.ratelimit_event_param_helper(target, \"query\", params)\n\n def test_ratelimit_event_method(self, target, ratelimit_config):\n params = 
[{\"method\": m} for m in (\"GET\", \"HEAD\", \"POST\", \"PUT\")]\n self.ratelimit_event_param_helper(target, \"method\", params)\n\n def test_ratelimit_event_company(self, target, ratelimit_config):\n params = [{\"srcip\": ip} for ip in (IP4_US, IP4_JP, IP4_CLOUDFLARE, IP4_ORANGE)]\n self.ratelimit_event_param_helper(\n target,\n \"company\",\n params,\n )\n\n def test_ratelimit_event_country(self, target, ratelimit_config):\n params = [{\"srcip\": ip} for ip in (IP4_US, IP4_JP, IP4_CLOUDFLARE, IP4_ORANGE)]\n self.ratelimit_event_param_helper(target, \"country\", params)\n\n def test_ratelimit_event_authority(self, target, ratelimit_config):\n params = [{\"headers\": {\"Host\": f\"authority-{i}\"}} for i in range(1, 5)]\n self.ratelimit_event_param_helper(target, \"authority\", params)\n\n\n# --- Tag rules tests (formerly profiling lists) ---\n\nTEST_GLOBALFILTERS = {\n \"id\": \"e2e000000000\",\n \"name\": \"e2e test tag rules\",\n \"source\": \"self-managed\",\n \"mdate\": \"2020-11-22T00:00:00.000Z\",\n \"description\": \"E2E test tag rules\",\n \"entries_relation\": \"OR\",\n \"active\": True,\n \"tags\": [\"e2e-test\"],\n \"rule\": {\n \"relation\": \"OR\",\n \"sections\": [\n {\n \"relation\": \"OR\",\n \"entries\": [\n [\"cookies\", [\"e2e\", \"value\"], \"annotation\"],\n [\"headers\", [\"e2e\", \"value\"], \"annotation\"],\n [\"method\", \"(POST|PUT)\", \"annotation\"],\n [\"path\", \"/e2e-globalfilters-path/\", \"annotation\"],\n [\"query\", \"e2e=value\", \"annotation\"],\n [\"uri\", \"/e2e-globalfilters-uri\", \"annotation\"],\n [\"ip\", IP6_1, \"annotation\"],\n [\"ip\", IP4_US, \"annotation\"],\n [\n \"country\",\n \"jp\",\n \"annotation\",\n ], # TODO: discuss is this should work using caps\n [\"asn\", \"13335\", \"annotation\"],\n ],\n },\n {\n \"relation\": \"AND\",\n \"entries\": [\n [\"path\", \"/e2e-and/\", \"annotation\"],\n [\"cookies\", [\"e2e-and\", \"value\"], \"annotation\"],\n ],\n },\n ],\n },\n}\n\n\n@pytest.fixture(scope=\"session\", params=[True, False], ids=[\"active\", \"inactive\"])\ndef active(request):\n return request.param\n\n\n@pytest.fixture(scope=\"class\")\ndef globalfilters_config(cli, acl, active):\n cli.revert_and_enable()\n acl.set_acl({\"force_deny\": \"e2e-test\", \"passthrough\": \"all\"})\n # Apply TEST_GLOBALFILTERS\n TEST_GLOBALFILTERS[\"active\"] = active\n # 'updating' wafpolicies with a list containing a single entry adds this\n # entry, without removing pre-existing ones.\n cli.call(\n f\"doc update {TEST_CONFIG_NAME} globalfilters /dev/stdin\",\n inputjson=[TEST_GLOBALFILTERS],\n )\n cli.publish_and_apply()\n\n\nclass TestGlobalFilters:\n def test_cookies(self, target, globalfilters_config, active):\n assert (\n target.is_reachable(\"/e2e-globalfilters-cookies\", cookies={\"e2e\": \"value\"})\n is not active\n )\n assert (\n target.is_reachable(\n \"/e2e-globalfilters-cookies\", cookies={\"e2e\": \"allowed\"}\n )\n is True\n )\n\n def test_headers(self, target, globalfilters_config, active):\n assert (\n target.is_reachable(\"/e2e-globalfilters-headers\", headers={\"e2e\": \"value\"})\n is not active\n )\n assert (\n target.is_reachable(\n \"/e2e-globalfilters-headers\", headers={\"e2e\": \"allowed\"}\n )\n is True\n )\n\n def test_method(self, target, globalfilters_config, active):\n assert (\n target.is_reachable(\"/e2e-globalfilters-method-GET\", method=\"GET\") is True\n )\n assert (\n target.is_reachable(\"/e2e-globalfilters-method-POST\", method=\"POST\")\n is not active\n )\n assert (\n 
target.is_reachable(\"/e2e-globalfilters-method-PUT\", method=\"PUT\")\n is not active\n )\n\n def test_path(self, target, globalfilters_config, active):\n assert target.is_reachable(\"/e2e-globalfilters-path/\") is not active\n assert target.is_reachable(\"/e2e-globalfilters-valid-path/\") is True\n\n def test_query(self, target, globalfilters_config, active):\n assert (\n target.is_reachable(\"/e2e-globalfilters-query\", params={\"e2e\": \"value\"})\n is not active\n )\n assert (\n target.is_reachable(\"/e2e-globalfilters-query\", params={\"e2e\": \"allowed\"})\n is True\n )\n\n def test_uri(self, target, globalfilters_config, active):\n assert target.is_reachable(\"/e2e-globalfilters-uri\") is not active\n assert target.is_reachable(\"/e2e-globalfilters-allowed-uri\") is True\n\n def test_ipv4(self, target, globalfilters_config, active):\n assert target.is_reachable(\"/tag-ipv4-1\", srcip=IP4_US) is not active\n assert target.is_reachable(\"/tag-ipv4-2\", srcip=IP4_ORANGE) is True\n\n def test_ipv6(self, target, globalfilters_config, active):\n assert target.is_reachable(\"/tag-ipv6-1\", srcip=IP6_1) is not active\n assert target.is_reachable(\"/tag-ipv6-2\", srcip=IP6_2) is True\n\n def test_country(self, target, globalfilters_config, active):\n # JP address (Softbank)\n assert target.is_reachable(\"/tag-country\", srcip=IP4_JP) is not active\n\n def test_asn(self, target, globalfilters_config, active):\n # ASN 13335\n assert target.is_reachable(\"/tag-asn\", srcip=IP4_CLOUDFLARE) is not active\n\n def test_and(self, target, globalfilters_config, active):\n assert (\n target.is_reachable(\"/e2e-and/\", cookies={\"e2e-and\": \"value\"}) is not active\n )\n assert (\n target.is_reachable(\"/not-e2e-and/\", cookies={\"e2e-and\": \"value\"}) is True\n )\n assert (\n target.is_reachable(\"/e2e-and/\", cookies={\"not-e2e-and\": \"value\"}) is True\n )\n\n\n# --- Security Policies tests ---\n\n\nACL_BYPASSALL = {\n \"id\": \"e2e00ac10000\",\n \"name\": \"e2e-denyall-acl\",\n \"allow\": [],\n \"allow_bot\": [],\n \"deny_bot\": [],\n \"passthrough\": [\"all\"],\n \"force_deny\": [],\n \"deny\": [],\n}\n\nWAF_SHORT_HEADERS = {\n \"id\": \"e2e000000002\",\n \"name\": \"e2e waf short headers\",\n \"ignore_alphanum\": True,\n \"max_header_length\": 50,\n \"max_cookie_length\": 1024,\n \"max_arg_length\": 1024,\n \"max_headers_count\": 42,\n \"max_cookies_count\": 42,\n \"max_args_count\": 512,\n \"args\": {\"names\": [], \"regex\": []},\n \"headers\": {\"names\": [], \"regex\": []},\n \"cookies\": {\"names\": [], \"regex\": []},\n}\n\nSECURITYPOLICY = [\n {\n \"id\": \"e2e000000001\",\n \"name\": \"e2e Security Policy\",\n \"match\": \".*\",\n \"map\": [\n {\n \"name\": \"acl\",\n \"match\": \"/acl/\",\n \"acl_profile\": \"__default__\",\n \"acl_active\": True,\n \"waf_profile\": \"__default__\",\n \"waf_active\": False,\n \"limit_ids\": [],\n \"isnew\": True,\n },\n {\n \"name\": \"acl-passthroughall\",\n \"match\": \"/acl-passthroughall/\",\n \"acl_profile\": \"e2e00ac10000\",\n \"acl_active\": True,\n \"waf_profile\": \"__default__\",\n \"waf_active\": True,\n \"limit_ids\": [],\n \"isnew\": True,\n },\n {\n \"name\": \"acl-waf\",\n \"match\": \"/acl-waf/\",\n \"acl_profile\": \"__default__\",\n \"acl_active\": True,\n \"waf_profile\": \"__default__\",\n \"waf_active\": True,\n \"limit_ids\": [],\n \"isnew\": True,\n },\n {\n \"name\": \"waf\",\n \"match\": \"/waf/\",\n \"acl_profile\": \"__default__\",\n \"acl_active\": False,\n \"waf_profile\": \"__default__\",\n \"waf_active\": True,\n 
\"limit_ids\": [],\n \"isnew\": True,\n },\n {\n \"name\": \"waf-short-headers\",\n \"match\": \"/waf-short-headers/\",\n \"acl_profile\": \"__default__\",\n \"acl_active\": False,\n \"waf_profile\": \"e2e000000002\",\n \"waf_active\": True,\n \"limit_ids\": [],\n \"isnew\": True,\n },\n {\n \"name\": \"nofilter\",\n \"match\": \"/nofilter/\",\n \"acl_profile\": \"__default__\",\n \"acl_active\": False,\n \"waf_profile\": \"__default__\",\n \"waf_active\": False,\n \"limit_ids\": [],\n },\n ],\n }\n]\n\n\n@pytest.fixture(scope=\"class\")\ndef securitypolicy_config(cli, acl):\n cli.revert_and_enable()\n # Add ACL entry\n default_acl = cli.empty_acl()\n default_acl[0][\"force_deny\"].append(\"all\")\n default_acl.append(ACL_BYPASSALL)\n cli.call(\n f\"doc update {TEST_CONFIG_NAME} aclprofiles /dev/stdin\", inputjson=default_acl\n )\n # Add waf profile entry\n wafpolicy = cli.call(f\"doc get {TEST_CONFIG_NAME} wafpolicies\")\n wafpolicy.append(WAF_SHORT_HEADERS)\n cli.call(\n f\"doc update {TEST_CONFIG_NAME} wafpolicies /dev/stdin\", inputjson=wafpolicy\n )\n # Add securitypolicy entry SECURITYPOLICY\n cli.call(\n f\"doc update {TEST_CONFIG_NAME} securitypolicies /dev/stdin\",\n inputjson=SECURITYPOLICY,\n )\n cli.publish_and_apply()\n\n\nclass TestSecurityPolicy:\n def test_nofilter(self, target, securitypolicy_config):\n assert target.is_reachable(\"/nofilter/\")\n assert target.is_reachable(\n \"/nofilter/\", headers={\"Long-header\": \"Overlong_header\" * 100}\n )\n\n def test_waffilter(self, target, securitypolicy_config):\n assert target.is_reachable(\"/waf/\")\n assert not target.is_reachable(\n \"/waf/\", headers={\"Long-header\": \"Overlong_header\" * 100}\n )\n\n def test_aclfilter(self, target, securitypolicy_config):\n assert not target.is_reachable(\"/acl/\")\n assert not target.is_reachable(\n \"/acl/\", headers={\"Long-header\": \"Overlong_header\" * 100}\n )\n\n def test_nondefault_aclfilter_passthroughall(self, target, securitypolicy_config):\n assert target.is_reachable(\"/acl-passthroughall/\")\n assert target.is_reachable(\n \"/acl-passthroughall/\", headers={\"Long-header\": \"Overlong_header\" * 100}\n )\n\n def test_aclwaffilter(self, target, securitypolicy_config):\n assert not target.is_reachable(\"/acl-waf/\")\n assert not target.is_reachable(\n \"/acl/\", headers={\"Long-header\": \"Overlong_header\" * 100}\n )\n\n def test_nondefault_wafpolicy_short_headers(self, target, securitypolicy_config):\n assert target.is_reachable(\n \"/waf-short-headers/\", headers={\"Short-header\": \"0123456789\" * 5}\n )\n assert not target.is_reachable(\n \"/waf-short-headers/\", headers={\"Long-header\": \"0123456789\" * 5 + \"A\"}\n )\n\n\n# --- WAF Policies tests (formerly WAF profiles) ---\n\n\nclass TestWAFLengthCount:\n def test_length_overlong(self, default_config, target, section):\n # default limit: len 1024\n assert not target.is_reachable(\n f\"/overlong-{section}\",\n **{section: {f\"Long-{section}\": f\"Overlong_{section}\" * 100}},\n ), f\"Reachable despite overlong {section}\"\n\n def test_length_short(self, default_config, target, section):\n assert target.is_reachable(\n f\"/short-{section}\", headers={f\"Short-{section}\": f\"Short_{section}\"}\n ), f\"Not reachable despite short {section}\"\n\n def test_count_few(self, default_config, target, section):\n # default limit: 512 for args, 42 for other sections\n values = {}\n for i in range(10):\n values[f\"{section}-{i}\"] = \"not_alphanum\"\n assert target.is_reachable(\n f\"/few-{section}\", **{section: values}\n ), 
f\"Not reachable despite few {section}\"\n\n def test_count_toomany(self, default_config, target, section):\n values = {}\n for i in range(513):\n values[f\"{section}-{i}\"] = \"not_alphanum\"\n assert not target.is_reachable(\n f\"/too-many-{section}\", **{section: values}\n ), f\"Reachable despite too many {section}\"\n\n\nWAF_PARAM_CONSTRAINTS = {\n \"names\": [\n {\n \"key\": \"name-norestrict\",\n \"reg\": \"[v]+[a]{1}l?u*e\",\n \"restrict\": False,\n \"exclusions\": {\"100140\": 1},\n },\n {\n \"key\": \"name-restrict\",\n \"reg\": \"[v]+[a]{1}l?u*e\",\n \"restrict\": True,\n \"exclusions\": {},\n },\n ],\n \"regex\": [\n {\n \"key\": \"reg[e]x{1}-norestrict\",\n \"reg\": \"[v]+[a]{1}l?u*e\",\n \"restrict\": False,\n \"exclusions\": {\"100140\": 1},\n },\n {\n \"key\": \"reg[e]x{1}-restrict\",\n \"reg\": \"[v]+[a]{1}l?u*e\",\n \"restrict\": True,\n \"exclusions\": {},\n },\n ],\n}\n\n\n@pytest.fixture(\n scope=\"session\", params=[True, False], ids=[\"ignore_alphanum\", \"no_ignore_alphanum\"]\n)\ndef ignore_alphanum(request):\n return request.param\n\n\n@pytest.fixture(scope=\"class\")\ndef wafparam_config(cli, request, ignore_alphanum):\n cli.revert_and_enable()\n # Apply WAF_PARAM_CONSTRAINTS\n wafpolicy = cli.call(f\"doc get {TEST_CONFIG_NAME} wafpolicies\")\n for k in (\"args\", \"headers\", \"cookies\"):\n wafpolicy[0][k] = WAF_PARAM_CONSTRAINTS\n wafpolicy[0][\"ignore_alphanum\"] = ignore_alphanum\n cli.call(\n f\"doc update {TEST_CONFIG_NAME} wafpolicies /dev/stdin\", inputjson=wafpolicy\n )\n\n cli.publish_and_apply()\n\n\n@pytest.fixture(scope=\"function\", params=[\"name\", \"regex\"])\ndef name_regex(request):\n return request.param\n\n\n@pytest.fixture(scope=\"function\", params=[\"restrict\", \"norestrict\"])\ndef restrict(request):\n return request.param\n\n\nclass TestWAFParamsConstraints:\n def test_allowlisted_value(\n self, wafparam_config, section, name_regex, restrict, target\n ):\n paramname = name_regex + \"-\" + restrict\n assert target.is_reachable(\n f\"/allowlisted-value-{paramname}\", **{section: {paramname: \"value\"}}\n ), f\"Not reachable despite allowlisted {section} value\"\n\n def test_non_allowlisted_value_restrict(\n self, wafparam_config, section, name_regex, target, ignore_alphanum\n ):\n paramname = name_regex + \"-restrict\"\n if ignore_alphanum:\n assert target.is_reachable(\n f\"/blocklisted-value-{paramname}-restrict-ignore_alphanum\",\n **{section: {paramname: \"invalid\"}},\n ), f\"Not reachable despite alphanum blocklisted {section} value (restrict is enabled)\"\n else:\n assert not target.is_reachable(\n f\"/blocklisted-value-{paramname}-restrict\",\n **{section: {paramname: \"invalid\"}},\n ), f\"Reachable despite blocklisted {section} value (restrict is enabled)\"\n\n def test_non_allowlisted_value_norestrict_nowafmatch(\n self, wafparam_config, section, name_regex, target\n ):\n paramname = name_regex + \"-norestrict\"\n assert target.is_reachable(\n f\"/blocklisted-value-{paramname}\", **{section: {paramname: \"invalid\"}}\n ), f\"Not reachable despite 'restricted' not checked (non-matching {section} value)\"\n\n def test_non_allowlisted_value_norestrict_wafmatch(\n self, wafparam_config, section, name_regex, target\n ):\n paramname = name_regex + \"-norestrict\"\n assert not target.is_reachable(\n f\"/blocklisted-value-{paramname}-wafmatch\",\n **{section: {paramname: \"../../../../../\"}},\n ), f\"Reachable despite matching wafsig 100116 (non-matching {section} value)\"\n\n def 
test_non_allowlisted_value_norestrict_wafmatch_excludesig(\n self, wafparam_config, section, name_regex, target\n ):\n paramname = name_regex + \"-norestrict\"\n assert target.is_reachable(\n f\"/blocklisted-value-{paramname}-wafmatch-excludedsig\",\n **{section: {paramname: \"htaccess\"}},\n ), f\"Not reachable despite excludesig for rule 100140 ({section} value)\"\n\n\n# --- WAF Rules tests (formerly WAF Signatures) ---\n\n\n@pytest.fixture(\n scope=\"function\", params=[(100140, \"htaccess\"), (100112, \"../../../../../\")]\n)\ndef wafrules(request):\n return request.param\n\n\nclass TestWAFRules:\n def test_wafsig(self, wafparam_config, target, section, wafrules, ignore_alphanum):\n ruleid, rulestr = wafrules\n has_nonalpha = \".\" in rulestr\n if ignore_alphanum and not has_nonalpha:\n assert target.is_reachable(\n f\"/wafsig-{section}\", **{section: {\"key\": rulestr}}\n ), f\"Unreachable despite ignore_alphanum=True for rule {ruleid}\"\n else:\n assert not target.is_reachable(\n f\"/wafsig-{section}\", **{section: {\"key\": rulestr}}\n ), f\"Reachable despite matching rule {ruleid}\"\n","sub_path":"e2e/test_e2e.py","file_name":"test_e2e.py","file_ext":"py","file_size_in_byte":61626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"635946616","text":"\"\"\"Main module for Bottle application.\"\"\"\n\n\nimport os\n\nimport bottle\nfrom bottle import template, request\nfrom bottle.ext import sqlalchemy\nfrom sqlalchemy import create_engine, Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom utils import validate_url\n\napplication = bottle.Bottle()\n\nBase = declarative_base()\nengine = create_engine('sqlite:///bookmarks.db', echo=True)\nplugin = sqlalchemy.Plugin(\n engine,\n Base.metadata,\n keyword='db',\n create=True,\n commit=True,\n use_kwargs=False\n)\napplication.install(plugin)\n\n\nclass Bookmark(Base):\n\n \"\"\"Schema for the Bookmark model.\"\"\"\n\n __tablename__ = 'bookmarks'\n\n id = Column(Integer, primary_key=True)\n category = Column(String, nullable=False)\n title = Column(String, nullable=False)\n url = Column(String, nullable=False)\n\n def __init__(self, category, title, url):\n \"\"\"Initialize a new bookmark.\"\"\"\n self.category = category\n self.title = title\n self.url = url\n\n def __repr__(self):\n \"\"\"Representation of a bookmark instance.\"\"\"\n return '')\ndef blogs(option, db):\n \"\"\"Create dynamically the template according to menu option passed.\"\"\"\n option = option.replace('_', ' ')\n bookmarks = db.query(Bookmark).filter(Bookmark.category==option).all()\n return template('base', title=option.capitalize(), results=bookmarks)\n\n\n@application.route('/add', method=\"POST\")\ndef add_bookmark(db):\n \"\"\"Add bookmark to database.\"\"\"\n category = request.forms.get('category')\n title = request.forms.get('title')\n url = request.forms.get('url')\n if title != '' and url != '':\n url = validate_url(url)\n if url:\n if category == '':\n category = 'uncategorized'\n new_bookmark = Bookmark(category, title, url)\n db.add(new_bookmark)\n db.commit()\n message = \"Added!\"\n else:\n message = 'Error: Url seems invalid.'\n else:\n message = \"Error: Title and Url are required.\"\n bookmarks = db.query(Bookmark.category).all()\n return template('index', categories=bookmarks, alert_message=message)\n\n\nif __name__ == '__main__':\n from bottle import run\n run(application, reloader=True, host='localhost', port=8080, 
debug=True)\n","sub_path":"bottle_app.py","file_name":"bottle_app.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"258520625","text":"import rospy\n\nfrom ..mission_state import MissionState, Parameter\nfrom proc_control.msg import TargetReached\nfrom proc_control.srv import SetPositionTarget\n\n\nclass MoveRelativeXY(MissionState):\n\n def __init__(self):\n MissionState.__init__(self)\n self.set_local_target = None\n self.target_reach_sub = None\n\n self.actual_position_x = 0.0\n self.actual_position_y = 0.0\n self.just_one_time = 0\n self.target_reached = False\n\n def define_parameters(self):\n self.parameters.append(Parameter('param_distance_x', 1.0, 'Distance to travel'))\n self.parameters.append(Parameter('param_distance_y', 1.0, 'Distance to travel'))\n self.parameters.append(Parameter('param_distance_yaw', 0, 'Distance to travel'))\n\n def get_outcomes(self):\n return ['succeeded', 'aborted', 'preempted']\n\n def target_reach_cb(self, data):\n self.target_reached = data.target_is_reached\n\n def initialize(self):\n rospy.wait_for_service('/proc_control/set_local_target')\n self.set_local_target = rospy.ServiceProxy('/proc_control/set_local_target', SetPositionTarget)\n\n self.target_reach_sub = rospy.Subscriber('/proc_control/target_reached', TargetReached, self.target_reach_cb)\n\n try:\n self.set_local_target(self.param_distance_x,\n self.param_distance_y,\n 0.0,\n 0.0,\n 0.0,\n self.param_distance_yaw)\n except rospy.ServiceException as exc:\n rospy.loginfo('Service did not process request: ' + str(exc))\n\n rospy.loginfo('Set relative position x = %f' % self.param_distance_x)\n rospy.loginfo('Set relative position y = %f' % self.param_distance_y)\n\n def run(self, ud):\n if self.target_reached > 0:\n return 'succeeded'\n\n def end(self):\n self.target_reach_sub.unregister()\n","sub_path":"src/controller_mission/state/moveRelativeXY.py","file_name":"moveRelativeXY.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"359972316","text":"import os\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\n\ndef active_node(node, code):\n return (node[0], node[1], node[0] == code)\n\n\nclass MainHandler(webapp.RequestHandler):\n def get(self, code):\n if code == '':\n code = 'uk'\n\n template_values = {\n 'locations': [\n active_node(('uk', 'United Kingdom'), code),\n active_node(('row', 'Rest of world'), code),\n active_node(('can', 'Canada'), code),\n active_node(('fra', 'France'), code),\n active_node(('ger', 'Germany'), code),\n active_node(('ire', 'Ireland'), code),\n active_node(('seasia', 'SE Asia'), code),\n active_node(('usa', 'U.S.A.'), code),\n active_node(('aus', 'Australia'), code)\n ],\n 'current': code,\n 'rows': [['Top', 'Middle'], ['Middle1', 'Middle2'],['Middle3'], ['Right1', 'Right2']]\n }\n path = os.path.join(os.path.dirname(__file__), 'templates/index.html')\n self.response.out.write(template.render(path, template_values))\n\napp = webapp.WSGIApplication([('/(.*)', MainHandler)], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"607376290","text":"class menu():\n\tdef __init__(self,chicken,fries,burgers,finished):\n\t\tself.chicken = chicken\n\t\tself.fries = fries\n\t\tself.burgers 
= burgers\n\t\tself.finished = finished\n\t\tself.total_items = chicken + fries + burgers\nimport datetime\n\nmenu_dict = {'chicken':1.85,'fries':0.89,'burgers':1.00}\n\nday = datetime.datetime.today().weekday()\norder_1 = menu(1,2,9,True)\n# order_1 is what you would use for input\nprint('Enter None if you dont want to order an item \\n enter the amount of each item')\n# print(day)\nprices = [menu_dict['chicken'],menu_dict['fries'],menu_dict['burgers']]\nprint(prices)\n\ntotal = 0\n\nif order_1.chicken is None:\n\tprint('i assume youre vegan then')\n\nelif order_1.chicken > 1 and order_1.burgers > 1 and order_1.fries > 1 and order_1.finished == True:\n\n\ttotal = order_1.chicken * prices[0] + order_1.burgers * prices[2] + order_1.fries * prices[1]\n\tprint('Amount Due:')\n\tprint(total)\nelif order_1.chicken > 1 or order_1.fries > 1 or order_1.burgers > 1:\n\n\ttotal = order_1.chicken * prices[0] + order_1.burgers * prices[2] + order_1.fries * prices[1]\n\tprint('Amount Due:')\n\tprint(total)\n\n\n# most specific conditions first so the Monday discounts are reachable\nelif order_1.chicken > 0 and order_1.burgers > 0 and order_1.fries > 0 and order_1.finished == True and day == 1 and order_1.total_items >= 8:\n\n\ttotal = order_1.chicken * prices[0] + order_1.burgers * prices[2] + order_1.fries * prices[1]\n\ttotal = total * 0.50\n\tprint('Amount Due:')\n\tprint(total)\n\nelif order_1.chicken > 0 and order_1.burgers > 0 and order_1.fries > 0 and order_1.finished == True and day == 1:\n\n\ttotal = order_1.chicken * prices[0] + order_1.burgers * prices[2] + order_1.fries * prices[1]\n\n\ttotal = total * 0.75\n\tprint('Amount Due:')\n\tprint(total)\n\nelif order_1.chicken > 0 and order_1.burgers > 0 and order_1.fries > 0 and order_1.finished == True:\n\n\ttotal = order_1.chicken * prices[0] + order_1.burgers * prices[2] + order_1.fries * prices[1]\n\tprint('Amount Due:')\n\tprint(total)\n\nelif order_1.finished == False:\n\tprint('take your sweet time')\n\nelse:\n\tprint('i get it youre not hungry yet please dont hesitate to come back next time')\n","sub_path":"chickenshop.py","file_name":"chickenshop.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"252394520","text":"# -*- coding: utf-8 -*-\n\n# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is\n# holder of all proprietary rights on this computer program.\n# You can only use this computer program if you have closed\n# a license agreement with MPG or you get the right to use the computer\n# program from someone who is authorized to grant you that right.\n# Any use of the computer program without a valid license is prohibited and\n# liable to prosecution.\n#\n# Copyright©2020 Max-Planck-Gesellschaft zur Förderung\n# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute\n# for Intelligent Systems. 
All rights reserved.\n#\n# Contact: ps-license@tuebingen.mpg.de\n\n\nimport torch\n\n\ndef get_reduction_method(reduction='mean'):\n if reduction == 'mean':\n return torch.mean\n elif reduction == 'sum':\n return torch.sum\n elif reduction == 'none':\n return lambda x: x\n else:\n raise ValueError('Unknown reduction method: {}'.format(reduction))\n","sub_path":"expose/losses/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"235632424","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render,redirect\n\nfrom django.http import HttpResponse\n\nfrom myapp.models import Corporation\n\nfrom myapp.forms import LoginForm\n\n\n# Create your views here.\n\ndef login(request):\n username = \"not Logged in\"\n if request.method == 'POST':\n #Get the posted form\n MyLoginForm = LoginForm(request.POST)\n if MyLoginForm.is_valid():\n username = MyLoginForm.cleaned_data['username']\n else:\n MyLoginForm = LoginForm()\n return render(request,'login.html', {'username': username})\n\ndef operations(request):\n company = Corporation(website=\"www.google.com\",mail=\"raagavendran@gmail.com\", name=\"raaga3\", phonenumber=\"545454\")\n company.save()\n objects = Corporation.objects.all()\n string=\"printing all corp entry in db:<BR>\"\n for obj in objects:\n string += obj.name+\"<BR>\"\n raaga = Corporation.objects.get(name=\"raaga\")\n string += \"printing one entry <BR>\"\n string += raaga.name\n string += \"<br>Deleting an entry<br>\"\n raaga.delete()\n company = Corporation(website=\"www.google.com\", mail=\"raagavendran@gmail.com\", name=\"raaga\", phonenumber=\"545454\")\n company.save()\n string += \"updating one entry <BR>\"\n raaga = Corporation.objects.get(name=\"raaga\")\n company.name = \"raaga\"\n company.save()\n return HttpResponse(string)\n\n\n\n\ndef hello(request):\n text = \"\"\"<h1> welcome</h1>\"\"\"\n return HttpResponse(text)\n\ndef review(request):\n return redirect(\"https://www.google.co.in/\")\n\ndef hello1(request):\n text = \"\"\"<h1> welcome hello1</h1>\"\"\"\n return HttpResponse(text)\n\n\ndef viewArticle(request, articleid, month, year):\n text = \"<h1>article number: %s</h1>\" % articleid + \"<h1>month:%s</h1>\" % month + \" <h1>year:%s</h1>\" % year\n return HttpResponse(text)\n\n\ndef hellohtm(request):\n import datetime\n today = datetime.datetime.now()\n daysofweek = ['mon', 'tues', 'wed', 'thurs', 'friday', 'sat', 'sun']\n return render(request, \"hello.html\", {\"today\": today, \"days_of_week\": daysofweek})\n","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"183466179","text":"from functions import *\nfrom tkinter import *\n\nroot = Tk()\n\nroot.geometry(\"600x400\")\nroot.title(\"Labels and Buttons\")\n\nlabel = Label(text=\"This is a Label.\")\nlabel.pack()\n\nbutton = Button(text=\"CLICK ME\", command=createLabel)\nbutton.pack()\n\nroot.mainloop()\n","sub_path":"py/Tkinter/GUI/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"202214981","text":"# Import function libraries\nimport jqdata\nfrom jqlib.technical_analysis import *\n\n\ndef initialize(context):\n # Set the pool of stocks we want to trade\n g.stocks = ['000001.XSHE', '000002.XSHE', '000004.XSHE', 
'000005.XSHE']\n # Use the CSI 300 index as the benchmark\n set_benchmark('000300.XSHG')\n # Enable dynamic price adjustment mode (real prices)\n set_option('use_real_price', True)\n\n\ndef handle_data(context, data):\n # Loop over each stock\n for security in g.stocks:\n # Get the stock's average price over the previous 5 days\n vwap = data[security].vwap(5)\n # Get the stock's closing price at the previous time point\n price = data[security].close\n # Get the current cash balance\n cash = context.portfolio.cash\n # If the last price is below the 5-day average price x 0.996 and we hold the stock, sell\n if price < vwap * 0.996 and context.portfolio.positions[\n security].closeable_amount > 0:\n # Place a sell order\n order(security, -500)\n # Log this sell\n log.info(\"Selling stock %s\" % (security))\n # If the last price is above the 5-day average price x 1.008 and cash is available, buy\n elif price > vwap * 1.008 and cash > 0:\n # Place a buy order\n order(security, 500)\n # Log this buy\n log.info(\"Buying stock %s\" % (security))\n","sub_path":"joinquant/多股票持仓量化策略实战案例.py","file_name":"多股票持仓量化策略实战案例.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"367018295","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport time\nimport datetime\nimport threading\nimport logging\n\nimport config\nimport fslib\nimport log_db\nimport log2event\nimport pc_input \nimport pcresult\n\n_logger = logging.getLogger(__name__)\n\n\ndef pc_log(conf, top_dt, end_dt, dur, area):\n\n _logger.info(\"job start ({0} - {1} in {2})\".format(top_dt, end_dt, area))\n\n edict, evmap = log2event.log2event(conf, top_dt, end_dt, dur, area)\n _logger.info(\"{0} events found in given term of log data\".format(\n len(edict)))\n\n if len(edict) > 2:\n threshold = conf.getfloat(\"dag\", \"threshold\")\n graph = pc_input.pc(edict, threshold)\n else:\n _logger.info(\"insufficient events({0}), return empty dag\".format(\\\n len(edict)))\n graph = pc_input.empty_dag()\n\n output = pcresult.PCOutput(conf)\n output.make(graph, evmap, top_dt, end_dt, dur, area)\n output.dump()\n _logger.info(\"job done, output {0}\".format(output.filename))\n return output\n\n\ndef thread_name(conf, top_dt, end_dt, dur, area):\n dirname = conf.get(\"dag\", \"output_dir\")\n l_header = []\n l_header.append(dirname)\n l_header.append(\"/\")\n l_header.append(area)\n l_header.append(\"_\")\n if conf.getdur(\"dag\", \"unit_diff\") == datetime.timedelta(days = 1):\n l_header.append(top_dt.strftime(\"%Y%m%d\"))\n else:\n l_header.append(top_dt.strftime(\"%Y%m%d_%H%M%S\"))\n return \"\".join(l_header)\n\n\ndef pc_all_args(conf):\n ld = log_db.LogData(conf)\n \n w_term = conf.getterm(\"dag\", \"whole_term\")\n if w_term is None:\n w_top_dt, w_end_dt = ld.whole_term()\n else:\n w_top_dt, w_end_dt = w_term\n term = conf.getdur(\"dag\", \"unit_term\")\n diff = conf.getdur(\"dag\", \"unit_diff\")\n dur = conf.getdur(\"dag\", \"stat_bin\")\n\n l_args = []\n top_dt = w_top_dt\n while top_dt < w_end_dt:\n end_dt = top_dt + term\n l_area = conf.getlist(\"dag\", \"area\")\n if \"each\" in l_area:\n l_area.pop(l_area.index(\"each\"))\n l_area += [\"host_\" + host for host\n in ld.whole_host(top_dt, end_dt)]\n for area in l_area:\n l_args.append((conf, top_dt, end_dt, dur, area))\n top_dt = top_dt + diff\n return l_args\n\n #l_args = []\n #l_area = conf.getlist(\"dag\", \"area\")\n #if \"each\" in l_area:\n # l_area.pop(l_area.index(\"each\"))\n # l_area += [\"host_\" + host for host\n # in ld.whole_host(top_dt, end_dt)]\n #for area in l_area:\n # top_dt = w_top_dt\n # while top_dt < w_end_dt:\n # end_dt = top_dt + term\n # l_args.append((conf, top_dt, end_dt, dur, area))\n # top_dt = top_dt + diff\n #return l_args\n\n\ndef pc_mthread(l_args, pal=1):\n\n start_dt = datetime.datetime.now()\n 
_logger.info(\"pc_log task start ({0} jobs)\".format(len(l_args)))\n\n l_thread = [threading.Thread(name = thread_name(*args),\n target = pc_log, args = args) for args in l_args]\n\n l_job = []\n while len(l_thread) > 0:\n if len(l_job) < pal:\n job = l_thread.pop(0)\n job.start()\n l_job.append(job)\n else:\n time.sleep(1)\n l_job = [j for j in l_job if j.is_alive()]\n else:\n for job in l_job:\n job.join()\n\n end_dt = datetime.datetime.now()\n _logger.info(\"pc_log task done ({0})\".format(end_dt - start_dt))\n\n\nif __name__ == \"__main__\":\n \n usage = \"usage: {0} [options]\".format(sys.argv[0])\n import optparse\n op = optparse.OptionParser(usage)\n op.add_option(\"-c\", \"--config\", action=\"store\",\n dest=\"conf\", type=\"string\", default=config.DEFAULT_CONFIG_NAME,\n help=\"configuration file path\")\n op.add_option(\"-p\", \"--parallel\", action=\"store\", dest=\"pal\", type=\"int\",\n default=1, help=\"multithreading\")\n op.add_option(\"-r\", action=\"store_true\", dest=\"rflag\",\n default=False, help=\"using pcalg library in R\")\n (options, args) = op.parse_args()\n\n conf = config.open_config(options.conf)\n config.set_common_logging(conf, _logger, [])\n\n fslib.mkdir(conf.get(\"dag\", \"output_dir\"))\n l_args = pc_all_args(conf)\n pc_mthread(l_args, options.pal)\n\n\n","sub_path":"logcausality/pc_log.py","file_name":"pc_log.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"83797624","text":"import glob\nfrom astropy.io import fits\nfrom astropy import visualization\nimport pylab as pl\n\n\nimport os\nimport time\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy import units as u\nfrom astropy.stats import mad_std\nimport pylab as pl\nimport radio_beam\nimport glob\nfrom spectral_cube import SpectralCube,DaskSpectralCube\nfrom spectral_cube.lower_dimensional_structures import Projection\n\nfrom casatools import image\nia = image()\n\nif os.getenv('NO_PROGRESSBAR') is None:\n from dask.diagnostics import ProgressBar\n pbar = ProgressBar()\n pbar.register()\n\nnthreads = 1\nscheduler = 'synchronous'\n\nos.environ['TEMPDIR'] = '/blue/adamginsburg/adamginsburg/tmp/'\n\nif os.getenv('DASK_THREADS') is not None:\n try:\n nthreads = int(os.getenv('DASK_THREADS'))\n if nthreads > 1:\n scheduler = 'threads'\n else:\n scheduler = 'synchronous'\n except (TypeError,ValueError):\n nthreads = 1\n scheduler = 'synchronous'\n\ndefault_lines = {'n2hp': '93.173700GHz',\n 'sio': '217.104984GHz',\n 'h2co303': '218.222195GHz',\n '12co': '230.538GHz',\n 'h30a': '231.900928GHz',\n 'h41a': '92.034434GHz',\n \"c18o\": \"219.560358GHz\",\n }\nspws = {3: list(range(4)),\n 6: list(range(7)),}\n\nsuffix = '.image'\n\ncwd = os.getcwd()\nbasepath = '/orange/adamginsburg/ALMA_IMF/2017.1.01355.L/imaging_results'\nos.chdir(basepath)\nprint(f\"Changed from {cwd} to {basepath}, now running cube metadata assembly\")\n\nglobal then\nthen = time.time()\ndef dt():\n global then\n now = time.time()\n print(f\"Elapsed: {now-then}\")\n then = now\n\n\ncolnames_apriori = ['Field', 'Band', 'Config', 'spw', 'line', 'suffix', 'filename', 'bmaj', 'bmin', 'bpa', 'mod_date', 'wcs_restfreq', 'minfreq', 'maxfreq']\ncolnames_fromheader = ['imsize', 'cell', 'threshold', 'niter', 'pblimit', 'pbmask', 'restfreq', 'nchan', 'width', 'start', 'chanchunks', 'deconvolver', 'weighting', 'robust', 'git_version', 'git_date', ]\n\nrows = []\n\nfor field in \"G337.92 W43-MM3 G328.25 G351.77 W43-MM2 G327.29 G338.93 W51-E 
G353.41 G008.67 W43-MM1 G010.62 W51-IRS2 G012.80 G333.60\".split():\n for band in (3,6):\n for config in ('7M12M', '12M'):\n for line in default_lines:\n for suffix in (\".image\", \".contsub.image\"):\n globblob = f\"{field}_B{band}*_{config}_*{line}{suffix}\"\n fn = glob.glob(globblob)\n if any(fn):\n print(f\"Found some matches for fn {fn}, using {fn[0]}.\")\n fn = fn[0]\n else:\n print(f\"Found no matches for glob {globblob}\")\n continue\n\n mod_date = time.ctime(os.path.getmtime(fn))\n\n ia.open(fn)\n history = {x.split(\":\")[0]:x.split(\": \")[1] for x in ia.history()}\n ia.close()\n\n if os.path.exists(fn+\".fits\"):\n cube = SpectralCube.read(fn+\".fits\", use_dask=True)\n cube.use_dask_scheduler(scheduler, num_workers=nthreads)\n else:\n cube = SpectralCube.read(fn)\n cube.use_dask_scheduler(scheduler, num_workers=nthreads)\n cube = cube.rechunk()\n if hasattr(cube, 'beam'):\n beam = cube.beam\n else:\n beams = cube.beams\n # use the middle-ish beam\n beam = beams[len(beams)//2]\n\n print(cube)\n\n spw = int(fn.split('spw')[1][0])\n\n minfreq = cube.spectral_axis.min()\n maxfreq = cube.spectral_axis.max()\n restfreq = cube.wcs.wcs.restfrq\n\n row = [field, band, config, spw, line, suffix, fn, beam.major.value, beam.minor.value, beam.pa.value, mod_date, restfreq, minfreq, maxfreq] + [history[key] if key in history else '' for key in colnames_fromheader]\n rows.append(row)\n\n\n for spw in spws[band]:\n for suffix in (\".image\", \".contsub.image\"):\n print(f\"Beginning field {field} band {band} config {config} spw {spw} suffix {suffix}\")\n globblob = f\"{field}_B{band}_spw{spw}_{config}_spw{spw}{suffix}\"\n fn = glob.glob(globblob)\n if any(fn):\n print(f\"Found some matches for fn {fn}, using {fn[0]}.\")\n fn = fn[0]\n else:\n print(f\"Found no matches for glob {globblob}\")\n continue\n\n mod_date = time.ctime(os.path.getmtime(fn))\n\n ia.open(fn)\n history = {x.split(\":\")[0]:x.split(\": \")[1] for x in ia.history()}\n ia.close()\n\n line = 'none'\n\n if os.path.exists(fn+\".fits\"):\n cube = SpectralCube.read(fn+\".fits\", use_dask=True)\n else:\n cube = SpectralCube.read(fn)\n if hasattr(cube, 'beam'):\n beam = cube.beam\n else:\n beams = cube.beams\n beam = beams[len(beams)//2]\n\n minfreq = cube.spectral_axis.min()\n maxfreq = cube.spectral_axis.max()\n restfreq = cube.wcs.wcs.restfrq\n\n row = [field, band, config, spw, line, suffix, fn, beam.major.value, beam.minor.value, beam.pa.value, mod_date, restfreq, minfreq, maxfreq] + [history[key] if key in history else '' for key in colnames_fromheader]\n rows.append(row)\n\nfrom astropy.table import Table\ncolnames = colnames_apriori+colnames_fromheader\ncolumns = list(map(list, zip(*rows)))\ntbl = Table(columns, names=colnames)\nprint(tbl)\nfrom pathlib import Path\ntbldir = Path('/orange/adamginsburg/web/secure/ALMA-IMF/tables')\ntbl.write(tbldir / 'cube_metadata.ecsv', overwrite=True)\ntbl.write(tbldir / 'cube_metadata.ipac', format='ascii.ipac', overwrite=True)\ntbl.write(tbldir / 'cube_metadata.html', format='ascii.html', overwrite=True)\ntbl.write(tbldir / 'cube_metadata.tex', overwrite=True)\ntbl.write(tbldir / 'cube_metadata.js.html', format='jsviewer')\n\nos.chdir(cwd)\n","sub_path":"analysis/cube_metadata_grid.py","file_name":"cube_metadata_grid.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"285269808","text":"from bs4 import BeautifulSoup as bs\nimport json\nfrom selenium import webdriver\nimport sys\n\n# Number 
of pages to scrape\npages = 1\nif len(sys.argv) > 1:\n    pages = int(sys.argv[1])\n\n# Selenium browser to open the URL and get page source\nbrowser = webdriver.Chrome('./chromedriver')\nbrowser.implicitly_wait(30)\nurl_list = []\n\n# Visit start pages and obtain restaurant URLs\nfor page in range(1, pages + 1):\n    browser.get('https://www.zomato.com/bangalore/restaurants?page=' + str(page))\n    html_data = browser.page_source\n\n    # BeautifulSoup library to parse the HTML data\n    soup = bs(html_data, 'html.parser')\n\n    # Get list of all restaurant URLs from start page\n    url_list += [a['href'] for a in soup.findAll('a', attrs={'class' : 'result-title hover_feedback zred bold ln24 fontsize0 '})]\n\nrestaurant_data = {}\n\n# Visit each URL and scrape required data using\n# the attributes that were obtained manually\nfor url in url_list:\n    restaurant_data[url] = {}\n    browser.get(url)\n    html_data = browser.page_source\n    soup = bs(html_data, 'html.parser')\n\n    restaurant_data[url]['name'] = soup.find('a', attrs={'class' : 'ui large header left'}).text.strip()\n    map_data = soup.find('div', attrs={'class' : 'resmap-img'})['data-url']\n    restaurant_data[url]['location'] = tuple(map(float, map_data.split('markers=')[1].split(',')[:2]))\n    restaurant_data[url]['rating'] = soup.find('div', attrs={'class': 'rating-div'}).text.strip()\n    restaurant_data[url]['reviews'] = [a.contents[2].strip() for a in soup.findAll('div', attrs={'class' : 'rev-text'})]\n\nbrowser.quit()\n\n# Store obtained data in the form of JSON object\nwith open(\"restaurant_data.json\", 'w') as outfile:\n    json.dump(restaurant_data, outfile)","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"507101582","text":"from socket import socket\nfrom nordicsemi.dfu.dfu_transport_network import DfuTransportUDP\nfrom nordicsemi.dfu.dfu_transport import DfuTransport, DfuEvent, TRANSPORT_LOGGING_LEVEL\nfrom nordicsemi.dfu.dfu import Dfu\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nglobal_bar = None\ndef update_progress(progress=0):\n    if global_bar:\n        global_bar.update(progress)\n\ndef update(package, outgoing_ip, sock):\n    \"\"\"Perform a Device Firmware Update on a device with a bootloader that supports network (UDP) DFU.\"\"\"\n    do_network(package, outgoing_ip, sock)\n\ndef do_network(package, outgoing_ip, sock):\n    logger.info(\"Doing network DFU\")\n\n    udp_backend = DfuTransportUDP(outgoing_ip, sock,\n                                     flow_control=False, prn=0, do_ping=True,\n                                     timeout=1.0)\n\n    udp_backend.register_events_callback(\n        DfuEvent.PROGRESS_EVENT, update_progress)\n\n    dfu = Dfu(zip_file_path=package, dfu_transport=udp_backend,\n              connect_delay=3)\n    dfu.dfu_send_images()\n\n    \"\"\" if logger.getEffectiveLevel() > logging.INFO:\n        with click.progressbar(length=dfu.dfu_get_total_size()) as bar:\n            global global_bar\n            global_bar = bar\n            dfu.dfu_send_images()\n    else:\n    \"\"\"","sub_path":"nordicsemi/dfu_update.py","file_name":"dfu_update.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"232139899","text":"from __future__ import division\n\nimport os.path\nimport sqlite3\nimport ConfigParser\nfrom collections import defaultdict, Counter\n\nimport pandas as pd\n\n\nclass Analysis:\n    \"\"\"Analysis.\n\n    Parameters\n    ----------\n    sql_path : str\n        Path to the SQLite file containing the in silico PCR result. 
Table must include\n        columns 'id', 'fw_match', 'rv_match', 'is_amplified' and 'taxonomy'.\n    analyzer : Analyzer\n        Analyzer must have methods 'run', 'filter' and 'output'.\n    out_dir : str\n        Directory the report(s) output to.\n    \n    Attributes\n    ----------\n    df : pandas.DataFrame\n        Table 'testprimer' from the SQLite file.\n    \"\"\"\n\n    def __init__(self, sql_path, out_dir, analyzer=None):\n        if not os.path.exists(sql_path):\n            raise IOError('SQLite file not found: %s' % sql_path)\n        self._sql_path = sql_path\n        self.out_dir = out_dir\n        self.analyzer = analyzer\n\n        with sqlite3.connect(self._sql_path) as conn:\n            self.df = pd.read_sql(\"SELECT * FROM testprimer;\", conn)\n\n    def execute(self):\n        if not self.analyzer:\n            raise ValueError('no analyzer has been set')\n        result = self.analyzer.run(self.df)\n        filtered = self.analyzer.filter(result)\n        return self.analyzer.output(filtered, self.out_dir)\n\n\nclass TaxaCoverage:\n\n    def run(self, df):\n        data = defaultdict(Counter)\n        for index, r in df.iterrows():\n            is_amplified = bool(r['is_amplified'])\n            taxa = r['taxonomy'].split(';')\n            for i in range(len(taxa)):\n                taxon = ';'.join(taxa[:i+1])\n                data[taxon].update([is_amplified])\n        \n        coverage = pd.DataFrame(data).T.fillna(0).reset_index() \\\n            .rename(columns={'index':'taxonomy', True:'match', False:'mismatch'})\n        coverage['coverage'] = coverage['match'] / (coverage['match'] + coverage['mismatch'])\n        return coverage\n\n    def filter(self, coverage):\n        \"\"\"Result selector.\n        \n        Only display domain level, phylum level and human disease related\n        pathogen coverage.\n        \"\"\"\n        domain = coverage[coverage['taxonomy'].str.count(';')==0]\n        phylum = coverage[coverage['taxonomy'].str.count(';')==1]\n\n        # human disease related pathogens\n        configpath = os.path.join(\n            os.path.expanduser('~'),\n            '.testprimer',\n            'config'\n        )\n        if not os.path.isfile(configpath):\n            raise IOError('missing configuration file: %s' % configpath)\n        config = ConfigParser.ConfigParser()\n        config.read(configpath)\n        try:\n            pathogenlist = config.get('TaxaCoverage', 'pathogens').strip().split(',')\n        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:\n            raise e\n\n        genus = coverage[(coverage['taxonomy'].str.startswith('Bacteria')) & (coverage['taxonomy'].str.count(';')==5)]\n        data = defaultdict(list)\n        for candidate in pathogenlist:\n            row = genus[genus['taxonomy'].str.endswith(candidate)]\n            data['pathogen'].append(candidate) \n            if row.shape[0] != 0: \n                data['taxonomy'].append(row.iloc[0]['taxonomy'])\n                data['mismatch'].append(row.iloc[0]['mismatch'])\n                data['match'].append(row.iloc[0]['match'])\n                data['coverage'].append(row.iloc[0]['coverage'])\n            else:\n                data['taxonomy'].append(None)\n                data['mismatch'].append(None)\n                data['match'].append(None)\n                data['coverage'].append(None)\n        pathogen = pd.DataFrame(data, columns=['pathogen', 'taxonomy', 'mismatch', 'match', 'coverage'])\n\n        return [domain, phylum, pathogen]\n\n    def output(self, filtered, out_dir):\n        writer = pd.ExcelWriter(os.path.join(out_dir, 'coverage.xlsx'))\n        domain, phylum, pathogen = filtered\n        domain.to_excel(writer, 'domain', index=False)\n        phylum.to_excel(writer, 'phylum', index=False)\n        pathogen.to_excel(writer, 'pathogen', index=False)\n        writer.save()\n        return\n\n\ndef report(sql_path, out_dir, taxa_coverage):\n    '''Main module entry point.'''\n\n    analysis = Analysis(sql_path, out_dir)\n\n    if taxa_coverage:\n        analysis.analyzer = TaxaCoverage()\n        analysis.execute()\n\n    return\n","sub_path":"testprimer/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"504108524","text":"\n\n\"\"\" Manages the 
bookcase, i.e., repository of items\n    \n    \n    \"\"\"\n\n\nfrom api.Utilities import *\n\nfrom api.Archiver import *\n\nfrom api.BibFile import *\n\nfrom api.TableFile import *\n\n\n\nclass Librarian :\n    \n    \"\"\" Manages the bookcase, i.e., repository of items\n\n\n    \"\"\"\n\n    \n    # address ( drive etc )\n\n    address = None\n\n    # bookcase ( path to a volume or directory )\n\n    bookcaseDirectory = None\n\n    # error\n\n    error = None\n\n    # list of erroneous items\n\n    errorList = None\n\n    # directory containing items exported from bookcase\n\n    exportDirectory = None\n\n    # logs\n\n    logClassify = None\n    \n    logDownload = None\n    \n    logIndex = None\n    \n    # local directory of selected object on doc server\n\n    local = None\n    \n    # current file or directory ( last uploaded or downloaded )\n\n    path = None\n\n    # selected item on server ( last downloaded or uploaded )\n\n    selected = None\n\n    # items to classify ( path to a volume or a directory )\n\n    classifyDirectory = None\n\n    # lists of things to do (manually)\n\n    todoList = None\n    \n    # list of uploaded items\n\n    uploadList = None\n    \n    \n\n    def __init__ ( self ) :\n\n        \"\"\" constructor \"\"\"\n\n        self.setDefault()\n\n\n\n\n    def buildIndex ( self ) :\n        \n        \"\"\" builds the indexes and verifies the bookcase\n\n        \n        \"\"\"\n\n        if not self.checkAccess() : return False\n\n        if utilities.isEmpty( bibFile.typeList ) : return False\n\n        # deletes the indexes\n\n        items = utilities.directoryContent( self.bookcaseDirectory, annotate = True )\n\n        for item in items :\n\n            if item.endswith( os.sep ) : continue\n\n            if item.startswith( \"index\" ) : utilities.fileDelete( self.bookcaseDirectory + item )\n\n        \n        # resets the log. After checkAccess we know that bookcase is defined and accessible\n        \n        utilities.fileWrite(\n            self.bookcaseDirectory + os.sep + self.logIndex,\n            \"#date\" + utilities.fieldDelimiter + \\\n            \"name\" + utilities.fieldDelimiter + \\\n            \"link\" + utilities.fieldDelimiter + \\\n            \"problem\" + \"\\n\"\n            )\n\n        for iType in range( len( bibFile.typeList ) ) :\n\n            bibtex = bibFile.typeList[ iType ]\n\n            categoryPath = self.bookcaseDirectory + os.sep + bibtex + os.sep\n\n            if not utilities.directoryPresent( categoryPath ) : continue\n\n            # lists of fields for general index and specific indexes\n\n            fieldList = bibFile.fieldMatrix[ iType ]\n\n            # paths to indexes\n            \n            bibtexIndex = self.bookcaseDirectory + \"index\" + \"_\" + bibtex\n\n            # creates header if file not here\n\n            if not utilities.filePresent( bibtexIndex ) :\n                \n                utilities.fileWrite(\n                    bibtexIndex + \".tsv\",\n                    \"#key\" + utilities.fieldDelimiter + \\\n                    \"bibtex\" + utilities.fieldDelimiter + \\\n                    \"link\" + utilities.fieldDelimiter + \\\n                    utilities.wordsToText( fieldList, utilities.fieldDelimiter ) + \"\\n\"\n                    )\n            \n            years = utilities.directoryContent( categoryPath, annotate = True )\n\n            for year in years :\n\n                if not year.endswith( os.sep ) : continue\n\n                if not len( year ) == 5 : continue\n\n                year = year[ : -1 ]\n\n                if utilities.integer( year ) is None : continue \n\n                # this is a date.\n\n                # prepares index\n                \n                yearIndex = self.bookcaseDirectory + \"index\" + \"_\" + bibtex + \"_\" + year\n                \n                if not utilities.filePresent( yearIndex ) :\n                    \n                    utilities.fileWrite(\n                        yearIndex + \".tsv\",\n                        \"#key\" + utilities.fieldDelimiter + \\\n                        \"bibtex\" + utilities.fieldDelimiter + \\\n                        \"link\" + utilities.fieldDelimiter + \\\n                        utilities.wordsToText( fieldList, utilities.fieldDelimiter ) + \"\\n\"\n                        )\n\n\n                # checks content\n\n                yearPath = categoryPath + year + os.sep\n\n                items = utilities.directoryContent( yearPath, 
annotate = True )\n\n for item in items :\n\n if item.startswith( \"_\" ) : continue\n\n self.indexItem(\n yearPath + item,\n bibtexIndex,\n yearIndex,\n fieldList\n )\n\n\n return True\n\n\n\n\n def check ( self ) :\n\n \"\"\" Checks the connection to server\n\n returns True/False and sets context variable connectedValue to \"true\" or \"false\"\n \n \"\"\"\n\n self.bookcaseDirectory = utilities.getVariable( \"bookcase\", default = \"\" )\n\n self.classifyDirectory = utilities.getVariable( \"classify\", default = \"\" )\n\n self.exportDirectory = utilities.getVariable( \"export\", default = \"\" )\n\n ok1 = self.checkAccess()\n\n ok2 = self.checkAccess( self.classifyDirectory )\n\n ok = ok1 and ok2\n \n if ok : utilities.setVariable( \"connected\", \"true\" )\n\n else : utilities.setVariable( \"connected\", \"false\" )\n \n return ok\n\n\n\n def checkAccess (\n\n self,\n directory = None\n\n ) :\n\n \"\"\" Checks the access to drive and/or to directory( no control of access rights ) \"\"\"\n\n # default is current bookcase\n\n if utilities.isEmpty( directory ) : directory = self.bookcaseDirectory\n \n # empty : no access\n\n if utilities.isEmpty( directory ) : return False\n\n # resets the cache ***FUCKING PYTHON: dircache on network drive works when it wants.\n\n dircache.reset()\n\n # there is a check file, can be detected : good\n \n if utilities.filePresent( directory + \"check.txt\" ) : return True\n\n # no drive : wrong\n \n if not utilities.directoryPresent( directory ) : return False\n\n # cannot read the check file, tries to create it ( users should have write - rights at least )\n \n ok = utilities.fileCreate( directory + \"check.txt\" )\n\n return ok\n \n\n\n def classify (\n\n self,\n path = None,\n directory = None,\n owner = None,\n ) :\n\n \"\"\" builds the indexes and verifies the bookcase \"\"\"\n\n self.error = \"\"\n\n # called on the main to_classify directory\n\n if utilities.isEmpty( directory ) :\n\n # cannot access bookcase\n \n if not self.checkAccess() : \n\n self.writeLogClassify(\n self.bookcaseDirectory,\n \"cannot access bookcase\"\n )\n\n return False\n\n # prepares log file header\n\n utilities.fileWrite(\n self.bookcaseDirectory + os.sep + self.logClassify,\n \"#date\" + utilities.fieldDelimiter + \\\n \"directory\" + utilities.fieldDelimiter + \\\n \"item\" + utilities.fieldDelimiter + \\\n \"problem\" + utilities.fieldDelimiter + \\\n \"link\" + \"\\n\"\n )\n\n # cannot access to classify directory\n\n if not self.checkAccess( self.classifyDirectory ) :\n\n self.writeLogClassify(\n self.classifyDirectory,\n \"cannot access directory\"\n )\n\n return False\n\n # no bib types\n\n if utilities.isEmpty( bibFile.typeList ) :\n\n self.writeLogClassify(\n self.bookcaseDirectory,\n \"list of types undefined (bibfile)\"\n )\n\n return False\n\n # there is a specific item to classify : uploads\n\n if not utilities.isEmpty( path ) :\n\n result = self.upload( path )\n\n return result\n\n\n # check content of directory to classify\n\n if utilities.isEmpty( directory ) : directory = self.classifyDirectory\n \n items = utilities.directoryContent( directory, annotate = True )\n\n## print \"librarian.classify( \", directory, \"owner\", str(owner)\n\n for item in items :\n\n if item.startswith( \"_\" ) : continue\n\n if item.endswith( \".bib\" ) : continue\n\n if item == \"check.txt\" : continue\n\n # owner's subdirectory\n\n if ( ( item.endswith( os.sep ) ) and ( item.startswith( \"owner\" ) ) ) :\n\n user = item[ len( \"owner\" ) : ]\n\n user = utilities.string( user, 
format = \"split\" ).strip( \" \" + os.sep )\n\n self.classify(\n directory = directory + item,\n owner = user\n )\n\n continue\n\n # user subdirectory (same as owner, for now )\n \n if ( ( item.endswith( os.sep ) ) and ( item.startswith( \"user\" ) ) ) :\n\n user = item[ len( \"user\" ) : ]\n\n user = utilities.string( user, format = \"split\" ).strip( \" \" + os.sep )\n\n self.classify(\n directory = directory + item,\n owner = user\n )\n\n continue\n\n\n\n # here, I have a classifiable item\n\n path = directory + item\n\n local = utilities.localDirectory( path )\n\n backups = utilities.backupsDirectory( path )\n\n name = utilities.pathName( item )\n\n if path.endswith( os.sep ) : extension = os.sep\n\n else : extension = \".\" + utilities.pathExtension( path )\n\n # is there a bib directly in the to_classify directory? moves it to _local/information.bib\n\n bib = directory + name + \".bib\"\n\n if utilities.filePresent ( bib ) :\n\n utilities.directoryCreate( local )\n\n utilities.fileCopy( bib, local + \"information.bib\" )\n\n utilities.fileDelete( bib )\n\n ok = self.upload(\n path,\n replace = False,\n owner = owner\n )\n\n if not ok : continue\n\n # writes to log (uses the bibfile information to determine path in bookcase)\n\n\n shared = utilities.pathShared(\n category = bibFile.bibtex,\n author = bibFile.author,\n description = bibFile.description,\n title = bibFile.title,\n year = bibFile.year,\n extension = extension,\n directory = self.bookcaseDirectory\n )\n\n \n self.writeLogClassify(\n path,\n \"\",\n shared\n )\n\n # removes the item from classifyDirectory\n\n utilities.pathDelete( path )\n\n utilities.directoryDelete( local )\n\n utilities.directoryDelete( backups )\n \n return True\n\n\n\n\n def createBib (\n\n self,\n path = None,\n bibtex = None,\n author = None,\n description = None,\n owner = None,\n title = None,\n year = None,\n initials = None\n ) :\n\n \"\"\" creates a bib file \"\"\"\n\n if utilities.isEmpty( path ) : return False\n\n if utilities.isEmpty( bibtex ) : return False\n\n## print \" createbib (\", bibtex, \",\", author, \",\", description, \",\", title, \",\", year, \",\", initials, \")\"\n\n if utilities.isEmpty( title ) : title = utilities.string( initials, format = \"upper\" )\n\n if utilities.isEmpty( title ) : title = description\n\n title = utilities.string( title, format = \"title\" )\n\n if utilities.isEmpty( owner ) : owner = utilities.getVariable( \"organization\" )\n\n owner = utilities.string( owner, format = \"title\", default = \"?\" )\n\n table = [\n [ \"bibtex\", bibtex ],\n [ \"author\", author ],\n [ \"description\", description ],\n [ \"title\", title ],\n [ \"year\", year ],\n [ \"owner\", owner ],\n ]\n \n\n ok = bibFile.write( path, table )\n\n return ok\n\n \n \n def download (\n\n self,\n owner = None\n ) :\n\n \"\"\" Downloads an item from documentation server (path) and copies it into (target)\n\n \n \"\"\"\n\n utilities.error = \"\"\n\n\n # cannot access bookcase\n \n if not self.checkAccess() : \n\n self.writeLogDownload(\n owner = owner,\n path = self.bookcaseDirectory,\n text = \"cannot access bookcase\"\n )\n\n return False\n\n # prepares log file header\n\n utilities.fileWrite(\n self.bookcaseDirectory + os.sep + self.logDownload,\n \"#date\" + utilities.fieldDelimiter + \\\n \"owner\" + utilities.fieldDelimiter + \\\n \"item\" + utilities.fieldDelimiter + \\\n \"from\" + utilities.fieldDelimiter + \\\n \"comment\" + \"\\n\"\n )\n\n # cannot access to download directory\n\n if not self.checkAccess( self.exportDirectory 
) :\n\n self.writeLogDownload(\n owner = owner,\n path = self.exportDirectory,\n text = \"cannot access directory\"\n )\n\n return False\n\n # no bib types\n\n if utilities.isEmpty( bibFile.typeList ) :\n\n self.writeLogDownload(\n owner = owner,\n path = self.bookcaseDirectory,\n text = \"list of types undefined (bibfile)\"\n )\n\n return False\n\n # owner\n\n if utilities.isEmpty( owner ) : owner = utilities.getVariable( \"user\" )\n\n owner = utilities.string( owner, format = \"strictunderscore\", default = \"All\" )\n\n # makes a local directory\n\n directory = self.exportDirectory + \"owner_\" + owner + os.sep\n\n ok = utilities.directoryCreate( directory )\n\n if not ok :\n\n self.writeLogDownload(\n owner = owner,\n path = directory,\n text = \"cannot create directory\"\n )\n\n return False\n\n\n # loop on the indexes\n\n result = True\n\n items = utilities.directoryContent( self.bookcaseDirectory, annotate = True )\n\n for item in items :\n\n if not item.startswith( \"index\" ) : continue\n\n if not item.endswith( \".tsv\" ) : continue\n\n # the index has bibcategory AND year, e.g., index_article_1999.tsv\" : not considered\n \n if not item.count( \"_\" ) == 1 : continue\n\n ok = self.downloadIndex(\n index = self.bookcaseDirectory + item,\n directory = directory,\n owner = owner\n )\n\n if not ok : result = False\n \n return result\n\n\n\n\n\n## def download (\n##\n## self,\n## owner = None\n## ) :\n## \n## \"\"\" downloads items belonging to owner and places them in export\n## \n## \"\"\"\n##\n## if not self.checkAccess() : return False\n##\n## if not self.checkAccess( self.classifyDirectory ) : return False\n##\n## if utilities.isEmpty( bibFile.typeList ) : return False\n##\n##\n## for iType in range( len( bibFile.typeList ) ) :\n##\n## bibtex = bibFile.typeList[ iType ]\n##\n## categoryPath = self.bookcaseDirectory + os.sep + bibtex + os.sep\n##\n## if not utilities.directoryPresent( categoryPath ) : continue\n##\n## years = utilities.directoryContent( categoryPath, annotate = True )\n##\n## for year in years :\n##\n## if not year.endswith( os.sep ) : continue\n##\n## if not len( year ) == 5 : continue\n##\n## year = year[ : -1 ]\n##\n## if utilities.integer( year ) is None : continue\n##\n##\n## yearPath = categoryPath + year + os.sep\n## \n## items = utilities.directoryContent( yearPath, annotate = True )\n##\n## for item in items :\n##\n## if item.startswith( \"_\" ) : continue\n##\n## name = utilities.pathName( item )\n##\n## path = yearPath + item\n##\n## local = utilities.localDirectory( path )\n##\n## backups = utilities.backupsDirectory( path )\n##\n## utilities.pathCopy( path, self.classifyDirectory + item )\n##\n## utilities.directoryCopy( local, self.classifyDirectory + \"_\" + name + os.sep )\n##\n## utilities.directoryCopy( backups, self.classifyDirectory + \"__\" + name + os.sep )\n##\n##\n## # loop on the indexes\n##\n## result = True\n##\n## items = utilities.directoryContent( self.bookcaseDirectory, annotate = True )\n##\n## for item in items :\n##\n## if ( ( item.startswith( \"index\" ) ) and ( item.endswith( \".tsv\" ) ) :\n##\n## ok = self.downloadIndex(\n## index = self.bookcaseDirectory + item,\n## directory = directory,\n## owner = owner\n## )\n##\n## if not ok : result = False\n##\n## \n## return result\n##\n\n\n\n\n\n\n def downloadIndex (\n\n self,\n index = None,\n directory = None,\n owner = None\n ) :\n\n \"\"\" downloads the content of an index that belong to some owner into directory \"\"\"\n\n if utilities.isEmpty( directory ) : return False\n\n 
table = tableFile.readTable( index, size = 4, variable = True )\n\n if utilities.isEmpty( table ) : return False\n\n owner = utilities.string( owner, format = \"title\", default = None )\n\n # looks for items with right owner (table fields are key bibtex link owner (owner is defined in BibFile.fieldMatrix)\n\n for item in table :\n\n if ( ( not owner is None ) and ( not item[ 3 ] == owner ) ) : continue\n\n## print \" downloadindex found\", item[ 2 ], item[ 3 ]\n\n # path to the _item/index.html file\n \n path = item[ 2 ]\n\n local = utilities.pathDirectory( path )\n\n name = utilities.pathName( local ).strip( \"_ \" )\n\n source = utilities.pathDirectory( local )\n\n # copies _item*, item.*\n\n items = utilities.directoryContent( source, annotate = True )\n\n for item in items :\n\n if not utilities.pathName( item ).strip( \"_ \" ) == name : continue\n\n utilities.pathCopy( source + item, directory + item )\n\n self.writeLogDownload(\n path = directory + item,\n owner = owner,\n source = path,\n text = \"\"\n )\n\n \n \n\n return True\n\n\n\n\n\n\n\n\n def downloadPath (\n\n self,\n path = None,\n target = None\n ) :\n\n \"\"\" Downloads a file or directory from documentation server (path) and copies it into (target)\n\n \n \"\"\"\n\n\n self.selected = path\n \n self.path = utilities.normalizePath( target, normalize = False )\n \n ok = utilities.pathCopy( path, target )\n\n return ok\n\n\n\n\n\n def indexItem (\n\n self,\n path = None,\n bibtexIndex = None,\n yearIndex = None,\n fieldList = None\n ) :\n\n \"\"\" verifies an item of the bookcase and indexes it \"\"\"\n\n if utilities.isEmpty( path ) : return False\n\n if utilities.isEmpty( bibtexIndex ) : return False\n\n if utilities.isEmpty( yearIndex ) : return False\n\n if utilities.isEmpty( fieldList ) : return False\n\n local = utilities.localDirectory( path )\n \n bib = local + \"information.bib\"\n\n## backups = utilities.backupsDirectory( path ) \n##\n## backup = backups + \"backup.zip\"\n\n # checks bib file and backup\n\n if not utilities.filePresent( bib ) :\n\n self.writeLogIndex( path, \"missing bib file\" )\n\n return False\n\n## # no backup : creates it\n##\n## if not utilities.filePresent( backup ) :\n##\n## # creates a zip that contains the item with no date or other information in name.\n##\n## ok = archiver.backup(\n## path = path,\n## target = \"backup.zip\",\n## directory = backups\n## )\n##\n## if not ok :\n##\n## self.writeLogIndex( path, \"missing backup file - could not create it\" )\n##\n## return False\n## \n## # backups the local directory in the same backup\n##\n## ok = archiver.backup(\n## path = local,\n## target = \"backup.zip\",\n## directory = backups\n## )\n##\n## if not ok :\n##\n## self.writeLogIndex( path, \"missing backup file - could not complete it\" )\n##\n## return False\n##\n## # notifies creation\n##\n## self.writeLogIndex( path, \"create backup file\" )\n\n \n\n # checks content of .bib\n\n bibFile.read( bib )\n\n if utilities.isEmpty( bibFile.bibtex ) :\n\n self.writeLogIndex( path, \"invalid bib file\" )\n\n return False\n\n if not bibFile.checkComplete() :\n\n self.writeLogIndex( path, \"missing in bibtex file: \" + utilities.wordsToText( bibFile.missingList ) )\n\n return False\n\n## if not bibFile.checkComplete( [ \"file\", \"bib\", \"zip\" ] ) :\n\n if not bibFile.checkComplete( [ \"file\", \"bib\" ] ) :\n\n self.writeLogIndex( path, \"missing in bibtex file: \" + utilities.wordsToText( bibFile.missingList ) )\n\n return False\n\n \n # check the place according to content of bib file\n\n 
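# a directory item carries the path separator itself as its extension marker, a plain file keeps its real extension\n\n        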
if path.endswith( os.sep ) : extension = os.sep\n\n        else : extension = \".\" + utilities.pathExtension( path )\n\n        shared = utilities.pathShared(\n            category = bibFile.bibtex,\n            author = bibFile.author,\n            description = bibFile.description,\n            title = bibFile.title,\n            year = bibFile.year,\n            extension = extension,\n            directory = self.bookcaseDirectory\n            )\n\n        if not shared == utilities.slashPath( path ) :\n\n            self.writeLogIndex(\n                path,\n                \"incorrect place (correct location: \" + shared + \") - move back to : \" + self.classifyDirectory\n                )\n\n            return False\n\n        # rewrites the access html file, information.html\n\n        createFlag = not utilities.filePresent( local + \"information.html\" ) \n\n        ok = self.writeHtml( path )\n\n        if not ok :\n\n            self.writeLogIndex( path, \"could not update information.html\" )\n\n            return False\n\n        elif createFlag :\n\n            self.writeLogIndex( path, \"create information.html file\" )\n        \n        # places current item in indexes, uses bibfile information\n\n        self.indexPath(\n            path,\n            bibtexIndex + \".tsv\",\n            yearIndex + \".tsv\",\n            fieldList\n            )\n\n        # appends to the bib files\n\n        text = os.linesep + bibFile.text.strip( \" \\n\\t\\r,\" ).replace( \"\\n\", os.linesep ) + os.linesep\n        \n        utilities.fileAppend( bibtexIndex + \".bib\", text )\n        \n        utilities.fileAppend( yearIndex + \".bib\", text )\n        \n\n        return True\n        \n        \n\n        \n\n    def indexPath (\n\n        self,\n        path = None,\n        bibtexPath = None,\n        yearPath = None,\n        fieldList = None,\n        ) :\n\n        \"\"\" adds an item to the indexes, uses bibfile information.\n\n        no control of correctness\n        \n        \"\"\"\n\n        # bibtex type\n        \n        bibtex = utilities.string( bibFile.bibtex, default = \"\" )\n\n        # key to control duplicates\n        \n        key = utilities.string( bibFile.title, format = \"initials\", default = \"\" )\n\n        # path to the information.html file that contains links\n\n        local = utilities.localDirectory( path )\n\n        html = \"file:///\" + utilities.slashPath( local + \"information.html\" )\n\n        # texts for general and specific indexes\n\n        text = key + utilities.fieldDelimiter + bibtex + utilities.fieldDelimiter + html + utilities.fieldDelimiter\n\n        for item in fieldList :\n\n            item = \"reference\" + item.capitalize()\n\n            index = utilities.index( bibFile.attributeList, item )\n\n            if index < 0 :\n\n                text = text + utilities.voidCode + utilities.fieldDelimiter\n\n                continue\n\n            value = bibFile.valueList[ index ].replace( \"{\", \"\" ).replace( \"}\", \"\" )\n\n##            if ( ( item == \"referenceFile\" ) or ( item == \"referenceBib\" ) or ( item == \"referenceZip\" ) ) :\n##\n            if ( ( item == \"referenceFile\" ) or ( item == \"referenceBib\" ) ) :\n\n                value = \"file:///\" + utilities.slashPath( self.bookcaseDirectory + value )\n\n            text = text + utilities.asciiToFlat( value, default = utilities.voidCode ) + utilities.fieldDelimiter\n\n        text = text + \"\\n\"\n        \n        utilities.fileAppend( bibtexPath, text )\n\n        utilities.fileAppend( yearPath, text )\n\n        return True\n\n\n\n\n    def setDefault ( self ) :\n\n        \"\"\" sets default attributes\"\"\"\n        \n        self.bookcaseDirectory = utilities.getVariable( \"bookcase\", default = \"\" )\n\n        self.classifyDirectory = utilities.getVariable( \"classify\", default = \"\" )\n\n        self.logClassify = \"log_classify.tsv\"\n\n        self.logDownload = \"log_download.tsv\"\n        \n        self.logIndex = \"log_index.tsv\"\n\n\n    \n\n    \n\n\n    def setTarget (\n\n        self,\n        path = None,\n        directory = None,\n        ) :\n\n        \"\"\" Determines the source path self.path and the target self.selected when something is uploaded on a drive \"\"\"\n\n        # attribute that keeps track of the current 
source\n \n self.path = utilities.normalizePath( path, normalize = False )\n\n # target's name (includes extension)\n\n address = self.bookcaseDirectory\n\n prefix = utilities.getVariable( \"type\", default = \"\" )\n\n year = utilities.getVariable( \"year\", default = \"\" )\n\n target = utilities.normalizePath( address + os.sep + prefix + os.sep + year + os.sep, normalize = False )\n\n if utilities.isEmpty( target ) :\n\n utilities.error = \"upload - no folder available\"\n\n return False\n\n # there is a subdirectory\n\n directory = utilities.string( directory, default = \"\" )\n\n # uploads the object\n\n name = utilities.pathLastNameWithExtension( path )\n\n target = utilities.normalizePath( target + os.sep + directory + os.sep + name, normalize = False )\n\n self.selected = target\n\n return True\n\n\n \n \n \n def upload (\n\n self,\n path = None,\n complete = True,\n replace = True,\n owner = None,\n ) :\n\n \"\"\" Uploads a file or directory on documentation server.\n\n Name is the new name ( directory and extension are conserved )\n\n\n Owner is the owner or none. If it is defined, overrides the field of bibtex\n\n if complete is True, uploads the local directory too\n\n if replace is true, can replace previous doc in bookcase, otherwise error.\n \n \"\"\"\n\n utilities.error = \"\"\n\n if not utilities.pathPresent( path ) : return False\n\n # source (old)\n \n oldPath = utilities.normalizePath( path, normalize = False )\n\n oldDirectory = utilities.pathDirectory( path )\n \n oldName = utilities.pathNameExtension( path )\n\n if oldPath.endswith( os.sep ) : oldExtension = os.sep\n\n else : oldExtension = \".\" + utilities.pathExtension( oldPath )\n\n oldLocal = utilities.localDirectory( path )\n\n oldBackups = utilities.backupsDirectory( path )\n \n # parses the file name\n\n bibtex, author, description, year, initials = utilities.parseShared( path )\n\n # undefined author is unknown, and date is 9999\n\n if utilities.isEmpty( year ) : year = \"9999\"\n\n if utilities.isEmpty( author ) : author = \"unknown\"\n\n # keeps the description from name, to complete bib file\n\n defaultDescription = description\n \n## print \"librarian.upload\", path\n## \n## print \" \", bibtex,author, description, year, initials\n\n extension = utilities.pathExtension( path )\n\n bibPath = oldLocal + os.sep + \"information.bib\"\n\n bibFlag = utilities.filePresent( bibPath )\n\n # no bib file, name allows creation, creates\n\n if ( ( not bibFlag ) and ( not utilities.isEmpty( bibtex ) ) ) :\n\n bibFlag = self.createBib(\n bibPath,\n bibtex = bibtex,\n author = author,\n description = description,\n year = year,\n owner = owner,\n initials = initials\n )\n\n\n # no way , there is no bibtex file\n\n if not bibFlag :\n\n self.writeLogClassify(\n oldPath,\n \"no sufficient bibtex information (incomplete name and no file *.bib)\"\n )\n\n return False\n \n\n # reads the bib file \n \n bibFile.read( bibPath, bind = False )\n\n bibtex = bibFile.bibtex\n\n year = utilities.string( bibFile.year, default = \"\" )\n\n author = utilities.string( bibFile.author, default = \"\" )\n\n description = utilities.string( bibFile.description )\n\n if utilities.isEmpty( description ) : description = defaultDescription\n\n title = utilities.string( bibFile.title, default = \"\" )\n\n if utilities.isEmpty( bibtex ) :\n\n self.writeLogClassify(\n oldPath,\n \"bibtex category undefined\"\n )\n\n return False\n\n if utilities.isEmpty( year ) :\n\n self.writeLogClassify(\n oldPath,\n \"year undefined\"\n )\n\n return False\n\n if 
not bibFile.checkComplete() :\n\n self.writeLogClassify(\n oldPath,\n \"missing in bibtex file: \" + utilities.wordsToText( bibFile.missingList )\n )\n\n return False\n\n # determines the path and name in the bookcase\n\n newPath = utilities.pathShared(\n category = bibtex,\n author = author,\n description = description,\n title = title,\n year = year,\n extension = oldExtension,\n directory = self.bookcaseDirectory\n )\n \n newName = utilities.pathName( newPath )\n\n newExtension = oldExtension\n\n newName = newName + \".\" + newExtension\n \n newLocal = utilities.localDirectory( newPath )\n\n newBackups = utilities.backupsDirectory( newPath )\n\n # already present\n\n if ( ( not bool( replace ) ) and ( utilities.pathPresent( newPath ) ) ) :\n\n self.writeLogClassify(\n oldPath,\n \"already present in bookcase: \" + newPath\n )\n\n return False \n\n # copies the file\n\n ok = utilities.pathCopy( oldPath, newPath )\n\n if not ok :\n\n self.writeLogClassify(\n oldPath,\n \"could not copy in bookcase\"\n )\n\n return False\n\n # copies the local directory\n\n ok = utilities.directoryCopy ( oldLocal, newLocal )\n\n if not ok :\n\n self.writeLogClassify(\n oldPath,\n \"could not copy local directory in bookcase\"\n )\n\n return False\n\n## # creates a zip that contains the item with no date or other information in name.\n##\n## ok = archiver.backup(\n## path = newPath,\n## target = \"backup.zip\",\n## directory = newBackups\n## )\n##\n## if not ok :\n##\n## self.writeLogClassify( oldName, \"could not create backup in bookcase\" )\n##\n## return False\n## \n##\n##\n## # backups the local directory in the same backup\n##\n## ok = archiver.backup(\n## path = newLocal,\n## target = \"backup.zip\",\n## directory = newBackups\n## )\n##\n## if not ok :\n##\n## self.writeLogClassify( oldName, \"could not complete backup in bookcase\" )\n##\n## return False\n\n # writes the bib file again, with the paths to bib, item and zip\n\n bibFile.setAttributes(\n owner = owner,\n filePath = newPath,\n )\n\n ok = bibFile.write( path = newLocal + \"information.bib\" )\n\n if not ok :\n\n self.writeLogClassify(\n oldPath,\n \"could not create bib file in bookcase\"\n )\n\n return False\n \n # creates a html with links to document & bib\n\n ok = self.writeHtml( newPath )\n\n if not ok :\n\n self.writeLogClassify(\n oldPath,\n \"could not create information.html file in bookcase\"\n )\n\n return False\n\n return True\n\n\n\n def writeHtml (\n\n self,\n path = None\n ) :\n\n \"\"\" creates _local/information.html, with links to the item & the bib \"\"\"\n\n local = utilities.localDirectory( path )\n\n # stores previous values of variables, initializes them to copy the template\n\n utilities.pushVariables( [ \"shared\", \"author\", \"title\", \"year\" ] )\n\n utilities.setVariable( \"shared\", path )\n\n utilities.setVariable( \"author\", bibFile.author.replace( \"{\", \"\" ).replace( \"}\", \"\" ).replace( \"~\", \" \" ) )\n \n utilities.setVariable( \"title\", bibFile.title.replace( \"{\", \"\" ).replace( \"}\", \"\" ) )\n \n utilities.setVariable( \"year\", bibFile.year )\n\n templatePath = utilities.getVariable( \"procedures\" ) + \"_common\" + os.sep + \"documents\" + os.sep + \"information.html\"\n\n ok = utilities.fileCopy(\n templatePath,\n local + \"information.html\",\n instantiate = True\n )\n \n\n # restores the variables\n\n utilities.popVariables( [ \"shared\", \"author\", \"title\", \"year\" ] )\n\n return ok \n\n\n\n def writeLogClassify (\n\n self,\n path = None,\n text = None,\n target = None,\n date 
= None,\n \n ) :\n\n \"\"\" status of item \"path\", error and/or target in bookcase \"\"\"\n\n\n if utilities.isEmpty( date ) : date = clock.date()\n \n if not utilities.isEmpty( path ) :\n\n directory = utilities.pathDirectory( path )\n\n name = utilities.pathLastNameWithExtension( path )\n\n else :\n\n directory = \"\"\n\n name = \"\"\n\n if not utilities.isEmpty( target ) :\n\n html = utilities.localDirectory( target ) + \"information.html\"\n\n html = \"file:///\" + utilities.slashPath( html )\n\n else :\n\n html = \"\"\n \n \n text = utilities.string( text, default = \"\" )\n\n # there is a message : sets the error\n\n if not utilities.isEmpty( text ) :\n \n if utilities.isEmpty( name ) : error = text\n\n else : error = name + \"-\" + text\n\n self.error = error\n\n # no access to bookcase cannot write log\n \n if not utilities.directoryPresent( self.bookcaseDirectory ) : return\n \n # there is a file : writes it in log\n\n if not utilities.isEmpty( name ) :\n\n flat = utilities.asciiToFlat( text, default = utilities.voidCode )\n\n utilities.fileAppend(\n self.bookcaseDirectory + os.sep + self.logClassify,\n date + utilities.fieldDelimiter +\n directory + utilities.fieldDelimiter + \\\n name + utilities.fieldDelimiter + \\\n flat + utilities.fieldDelimiter + \\\n html + utilities.fieldDelimiter + \"\\n\"\n )\n\n## print name, \"-\", text\n\n\n def writeLogDownload (\n\n self,\n path = None,\n text = None,\n source = None,\n date = None,\n owner = None,\n \n ) :\n\n \"\"\" status of item \"path\", error and/or target in bookcase \"\"\"\n\n if utilities.isEmpty( date ) : date = clock.date()\n\n text = utilities.string( text, default = \"\" )\n\n path = utilities.string( path, default = utilities.voidCode )\n\n owner = utilities.string( owner, default = utilities.voidCode )\n\n source= utilities.string( source, default = utilities.voidCode )\n\n # there is a message : sets the error\n\n if not utilities.isEmpty( text ) :\n\n error = text\n\n self.error = error\n\n # no access to bookcase cannot write log\n \n if not utilities.directoryPresent( self.bookcaseDirectory ) : return\n \n # there is a file : writes it in log\n\n flat = utilities.asciiToFlat( text, default = \"\" )\n\n utilities.fileAppend(\n self.bookcaseDirectory + os.sep + self.logDownload,\n date + utilities.fieldDelimiter + \\\n owner + utilities.fieldDelimiter + \\\n path + utilities.fieldDelimiter + \\\n source + utilities.fieldDelimiter + \\\n flat + utilities.fieldDelimiter + \"\\n\"\n )\n\n\n\n\n \n\n def writeLogIndex (\n\n self,\n path = None,\n text = None,\n target = None,\n date = None,\n \n ) :\n\n \"\"\" status of item \"path\", error and/or target in bookcase \"\"\"\n\n if utilities.isEmpty( date ) : date = clock.date()\n \n if not utilities.isEmpty( path ) :\n\n name = utilities.pathLastNameWithExtension( path )\n\n html = utilities.localDirectory( path ) + \"information.html\"\n\n html = \"file:///\" + utilities.slashPath( html )\n\n else :\n\n name = \"\"\n\n html = \"\"\n\n text = utilities.string( text, default = \"\" )\n\n # there is a message : sets the error\n\n if not utilities.isEmpty( text ) :\n \n if utilities.isEmpty( name ) : error = text\n\n else : error = name + \"-\" + text\n\n self.error = error\n\n # no access to bookcase cannot write log\n \n if not utilities.directoryPresent( self.bookcaseDirectory ) : return\n \n # there is a file : writes it in log\n\n if not utilities.isEmpty( html ) :\n \n flat = utilities.asciiToFlat( text, default = \"\" )\n\n utilities.fileAppend(\n 
self.bookcaseDirectory + os.sep + self.logIndex,\n date + utilities.fieldDelimiter + \\\n name + utilities.fieldDelimiter + \\\n html + utilities.fieldDelimiter + \\\n flat + utilities.fieldDelimiter + \"\\n\"\n )\n\n\n\n\n\n\n\n\n\n \n# -----------------------------------\n# creates the global singleton object if not already here\n#\n\nif not \"librarian\" in globals() : librarian = Librarian()\n","sub_path":"TelecommandeEtudiant/basicKeyLogger/library/api/Librarian.py","file_name":"Librarian.py","file_ext":"py","file_size_in_byte":39646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"216238372","text":"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Video Dataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ctypes\nimport _ctypes\n\nimport tensorflow\nfrom tensorflow import dtypes\nfrom tensorflow.compat.v1 import data\nfrom tensorflow_io import _load_library\n\ndef load_dependency_and_library(p):\n \"\"\"load_dependency_and_library\"\"\"\n for library in p:\n # First try load all dependencies with RTLD_LOCAL\n entries = []\n for dependency in p[library]:\n try:\n entries.append(ctypes.CDLL(dependency))\n except OSError:\n pass\n if len(entries) == len(p[library]):\n # Dependencies has been satisfied, load dependencies again with RTLD_GLOBAL, no error is expected\n for dependency in p[library]:\n ctypes.CDLL(dependency, mode=ctypes.RTLD_GLOBAL)\n # Load video_op\n return _load_library(library)\n # Otherwise we dlclose and retry\n entries.reverse()\n for entry in entries:\n _ctypes.dlclose(entry._handle) # pylint: disable=protected-access\n raise NotImplementedError(\"could not find ffmpeg after search through \", p)\n\nvideo_ops = load_dependency_and_library({\n '_video_ops_ffmpeg_3.4.so': [\n \"libavformat.so.57\",\n \"libavformat.so.57\",\n \"libavutil.so.55\",\n \"libswscale.so.4\",\n ],\n '_video_ops_ffmpeg_2.8.so': [\n \"libavformat-ffmpeg.so.56\",\n \"libavcodec-ffmpeg.so.56\",\n \"libavutil-ffmpeg.so.54\",\n \"libswscale-ffmpeg.so.3\",\n ],\n '_video_ops_libav_9.20.so': [\n \"libavformat.so.54\",\n \"libavcodec.so.54\",\n \"libavutil.so.52\",\n \"libswscale.so.2\",\n ],\n})\n\nclass VideoDataset(data.Dataset):\n \"\"\"A Video File Dataset that reads the video file.\"\"\"\n\n def __init__(self, filenames):\n \"\"\"Create a `VideoDataset`.\n\n `VideoDataset` allows a user to read data from a video file with\n ffmpeg. 
The output of VideoDataset is a sequence of (height, weight, 3)\n tensor in rgb24 format.\n\n For example:\n\n ```python\n dataset = VideoDataset(\"/foo/bar.mp4\")\n iterator = dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n while True:\n try:\n print(sess.run(next_element))\n except tf.errors.OutOfRangeError:\n break\n ```\n\n Args:\n filenames: A `tf.string` tensor containing one or more filenames.\n \"\"\"\n self._filenames = tensorflow.convert_to_tensor(\n filenames, dtype=dtypes.string, name=\"filenames\")\n super(VideoDataset, self).__init__()\n\n def _inputs(self):\n return []\n\n def _as_variant_tensor(self):\n return video_ops.video_dataset(self._filenames)\n\n @property\n def output_classes(self):\n return tensorflow.Tensor\n\n @property\n def output_shapes(self):\n return tensorflow.TensorShape([None, None, 3])\n\n @property\n def output_types(self):\n return dtypes.uint8\n","sub_path":"tensorflow_io/video/python/ops/video_dataset_ops.py","file_name":"video_dataset_ops.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"198450849","text":"import os\n\nframeWidth = 24\nframeHeight = 32\n\nstartX = frameWidth * 0\nstartY = frameHeight * 3\n\nxNum = 3\nyNum = 1\n\nscalePercent = 200\n\ncommand = \"convert -crop %dx%d+%d+%d ./a.png ./b.png\" % (frameWidth * xNum, frameHeight * yNum, startX, startY)\nos.system(command)\n\ncommand = \"convert ./b.png -transparent #C1A5C8 -threshold 0,0,0,0 -fill #444444 -opaque #ffffff ./c.png\"\nos.system(command)\n\ncommand = \"convert ./c.png -interpolate Nearest -filter point -resize %d%% ./d.png\" % (scalePercent)\nos.system(command)\n","sub_path":"scripts/py/crop_sprites.py","file_name":"crop_sprites.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"536128855","text":"import gzip\nimport json as _json\nimport logging\nimport numbers\nimport os\nimport re\nfrom collections import UserList\nfrom http import cookiejar\nfrom typing import Any, Callable, Dict, List, Optional, Union\nfrom urllib.parse import urljoin\n\nimport requests.utils\nfrom requests import Response, Session\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3 import Retry\nfrom requests.structures import CaseInsensitiveDict\n\nfrom cognite.client import utils\nfrom cognite.client.data_classes._base import CogniteFilter, CogniteResource, CogniteUpdate\nfrom cognite.client.exceptions import CogniteAPIError, CogniteNotFoundError\n\nlog = logging.getLogger(\"cognite-sdk\")\n\n\nclass BlockAll(cookiejar.CookiePolicy):\n return_ok = set_ok = domain_return_ok = path_return_ok = lambda self, *args, **kwargs: False\n netscape = True\n rfc2965 = hide_cookie2 = False\n\n\nclass RetryWithMaxBackoff(Retry):\n def get_backoff_time(self):\n return min(utils._client_config._DefaultConfig().max_retry_backoff, super().get_backoff_time())\n\n\ndef _init_requests_session():\n session = Session()\n session_with_retry = Session()\n\n cookies_policy = BlockAll()\n session.cookies.set_policy(cookies_policy)\n session_with_retry.cookies.set_policy(cookies_policy)\n\n config = utils._client_config._DefaultConfig()\n adapter = HTTPAdapter(\n max_retries=RetryWithMaxBackoff(\n total=config.max_retries,\n read=0,\n backoff_factor=0.5,\n status_forcelist=[429],\n method_whitelist=False,\n raise_on_status=False,\n ),\n pool_maxsize=config.max_connection_pool_size,\n )\n 
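# unlike the plain adapter above, this one also retries on the full config.status_forcelist and permits read retries\n    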
adapter_with_retry = HTTPAdapter(\n max_retries=RetryWithMaxBackoff(\n total=config.max_retries,\n backoff_factor=0.5,\n status_forcelist=config.status_forcelist,\n method_whitelist=False,\n raise_on_status=False,\n ),\n pool_maxsize=config.max_connection_pool_size,\n )\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n session_with_retry.mount(\"http://\", adapter_with_retry)\n session_with_retry.mount(\"https://\", adapter_with_retry)\n\n if config.disable_ssl:\n import urllib3\n\n urllib3.disable_warnings()\n session.verify = False\n session_with_retry.verify = False\n\n return session, session_with_retry\n\n\n_REQUESTS_SESSION, _REQUESTS_SESSION_WITH_RETRY = _init_requests_session()\n\n\nclass APIClient:\n _RESOURCE_PATH = None\n _LIST_CLASS = None\n\n # TODO: This following set should be generated from the openapi spec somehow.\n RETRYABLE_POST_ENDPOINTS = {\n \"/assets/list\",\n \"/assets/byids\",\n \"/assets/search\",\n \"/events/list\",\n \"/events/byids\",\n \"/events/search\",\n \"/files/list\",\n \"/files/byids\",\n \"/files/search\",\n \"/files/downloadlink\",\n \"/timeseries/byids\",\n \"/timeseries/search\",\n \"/timeseries/data\",\n \"/timeseries/data/list\",\n \"/timeseries/data/latest\",\n \"/timeseries/data/delete\",\n \"/sequences/byids\",\n \"/sequences/search\",\n \"/sequences/data\",\n \"/sequences/data/list\",\n \"/sequences/data/delete\",\n \"/datasets/list\",\n \"/datasets/aggregate\",\n \"/datasets/byids\",\n \"/relationships/list\",\n \"/relationships/byids\",\n }\n\n def __init__(self, config: utils._client_config.ClientConfig, api_version: str = None, cognite_client=None):\n self._request_session = _REQUESTS_SESSION\n self._request_session_with_retry = _REQUESTS_SESSION_WITH_RETRY\n\n self._config = config\n self._api_version = api_version\n self._cognite_client = cognite_client\n\n self._CREATE_LIMIT = 1000\n self._LIST_LIMIT = 1000\n self._RETRIEVE_LIMIT = 1000\n self._DELETE_LIMIT = 1000\n self._UPDATE_LIMIT = 1000\n\n def _delete(self, url_path: str, params: Dict[str, Any] = None, headers: Dict[str, Any] = None):\n return self._do_request(\"DELETE\", url_path, params=params, headers=headers, timeout=self._config.timeout)\n\n def _get(self, url_path: str, params: Dict[str, Any] = None, headers: Dict[str, Any] = None):\n return self._do_request(\"GET\", url_path, params=params, headers=headers, timeout=self._config.timeout)\n\n def _post(\n self, url_path: str, json: Dict[str, Any] = None, params: Dict[str, Any] = None, headers: Dict[str, Any] = None\n ):\n return self._do_request(\n \"POST\", url_path, json=json, headers=headers, params=params, timeout=self._config.timeout\n )\n\n def _put(self, url_path: str, json: Dict[str, Any] = None, headers: Dict[str, Any] = None):\n return self._do_request(\"PUT\", url_path, json=json, headers=headers, timeout=self._config.timeout)\n\n def _do_request(self, method: str, url_path: str, **kwargs):\n is_retryable, full_url = self._resolve_url(method, url_path)\n\n json_payload = kwargs.get(\"json\")\n headers = self._configure_headers(self._config.headers.copy())\n headers.update(kwargs.get(\"headers\") or {})\n\n if json_payload:\n data = _json.dumps(json_payload, default=utils._auxiliary.json_dump_default)\n kwargs[\"data\"] = data\n if method in [\"PUT\", \"POST\"] and not os.getenv(\"COGNITE_DISABLE_GZIP\", False):\n kwargs[\"data\"] = gzip.compress(data.encode())\n headers[\"Content-Encoding\"] = \"gzip\"\n\n kwargs[\"headers\"] = headers\n\n if is_retryable:\n res = 
self._request_session_with_retry.request(method=method, url=full_url, **kwargs)\n else:\n res = self._request_session.request(method=method, url=full_url, **kwargs)\n\n if not self._status_is_valid(res.status_code):\n self._raise_API_error(res, payload=json_payload)\n self._log_request(res, payload=json_payload)\n return res\n\n def _configure_headers(self, additional_headers):\n headers = CaseInsensitiveDict()\n headers.update(requests.utils.default_headers())\n if self._config.token is None:\n headers[\"api-key\"] = self._config.api_key\n elif isinstance(self._config.token, str):\n headers[\"Authorization\"] = \"Bearer {}\".format(self._config.token)\n elif isinstance(self._config.token, Callable):\n headers[\"Authorization\"] = \"Bearer {}\".format(self._config.token())\n else:\n raise TypeError(\"'token' must be str, Callable, or None.\")\n headers[\"content-type\"] = \"application/json\"\n headers[\"accept\"] = \"application/json\"\n headers[\"x-cdp-sdk\"] = \"CognitePythonSDK:{}\".format(utils._auxiliary.get_current_sdk_version())\n headers[\"x-cdp-app\"] = self._config.client_name\n if \"User-Agent\" in headers:\n headers[\"User-Agent\"] += \" \" + utils._auxiliary.get_user_agent()\n else:\n headers[\"User-Agent\"] = utils._auxiliary.get_user_agent()\n headers.update(additional_headers)\n return headers\n\n def _resolve_url(self, method: str, url_path: str):\n if not url_path.startswith(\"/\"):\n raise ValueError(\"URL path must start with '/'\")\n base_url = self._get_base_url_with_base_path()\n full_url = base_url + url_path\n is_retryable = self._is_retryable(method, full_url)\n return is_retryable, full_url\n\n def _get_base_url_with_base_path(self):\n base_path = \"/api/{}/projects/{}\".format(self._api_version, self._config.project) if self._api_version else \"\"\n return urljoin(self._config.base_url, base_path)\n\n def _is_retryable(self, method, path):\n valid_methods = [\"GET\", \"POST\", \"PUT\", \"DELETE\", \"PATCH\"]\n match = re.match(\"(?:http|https)://[a-z\\d.:]+(?:/api/v1/projects/[^/]+)?(/.+)\", path)\n\n if not match:\n raise ValueError(\"Path {} is not valid. Cannot resolve whether or not it is retryable\".format(path))\n if method not in valid_methods:\n raise ValueError(\"Method {} is not valid. 
Must be one of {}\".format(method, valid_methods))\n path_end = match.group(1)\n\n if method in [\"GET\", \"PUT\", \"PATCH\"]:\n return True\n if method == \"POST\" and path_end in self.RETRYABLE_POST_ENDPOINTS:\n return True\n return False\n\n def _retrieve(\n self, id: Union[int, str], cls=None, resource_path: str = None, params: Dict = None, headers: Dict = None\n ):\n cls = cls or self._LIST_CLASS._RESOURCE\n resource_path = resource_path or self._RESOURCE_PATH\n try:\n res = self._get(\n url_path=utils._auxiliary.interpolate_and_url_encode(resource_path + \"/{}\", str(id)),\n params=params,\n headers=headers,\n )\n return cls._load(res.json(), cognite_client=self._cognite_client)\n except CogniteAPIError as e:\n if e.code != 404:\n raise\n\n def _retrieve_multiple(\n self,\n wrap_ids: bool,\n cls=None,\n resource_path: str = None,\n ids: Union[List[int], int] = None,\n external_ids: Union[List[str], str] = None,\n ignore_unknown_ids=None,\n headers: Dict = None,\n ):\n cls = cls or self._LIST_CLASS\n resource_path = resource_path or self._RESOURCE_PATH\n all_ids = self._process_ids(ids, external_ids, wrap_ids=wrap_ids)\n id_chunks = utils._auxiliary.split_into_chunks(all_ids, self._RETRIEVE_LIMIT)\n\n ignore_unknown = {} if ignore_unknown_ids is None else {\"ignoreUnknownIds\": ignore_unknown_ids}\n tasks = [\n {\"url_path\": resource_path + \"/byids\", \"json\": {\"items\": id_chunk, **ignore_unknown}, \"headers\": headers}\n for id_chunk in id_chunks\n ]\n tasks_summary = utils._concurrency.execute_tasks_concurrently(\n self._post, tasks, max_workers=self._config.max_workers\n )\n\n if tasks_summary.exceptions:\n try:\n utils._concurrency.collect_exc_info_and_raise(tasks_summary.exceptions)\n except CogniteNotFoundError:\n if self._is_single_identifier(ids, external_ids):\n return None\n raise\n\n retrieved_items = tasks_summary.joined_results(lambda res: res.json()[\"items\"])\n\n if self._is_single_identifier(ids, external_ids):\n return cls._RESOURCE._load(retrieved_items[0], cognite_client=self._cognite_client)\n return cls._load(retrieved_items, cognite_client=self._cognite_client)\n\n def _list_generator(\n self,\n method: str,\n cls=None,\n resource_path: str = None,\n limit: int = None,\n chunk_size: int = None,\n filter: Dict = None,\n sort: List[str] = None,\n other_params: Dict = None,\n headers: Dict = None,\n ):\n if limit == -1 or limit == float(\"inf\"):\n limit = None\n cls = cls or self._LIST_CLASS\n resource_path = resource_path or self._RESOURCE_PATH\n total_items_retrieved = 0\n current_limit = self._LIST_LIMIT\n if chunk_size and chunk_size <= self._LIST_LIMIT:\n current_limit = chunk_size\n next_cursor = None\n filter = filter or {}\n current_items = []\n while True:\n if limit:\n num_of_remaining_items = limit - total_items_retrieved\n if num_of_remaining_items < self._LIST_LIMIT:\n current_limit = num_of_remaining_items\n\n if method == \"GET\":\n params = filter.copy()\n params[\"limit\"] = current_limit\n params[\"cursor\"] = next_cursor\n if sort is not None:\n params[\"sort\"] = sort\n res = self._get(url_path=resource_path, params=params, headers=headers)\n elif method == \"POST\":\n body = {\"filter\": filter, \"limit\": current_limit, \"cursor\": next_cursor, **(other_params or {})}\n if sort is not None:\n body[\"sort\"] = sort\n res = self._post(url_path=resource_path + \"/list\", json=body, headers=headers)\n else:\n raise ValueError(\"_list_generator parameter `method` must be GET or POST, not {}\".format(method))\n last_received_items = 
res.json()[\"items\"]\n total_items_retrieved += len(last_received_items)\n\n if not chunk_size:\n for item in last_received_items:\n yield cls._RESOURCE._load(item, cognite_client=self._cognite_client)\n else:\n current_items.extend(last_received_items)\n if len(current_items) >= chunk_size:\n items_to_yield = current_items[:chunk_size]\n current_items = current_items[chunk_size:]\n yield cls._load(items_to_yield, cognite_client=self._cognite_client)\n\n next_cursor = res.json().get(\"nextCursor\")\n if total_items_retrieved == limit or next_cursor is None:\n if chunk_size and current_items:\n yield cls._load(current_items, cognite_client=self._cognite_client)\n break\n\n def _list(\n self,\n method: str,\n cls=None,\n resource_path: str = None,\n limit: int = None,\n filter: Dict = None,\n other_params=None,\n partitions=None,\n sort=None,\n headers: Dict = None,\n ):\n if partitions:\n if limit not in [None, -1, float(\"inf\")]:\n raise ValueError(\"When using partitions, limit should be `None`, `-1` or `inf`.\")\n if sort is not None:\n raise ValueError(\"When using sort, partitions is not supported.\")\n return self._list_partitioned(\n partitions=partitions,\n cls=cls,\n resource_path=resource_path,\n filter=filter,\n other_params=other_params,\n headers=headers,\n )\n\n cls = cls or self._LIST_CLASS\n resource_path = resource_path or self._RESOURCE_PATH\n items = []\n for resource_list in self._list_generator(\n cls=cls,\n resource_path=resource_path,\n method=method,\n limit=limit,\n chunk_size=self._LIST_LIMIT,\n filter=filter,\n sort=sort,\n other_params=other_params,\n headers=headers,\n ):\n items.extend(resource_list.data)\n return cls(items, cognite_client=self._cognite_client)\n\n def _list_partitioned(\n self,\n partitions,\n cls=None,\n resource_path: str = None,\n filter: Dict = None,\n other_params=None,\n headers: Dict = None,\n ):\n cls = cls or self._LIST_CLASS\n resource_path = resource_path or self._RESOURCE_PATH\n\n def get_partition(partition):\n next_cursor = None\n retrieved_items = []\n while True:\n body = {\n \"filter\": filter or {},\n \"limit\": self._LIST_LIMIT,\n \"cursor\": next_cursor,\n \"partition\": partition,\n **(other_params or {}),\n }\n res = self._post(url_path=resource_path + \"/list\", json=body, headers=headers)\n retrieved_items.extend(res.json()[\"items\"])\n next_cursor = res.json().get(\"nextCursor\")\n if next_cursor is None:\n break\n return retrieved_items\n\n tasks = [(\"{}/{}\".format(i + 1, partitions),) for i in range(partitions)]\n tasks_summary = utils._concurrency.execute_tasks_concurrently(get_partition, tasks, max_workers=partitions)\n if tasks_summary.exceptions:\n raise tasks_summary.exceptions[0]\n return cls._load(tasks_summary.joined_results(), cognite_client=self._cognite_client)\n\n def _aggregate(\n self,\n resource_path: str = None,\n filter: Union[CogniteFilter, Dict] = None,\n aggregate: str = None,\n fields: List[str] = None,\n headers: Dict = None,\n cls=None,\n ):\n utils._auxiliary.assert_type(filter, \"filter\", [dict, CogniteFilter], allow_none=True)\n utils._auxiliary.assert_type(fields, \"fields\", [list], allow_none=True)\n if isinstance(filter, CogniteFilter):\n filter = filter.dump(camel_case=True)\n elif isinstance(filter, Dict):\n filter = utils._auxiliary.convert_all_keys_to_camel_case(filter)\n resource_path = resource_path or self._RESOURCE_PATH\n body = {\"filter\": filter or {}}\n if aggregate is not None:\n body[\"aggregate\"] = aggregate\n if fields is not None:\n body[\"fields\"] = fields\n res 
= self._post(url_path=resource_path + \"/aggregate\", json=body, headers=headers)\n return [cls(**agg) for agg in res.json()[\"items\"]]\n\n def _create_multiple(\n self,\n items: Union[List[Any], Any],\n cls: Any = None,\n resource_path: str = None,\n params: Dict = None,\n headers: Dict = None,\n limit=None,\n ):\n cls = cls or self._LIST_CLASS\n resource_path = resource_path or self._RESOURCE_PATH\n limit = limit or self._CREATE_LIMIT\n single_item = not isinstance(items, list)\n if single_item:\n items = [items]\n\n items_split = []\n for i in range(0, len(items), limit):\n if isinstance(items[i], CogniteResource):\n items_chunk = [item.dump(camel_case=True) for item in items[i : i + limit]]\n else:\n items_chunk = [item for item in items[i : i + limit]]\n items_split.append({\"items\": items_chunk})\n\n tasks = [(resource_path, task_items, params, headers) for task_items in items_split]\n summary = utils._concurrency.execute_tasks_concurrently(self._post, tasks, max_workers=self._config.max_workers)\n\n def unwrap_element(el):\n if isinstance(el, dict):\n return cls._RESOURCE._load(el)\n else:\n return el\n\n def str_format_element(el):\n if isinstance(el, CogniteResource):\n dumped = el.dump()\n if \"external_id\" in dumped:\n return dumped[\"external_id\"]\n return dumped\n return el\n\n summary.raise_compound_exception_if_failed_tasks(\n task_unwrap_fn=lambda task: task[1][\"items\"],\n task_list_element_unwrap_fn=unwrap_element,\n str_format_element_fn=str_format_element,\n )\n created_resources = summary.joined_results(lambda res: res.json()[\"items\"])\n\n if single_item:\n return cls._RESOURCE._load(created_resources[0], cognite_client=self._cognite_client)\n return cls._load(created_resources, cognite_client=self._cognite_client)\n\n def _delete_multiple(\n self,\n wrap_ids: bool,\n resource_path: str = None,\n ids: Union[List[int], int] = None,\n external_ids: Union[List[str], str] = None,\n params: Dict = None,\n headers: Dict = None,\n extra_body_fields: Dict = None,\n ):\n resource_path = resource_path or self._RESOURCE_PATH\n all_ids = self._process_ids(ids, external_ids, wrap_ids)\n id_chunks = utils._auxiliary.split_into_chunks(all_ids, self._DELETE_LIMIT)\n tasks = [\n {\n \"url_path\": resource_path + \"/delete\",\n \"json\": {\"items\": chunk, **(extra_body_fields or {})},\n \"params\": params,\n \"headers\": headers,\n }\n for chunk in id_chunks\n ]\n summary = utils._concurrency.execute_tasks_concurrently(self._post, tasks, max_workers=self._config.max_workers)\n summary.raise_compound_exception_if_failed_tasks(\n task_unwrap_fn=lambda task: task[\"json\"][\"items\"],\n task_list_element_unwrap_fn=utils._auxiliary.unwrap_identifer,\n )\n\n def _update_multiple(\n self,\n items: Union[List[Any], Any],\n cls: Any = None,\n resource_path: str = None,\n params: Dict = None,\n headers: Dict = None,\n ):\n cls = cls or self._LIST_CLASS\n resource_path = resource_path or self._RESOURCE_PATH\n patch_objects = []\n single_item = not isinstance(items, (list, UserList))\n if single_item:\n items = [items]\n\n for item in items:\n if isinstance(item, CogniteResource):\n patch_objects.append(self._convert_resource_to_patch_object(item, cls._UPDATE._get_update_properties()))\n elif isinstance(item, CogniteUpdate):\n patch_objects.append(item.dump())\n else:\n raise ValueError(\"update item must be of type CogniteResource or CogniteUpdate\")\n patch_object_chunks = utils._auxiliary.split_into_chunks(patch_objects, self._UPDATE_LIMIT)\n\n tasks = [\n {\"url_path\": resource_path + 
\"/update\", \"json\": {\"items\": chunk}, \"params\": params, \"headers\": headers}\n for chunk in patch_object_chunks\n ]\n\n tasks_summary = utils._concurrency.execute_tasks_concurrently(\n self._post, tasks, max_workers=self._config.max_workers\n )\n tasks_summary.raise_compound_exception_if_failed_tasks(\n task_unwrap_fn=lambda task: task[\"json\"][\"items\"],\n task_list_element_unwrap_fn=lambda el: utils._auxiliary.unwrap_identifer(el),\n )\n updated_items = tasks_summary.joined_results(lambda res: res.json()[\"items\"])\n\n if single_item:\n return cls._RESOURCE._load(updated_items[0], cognite_client=self._cognite_client)\n return cls._load(updated_items, cognite_client=self._cognite_client)\n\n def _search(\n self,\n search: Dict,\n filter: Union[Dict, CogniteFilter],\n limit: int,\n cls: Any = None,\n resource_path: str = None,\n params: Dict = None,\n headers: Dict = None,\n ):\n utils._auxiliary.assert_type(filter, \"filter\", [dict, CogniteFilter], allow_none=True)\n if isinstance(filter, CogniteFilter):\n filter = filter.dump(camel_case=True)\n elif isinstance(filter, dict):\n filter = utils._auxiliary.convert_all_keys_to_camel_case(filter)\n cls = cls or self._LIST_CLASS\n resource_path = resource_path or self._RESOURCE_PATH\n res = self._post(\n url_path=resource_path + \"/search\",\n json={\"search\": search, \"filter\": filter, \"limit\": limit},\n params=params,\n headers=headers,\n )\n return cls._load(res.json()[\"items\"], cognite_client=self._cognite_client)\n\n @staticmethod\n def _convert_resource_to_patch_object(resource, update_attributes):\n dumped_resource = resource.dump(camel_case=True)\n has_id = \"id\" in dumped_resource\n has_external_id = \"externalId\" in dumped_resource\n\n patch_object = {\"update\": {}}\n if has_id:\n patch_object[\"id\"] = dumped_resource.pop(\"id\")\n elif has_external_id:\n patch_object[\"externalId\"] = dumped_resource.pop(\"externalId\")\n\n for key, value in dumped_resource.items():\n if utils._auxiliary.to_snake_case(key) in update_attributes:\n patch_object[\"update\"][key] = {\"set\": value}\n return patch_object\n\n @staticmethod\n def _process_ids(\n ids: Union[List[int], int, None], external_ids: Union[List[str], str, None], wrap_ids: bool\n ) -> List:\n if external_ids is None and ids is None:\n raise ValueError(\"No ids specified\")\n if external_ids and not wrap_ids:\n raise ValueError(\"externalIds must be wrapped\")\n\n if isinstance(ids, numbers.Integral):\n ids = [ids]\n elif isinstance(ids, list) or ids is None:\n ids = ids or []\n else:\n raise TypeError(\"ids must be int or list of int\")\n\n if isinstance(external_ids, str):\n external_ids = [external_ids]\n elif isinstance(external_ids, list) or external_ids is None:\n external_ids = external_ids or []\n else:\n raise TypeError(\"external_ids must be str or list of str\")\n\n if wrap_ids:\n ids = [{\"id\": id} for id in ids]\n external_ids = [{\"externalId\": external_id} for external_id in external_ids]\n\n all_ids = ids + external_ids\n\n return all_ids\n\n @staticmethod\n def _is_single_identifier(ids, external_ids):\n single_id = isinstance(ids, numbers.Integral) and external_ids is None\n single_external_id = isinstance(external_ids, str) and ids is None\n return single_id or single_external_id\n\n @staticmethod\n def _status_is_valid(status_code: int):\n return status_code < 400\n\n @staticmethod\n def _raise_API_error(res: Response, payload: Dict):\n x_request_id = res.headers.get(\"X-Request-Id\")\n code = res.status_code\n missing = None\n duplicated = 
None\n extra = {}\n try:\n error = res.json()[\"error\"]\n if isinstance(error, str):\n msg = error\n elif isinstance(error, Dict):\n msg = error[\"message\"]\n missing = error.get(\"missing\")\n duplicated = error.get(\"duplicated\")\n for k, v in error.items():\n if k not in [\"message\", \"missing\", \"duplicated\", \"code\"]:\n extra[k] = v\n else:\n msg = res.content\n except Exception:\n msg = res.content\n\n error_details = {\"X-Request-ID\": x_request_id}\n if payload:\n error_details[\"payload\"] = payload\n if missing:\n error_details[\"missing\"] = missing\n if duplicated:\n error_details[\"duplicated\"] = duplicated\n error_details[\"headers\"] = res.request.headers.copy()\n APIClient._sanitize_headers(error_details[\"headers\"])\n if res.history:\n for res_hist in res.history:\n log.debug(\n \"REDIRECT AFTER HTTP Error {} {} {}: {}\".format(\n res_hist.status_code, res_hist.request.method, res_hist.request.url, res_hist.content\n )\n )\n log.debug(\"HTTP Error {} {} {}: {}\".format(code, res.request.method, res.request.url, msg), extra=error_details)\n raise CogniteAPIError(msg, code, x_request_id, missing=missing, duplicated=duplicated, extra=extra)\n\n @staticmethod\n def _log_request(res: Response, **kwargs):\n method = res.request.method\n url = res.request.url\n status_code = res.status_code\n\n extra = kwargs.copy()\n extra[\"headers\"] = res.request.headers.copy()\n APIClient._sanitize_headers(extra[\"headers\"])\n if extra[\"payload\"] is None:\n del extra[\"payload\"]\n\n http_protocol_version = \".\".join(list(str(res.raw.version)))\n\n log.debug(\"HTTP/{} {} {} {}\".format(http_protocol_version, method, url, status_code), extra=extra)\n\n @staticmethod\n def _sanitize_headers(headers: Optional[Dict]):\n if headers is None:\n return\n if \"api-key\" in headers:\n headers[\"api-key\"] = \"***\"\n if \"Authorization\" in headers:\n headers[\"Authorization\"] = \"***\"\n","sub_path":"cognite/client/_api_client.py","file_name":"_api_client.py","file_ext":"py","file_size_in_byte":27649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"468829225","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 24 14:46:35 2020\n\n@author: akash\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np; np.random.seed(1)\nplt.rcParams[\"figure.figsize\"] = 5,2\n\nx = np.linspace(0,50)\ny = np.cumsum(np.random.randn(50))+6\n\nfig, (ax,ax2) = plt.subplots(nrows=2, sharex=True)\n\nextent = [x[0]-(x[1]-x[0])/2., x[-1]+(x[1]-x[0])/2.,0,1]\nax.imshow(y[np.newaxis,:], cmap=\"plasma\", aspect=\"auto\", extent=extent)\nax.set_yticks([])\nax.set_xlim(extent[0], extent[1])\n\nax2.plot(x,y)\nplt.colorbar()\nplt.tight_layout()\nplt.show()","sub_path":"list_heatmap_with_graph.py","file_name":"list_heatmap_with_graph.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"499086100","text":"'''\nCreated on Jul 15, 2011\n\n@package: internationalization\n@copyright: 2011 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Nistor Gabriel\n\nContains the internationalization setup files.\n'''\n\n# --------------------------------------------------------------------\n\nNAME = 'internationalization'\nGROUP = 'internationalization'\nVERSION = '1.0'\nDESCRIPTION = 'Provides the scanning and persistance for the localized messages'\nAUTHOR = 'Gabriel Nistor'\nAUTHOR_EMAIL = 'gabriel.nistor@sourcefabric.org'\nKEYWORDS = 
['Ally', 'REST', 'plugin', 'internationalization']\nINSTALL_REQUIRES = ['ally-api >= 1.0', \n 'ally-plugin >= 1.0', \n 'support-sqlalchemy >= 1.0', \n 'support-cdm >= 1.0']","sub_path":"plugins/internationalization/__plugin__/internationalization/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"378237266","text":"inp = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\nturns = 1\ntotal = []\ndic = {'13': 0, '23': 1, '33': 2, '12': 3, '22': 4, '32': 5, '11': 6, '21': 7, '31': 8}\n\n\ndef printing():\n print(\"---------\")\n for i in range(0, 9, 3):\n print(f'| {inp[i]} {inp[i+1]} {inp[i+2]} |')\n print(\"---------\")\n\n\ndef check_status():\n global total\n first = [inp[0] + inp[1] + inp[2], inp[3] + inp[4] + inp[5], inp[6] + inp[7] + inp[8]]\n second = [inp[0] + inp[3] + inp[6], inp[1] + inp[4] + inp[7], inp[2] + inp[5] + inp[8]]\n third = [inp[2] + inp[4] + inp[6]]\n fourth = [inp[0] + inp[4] + inp[8]]\n total = first + second + third + fourth\n if 'OOO' in total:\n print('O wins')\n exit()\n elif 'XXX' in total:\n print('X wins')\n exit()\n elif ' ' not in inp:\n print('Draw')\n exit()\n\n\ndef user_input():\n try:\n input2 = [int(i) for i in input('Enter the coordinates: > ').split()]\n coord = ''.join([str(i) for i in input2])\n try:\n if inp[dic[coord]] != ' ':\n print('This cell is occupied! Choose another one!')\n return user_input()\n else:\n return game(coord)\n except KeyError:\n print('Coordinates should be from 1 to 3!')\n return user_input()\n except ValueError:\n print('You should enter numbers!')\n return user_input()\n\n\ndef game(coord):\n global turns\n if turns % 2:\n inp[dic[coord]] = 'X'\n else:\n inp[dic[coord]] = 'O'\n turns += 1\n printing()\n check_status()\n user_input()\n\n\nprinting()\nuser_input()\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"204595571","text":"from typing import List\nfrom foodvisor.models.food import Food\n\n\nclass FoodType:\n STARTER = 'starter'\n DISH = 'dish'\n DESERT = 'desert'\n\n\nclass FoodService:\n @staticmethod\n def getFoodFromGivenType(foodList: List[Food], foodType: str):\n return list(filter(lambda f: f['type'] == foodType, foodList))\n\n @staticmethod\n def getFoodByGivenCalories(starters: List[Food], dishs: List[Food], deserts: List[Food], calories: int):\n if calories <= 0:\n return {\n 'error': 'Calories count is invalid',\n 'calories': calories,\n }\n\n meal = { 'starter': None, 'dish': None, 'desert': None }\n\n startersLen = len(starters)\n dishsLen = len(dishs)\n desertsLen = len(deserts)\n\n startersCursor = 0\n dishsCursor = dishsLen - 1\n desertsCursor = desertsLen - 1\n\n currentCalories = 0\n diff = starters[startersLen - 1]['cal'] + dishs[dishsLen - 1]['cal'] + deserts[desertsLen - 1]['cal'] + 1\n\n while startersCursor < startersLen and dishsCursor >= 0 and desertsCursor >= 0:\n currentCalories = starters[startersCursor]['cal'] + dishs[dishsCursor]['cal'] + deserts[desertsCursor]['cal']\n\n if currentCalories - calories < diff:\n meal['starter'] = starters[startersCursor],\n meal['dish'] = dishs[dishsCursor],\n meal['desert'] = deserts[desertsCursor],\n diff = abs(currentCalories - calories)\n\n if currentCalories == calories or\\\n (calories - calories * 0.1) <= currentCalories <= (calories + calories * 0.1):\n break\n\n if 
currentCalories >= calories - calories * 0.1:\n dishsCursor -= 1\n desertsCursor -= 1\n else:\n startersCursor += 1\n\n if not (calories - calories * 0.1) <= currentCalories <= (calories + calories * 0.1):\n return {\n 'error': 'No meal in given range',\n 'calories': currentCalories,\n 'target': calories,\n }\n return {\n 'meal': [meal['starter'][0], meal['dish'][0], meal['desert'][0]],\n 'calories': currentCalories,\n 'target': calories,\n }\n\n","sub_path":"foodvisor/common/foodService.py","file_name":"foodService.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"499254246","text":"#!/usr/bin/python\n# coding: utf-8\nfrom algorithm.sort.basic_sort import *\nimport pytest\nfrom random import randint\nimport time\nfrom functools import wraps\nimport json\nfrom copy import deepcopy\n\nfunc_time = {}\nres = {}\n\n\ndef run_timer(f):\n @wraps(f)\n def function_timer(*args, **kwargs):\n ts = time.time()\n result = f(*args, **kwargs)\n te = time.time()\n print(\"\\nTotal time running {0}: {1} seconds\".format(f.__name__, str(te - ts)))\n func_time[f.__name__] = te - ts\n return result\n\n return function_timer\n\n\n@pytest.fixture(scope=\"module\", params=[100, 1000, 10000, 100000, 1000000])\ndef create_list(request):\n print(\"start\")\n nums = []\n for i in range(request.param):\n nums.append(randint(0, request.param))\n yield nums\n print(\"end\")\n # print(func_time)\n # Assign with copy(); never assign directly, or later changes to func_time would affect the results recorded earlier\n res[request.param] = func_time.copy()\n # print(res)\n jsonObj = json.dumps(res)\n\n f = open('./logs.json', 'w')\n f.write(jsonObj)\n f.close()\n\n\n#\n# @run_timer\n# def test_bubble_sort(create_list):\n# origin_list = create_list.copy()\n# sorted_list = bubble_sort(origin_list)\n# # print(sorted_list, create_list, origin_list)\n# assert sorted_list != create_list\n#\n#\n# @run_timer\n# def test_insert_sort(create_list):\n# origin_list = create_list.copy()\n# sorted_list = insert_sort(origin_list)\n# # print(sorted_list, create_list)\n# assert sorted_list != create_list\n#\n# @run_timer\n# def test_select_sort(create_list):\n# origin_list = create_list.copy()\n# sorted_list = select_sort(origin_list)\n# assert sorted_list != create_list\n\n\n@run_timer\ndef test_shell_sort_two(create_list):\n origin_list = create_list.copy()\n sorted_list = shell_sort_two(origin_list)\n assert sorted_list != create_list\n\n\n@run_timer\ndef test_quick_sort(create_list):\n origin_list = create_list.copy()\n sorted_list = quick_sort(origin_list, 0, len(origin_list) - 1)\n assert sorted_list != create_list\n\n\n@run_timer\ndef test_heap_sort(create_list):\n origin_list = create_list.copy()\n heap_sort(origin_list)\n assert origin_list != create_list\n\n\n@run_timer\ndef test_merge_sort(create_list):\n origin_list = create_list.copy()\n sorted_list = merge_sort(origin_list)\n assert sorted_list != create_list\n\n\n@run_timer\ndef test_radix_sort(create_list):\n origin_list = create_list.copy()\n sorted_list = radix_sort(origin_list)\n assert sorted_list != create_list","sub_path":"algorithm/sort/tests/test_sorts.py","file_name":"test_sorts.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"222982850","text":"import os\nimport platform\nfrom peyetribe import EyeTribe\ntry:\n from useTobii_minimal import g, tstamp, dis, killTrackerConnection\nexcept (ConnectionRefusedError, ImportError):\n print(\"ConnectionRefusedError: useTobii_minimal not imported\")\n pass\n
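# Note: the try/except import above treats the tracker bridge as an optional dependency.\n# A minimal sketch of the same fallback pattern (the module name below is hypothetical):\n#   try:\n#       import optional_tracker_backend as backend\n#   except ImportError:\n#       backend = None  # callers must check for None before using the backend\n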
print(\"ConnectionRefusedError: useTobii_minimal nicht importiert\")\n pass\nimport time\nimport datetime\nimport numpy as np\nos_system = platform.system()\nprint(os_system)\nif os_system == \"Windows\":\n import win32api\n print(\"Win32api in CoordinateData importiert\")\nelse:\n from Quartz.CoreGraphics import CGEventCreate,CGEventGetLocation\n print(\"Quartz in CoordinateData importiert\")\n \nclass connecteyetracker(object):\n def __init__(self):\n self.os_system = platform.system()\n try:\n \n self.tracker = EyeTribe()\n self.tracker.connect()\n self.n = self.tracker.next()\n self.tracker.pushmode()\n self.tracker_type = \"eyetribe\"\n self.cursor_type = \"none\"\n except (ConnectionRefusedError,ImportError):\n print(\"ConnectionRefusedError oder ImportError, Eyetribe Server läuft nicht \\noder pyetribe modul nicht vorhanden\")\n self.tracker_type = \"mouse\"\n self.cursor_type = \"tcross\"\n n=0\n pass\n if self.tracker_type == \"mouse\":\n try: \n x = g[0]\n y = g[1]\n #print(x,y)\n self.tracker_type = 'Tobii/SMI'\n self.cursor_type = \"none\"\n modulor_fading = 5\n start_fading = 30\n except (ConnectionRefusedError, ImportError, NameError):\n print(\"ConnectionRefusedError oder ImportError, Tobii oder SMI Server läuft nicht \\noder useTobii_minimal modul nicht vorhanden\")\n self.tracker_type = \"mouse\"\n n=0\n pass\n \n print(\"\\n{} ist angeschlossen\".format(self.tracker_type))\n print(\"\\nCurser Type ist {}\\n\".format(self.cursor_type))\n \n def getmouseposition(self): \n if self.os_system == \"Windows\":\n x, y = win32api.GetCursorPos()\n else:\n ourEvent = CGEventCreate(None)\n currentpos=CGEventGetLocation(ourEvent)\n x,y = currentpos\n return(x,y) \n \n def geteyeposition(self):\n if self.tracker_type == \"eyetribe\":\n self.n = self.tracker.next()\n j = str(self.n)\n m = j.split(';')\n x = int(m[7])\n y = int(m[8])\n \n if self.tracker_type == \"Tobii/SMI\":\n x = g[0]\n y = g[1]\n #print(x,y)\n \n if self.tracker_type == \"mouse\":\n x,y = self.getmouseposition()\n \n return (x,y)\n\n def geteyepositionkalibriert(self,dx,dy):\n if self.tracker_type == \"eyetribe\":\n x_median = []\n y_median = []\n n = self.tracker.next()\n j = str(n)\n m = j.split(';')\n eyetime = datetime.datetime.fromtimestamp(float(m[0]))\n timenow = datetime.datetime.now()\n diff = timenow - eyetime\n dt = divmod(diff.total_seconds(),60)\n while dt[1] > 0.2:\n #print(\"Delta t: \", dt[1])\n n = self.tracker.next()\n j = str(n)\n m = j.split(';')\n eyetime = datetime.datetime.fromtimestamp(float(m[0]))\n timenow = datetime.datetime.now()\n diff = timenow - eyetime\n dt = divmod(diff.total_seconds(),60)\n for i in range(3):\n x_median.append(int(m[7]))\n y_median.append(int(m[8]))\n if np.median(x_median)!=0:\n x = int(np.median(x_median))+dx\n y = int(np.median(y_median))+dy\n else:\n x = int(np.median(x_median))\n y = int(np.median(y_median))\n \n if self.tracker_type == \"Tobii/SMI\":\n x_median = []\n y_median = []\n #print(\"distanze: \",dis[0])\n #print(tstamp[0])\n if dis[0] == 0 and g[0] != 0:\n cluster = 3\n \n else:\n cluster = 3\n for i in range(cluster):\n x_median.append(g[0])\n y_median.append(g[1])\n if int(np.median(x_median))!=0:\n x = int(np.median(x_median))+dx\n y = int(np.median(y_median))+dy\n else:\n x = int(np.median(x_median))\n y = int(np.median(y_median))\n if self.tracker_type == \"mouse\":\n x,y = self.getmouseposition()\n return(x,y)\n \n def gettracker_type(self):\n return(self.tracker_type)\n \n def getcursor_type(self):\n return(self.cursor_type)\n \n def 
disconnectSMI(self):\n killTrackerConnection()\n exit()\n \n \nif __name__ == '__main__':\n mycheckplatformObject = connecteyetracker()\n for i in range(100):\n x,y = mycheckplatformObject.geteyepositionkalibriert(0,0)\n print(x,y)\n time.sleep(0.2)\n ","sub_path":"CoordinateData.py","file_name":"CoordinateData.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"108426459","text":"# Evaluation script for the PRIM2018 Spectral super resolution Challenge\r\n#\r\n# * Provide input and output directories as arguments\r\n# * Validation files should be found in the '/ref' subdirectory of the input dir\r\n# * Input validation files are expected in .fla format\r\n\r\n\r\nimport numpy as np\r\nimport sys\r\nimport os\r\n\r\nimport spectral.io.envi as envi\r\nimport spectral\r\nspectral.settings.envi_support_nonlowercase_params = True\r\n # module to read ENVI images in python\r\nfrom skimage.measure import compare_ssim # to calculate ssim\r\n\r\n\r\n\r\n\r\nMRAEs = {}\r\nMSEs = {}\r\nSSIMs = {}\r\nSIDs = {}\r\nAPPSAs = {}\r\nPSNRs = {}\r\n\r\n\r\n#root_dir = \"/mnt/md0/CSIRO/projects/2018_May_ECCV_challange/scoring_program/program_track1\"\r\n#os.chdir(root_dir)\r\n\r\ndef get_ref_from_file(filename):\r\n fla_file = envi.open(filename + '.hdr', filename + '.fla')\r\n im = fla_file.load(scale=False)\r\n return im\r\n\r\ndef mse(imageA, imageB):\r\n\t# 'Mean Squared Error'\r\n\terr = np.sum((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)\r\n\terr /= float(imageA.shape[0] * imageA.shape[1] * imageA.shape[2])\r\n\treturn err\r\n \r\ndef find_psnr(imageA, imageB):\r\n\t# 'Mean Squared Error'\r\n mse = np.sum((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)\r\n mse /= float(imageA.shape[0] * imageA.shape[1] * imageA.shape[2])\r\n PIXEL_MAX = 65536\r\n psnr = 20 * np.log10(PIXEL_MAX / np.sqrt(mse + 1e-3))\r\n return psnr\r\n \r\ndef find_sid(gt, rc):\r\n N = gt.shape[2]\r\n err = np.zeros(N)\r\n for i in range(N):\r\n err[i] = abs(np.sum(rc[:,:,i] * np.log10((rc[:,:,i] + 1e-3)/(gt[:,:,i] + 1e-3))) +\r\n np.sum(gt[:,:,i] * np.log10((gt[:,:,i] + 1e-3)/(rc[:,:,i] + 1e-3))))\r\n return err / (gt.shape[1] * gt.shape[0])\r\n \r\ndef find_appsa(gt,rc):\r\n \r\n nom = np.sum(gt * rc, axis=2)\r\n denom = np.linalg.norm(gt, axis=2) * np.linalg.norm(rc, axis=2)\r\n \r\n cos = np.where((nom/(denom + 1e-3)) > 1, 1, (nom/(denom + 1e-3)))\r\n appsa = np.arccos(cos)\r\n \r\n return np.sum(appsa)/(gt.shape[1] * gt.shape[0])\r\n \r\n# input and output directories given as arguments\r\n[_, input_dir, output_dir] = sys.argv\r\n#####\r\n#input_dir = \"input\"\r\n#output_dir = \"output\"\r\n\r\nvalidation_files = os.listdir(input_dir +'/ref')\r\n\r\nfor f in validation_files:\r\n # Read ground truth data\r\n if os.path.splitext(f)[1] != '.fla':\r\n print('skipping '+f)\r\n continue\r\n gt = get_ref_from_file(input_dir + '/ref/' + os.path.splitext(f)[0])\r\n # Read user submission\r\n rc = get_ref_from_file(input_dir + '/res/' + os.path.splitext(f)[0])\r\n # compute MRAE\r\n diff = gt - rc\r\n abs_diff = np.abs(diff)\r\n relative_abs_diff = np.divide(abs_diff,gt + 1) # added epsilon to avoid division by zero.\r\n MRAEs[f] = np.mean(relative_abs_diff)\r\n print(\"f:\")\r\n print(f)\r\n print(\"MRAEs[f]:\")\r\n print(MRAEs[f])\r\n # compute SID\r\n SIDs[f] = find_sid(gt, rc)\r\n print(\"SIDs[f]:\")\r\n print(SIDs[f])\r\n # compute Mean Squared Error\r\n MSEs[f] = mse(gt, rc)\r\n print(\"MSEs[f]:\")\r\n print(MSEs[f])\r\n 
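# A quick sanity check of the mse() helper above, using hypothetical toy arrays:\r\n #   a = np.zeros((1, 1, 2)); b = np.ones((1, 1, 2))\r\n #   mse(a, b) -> 1.0, i.e. squared differences averaged over all H*W*C entries\r\n 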
# calculate ssim\r\n SSIMs[f] = compare_ssim(gt, rc)\r\n print(\"SSIMs[f]:\")\r\n print(SSIMs[f])\r\n # calculate appsa\r\n APPSAs[f] = find_appsa(gt, rc)\r\n print(\"APPSAs[f]:\")\r\n print(APPSAs[f])\r\n # calculate PSNR\r\n PSNRs[f] = find_psnr(gt, rc)\r\n print(\"PSNRs[f]:\")\r\n print(PSNRs[f])\r\n \r\n\r\nMRAE = np.mean(list(MRAEs.values()))\r\nMSE = np.mean(list(MSEs.values()))\r\nSSIM = np.mean(list(SSIMs.values()))\r\nSID = np.mean(list(SIDs.values()))\r\nAPPSA = np.mean(list(APPSAs.values()))\r\nPSNR = np.mean(list(PSNRs.values()))\r\nprint(\"MRAE:\\n\"+MRAE.astype(str))\r\nprint(\"MSE:\\n\"+MSE.astype(str))\r\nprint(\"SSIM:\\n\"+SSIM.astype(str))\r\nprint(\"SID:\\n\"+SID.astype(str))\r\nprint(\"APPSA:\\n\"+APPSA.astype(str))\r\nprint(\"PSNR:\\n\"+PSNR.astype(str))\r\n\r\n\r\n\r\nwith open(output_dir + '/scores.txt', 'w') as output_file:\r\n # write MRAE in scores.txt\r\n output_file.write(\"MRAE:\"+MRAE.astype(str))\r\n # write MSE in scores.txt\r\n output_file.write(\"\\nMSE:\"+MSE.astype(str))\r\n # write SSIM in scores.txt\r\n output_file.write(\"\\nSSIM:\"+SSIM.astype(str))\r\n # write SID in scores.txt\r\n output_file.write(\"\\nSID:\"+SID.astype(str))\r\n # write APPSA in scores.txt\r\n output_file.write(\"\\nAPPSA:\"+APPSA.astype(str))\r\n # write PSNR in scores.txt\r\n output_file.write(\"\\nPSNR:\"+PSNR.astype(str))","sub_path":"code/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"179019051","text":"import time\nfrom unittest import TestCase\n\nfrom api_throttler import FixedWindowThrottler\n\n\nclass TestFixedWindowThrottler(TestCase):\n\n def setUp(self) -> None:\n self.throttler = FixedWindowThrottler(2, 5)\n\n def test_continuous_calls(self):\n allowed_calls = 0\n for i in range(5):\n if not self.throttler.is_throttled(key=\"test_key\"):\n allowed_calls += 1\n time.sleep(1)\n self.assertEqual(allowed_calls, 2)\n\n def test_periodic_calls(self):\n allowed_calls = 0\n for i in range(10):\n if i in {0, 4, 5, 6}:\n if not self.throttler.is_throttled(key=\"test_key\"):\n allowed_calls += 1\n time.sleep(1)\n self.assertEqual(allowed_calls, 4)\n","sub_path":"tests/fixed_window_throttler_test.py","file_name":"fixed_window_throttler_test.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"192592755","text":"from django.shortcuts import render\nfrom books.models import Book\n\n\ndef books_view(request, date=None):\n template = 'books/books_list.html'\n\n context = {}\n if date:\n books = Book.objects.filter(pub_date=date)\n books_less = Book.objects.filter(pub_date__lt=date).order_by(\"-pub_date\").first()\n books_greater = Book.objects.filter(pub_date__gt=date).order_by(\"pub_date\").first()\n if books_less:\n books_less.pub_date = str(books_less.pub_date)\n context['books_less'] = books_less\n if books_greater:\n books_greater.pub_date = str(books_greater.pub_date)\n context['books_greater'] = books_greater\n else:\n books = Book.objects.all()\n for book in books:\n book.pub_date = str(book.pub_date)\n context['books'] = books\n return render(request, template, context)\n\n\n\n","sub_path":"models_list_displaying/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"651103820","text":"import numpy as np 
# fundamental package for scientific computing\nimport pyrealsense2 as rs # Intel RealSense cross-platform open-source API\nimport cv2\nimport numba as nb\n\nprint(\"Environment Ready\")\n\n# Configure depth and color streams\npipeline = rs.pipeline()\nconfig = rs.config()\n\nDEPTH_W = 848\nDEPTH_H = 480\nCOLOR_W = 848\nCOLOR_H = 480\nconfig.enable_stream(rs.stream.depth, DEPTH_W, DEPTH_H, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, COLOR_W, COLOR_H, rs.format.bgr8, 30)\n\n# Set green screen background.\nGREENSCREEN_DEPTH = 3 # in meters\nbg_img_r = np.full((COLOR_H,COLOR_W),0)\nbg_img_g = np.full((COLOR_H,COLOR_W),255)\nbg_img_b = np.full((COLOR_H,COLOR_W),0)\nbg_img = np.dstack((bg_img_r, bg_img_g, bg_img_b))\nprint(f\"Background image shape: {bg_img.shape}\")\n\n# or Select an image here\n\n\n@nb.njit(parallel=True)\ndef assemble_greenscreen_img(rs_img, bg_img, depth_array, green_screen_threshold=GREENSCREEN_DEPTH):\n output_img = np.empty(rs_img.shape).astype(np.uint8)\n for i in nb.prange(rs_img.shape[0]):\n for j in nb.prange(rs_img.shape[1]):\n if depth_array[i, j] < green_screen_threshold:\n output_img[i, j] = rs_img[i, j].astype(np.uint8)\n else:\n output_img[i, j] = bg_img[i, j].astype(np.uint8)\n return output_img\n\n# Start streaming\nprofile = pipeline.start(config)\ndepth_scale = profile.get_device().first_depth_sensor().get_depth_scale()\n\ntry:\n while True:\n\n # Wait for a coherent pair of frames: depth and color\n frames = pipeline.wait_for_frames()\n # depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n\n\n # Convert images to numpy arrays\n # depth_image = np.asanyarray(depth_frame.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n\n align = rs.align(rs.stream.color)\n frames = align.process(frames)\n aligned_depth_frame = frames.get_depth_frame()\n depth = np.asanyarray(aligned_depth_frame.get_data())\n\n if not aligned_depth_frame or not color_frame:\n continue\n\n depth = depth * depth_scale # depth array in meters\n dist, _, _, _ = cv2.mean(depth)\n\n merged_img = assemble_greenscreen_img(color_image, bg_img, depth)\n\n\n # Show images\n cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)\n cv2.imshow('RealSense', merged_img)\n\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n cv2.destroyAllWindows()\n pipeline.stop()\n break\n\nfinally:\n\n # Stop streaming\n cv2.destroyAllWindows()\n pipeline.stop()\n","sub_path":"greenscreen.py","file_name":"greenscreen.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"66884762","text":"# Copyright (C) 2018-present ichenq@outlook.com. 
All rights reserved.\n# Distributed under the terms and conditions of the Apache License.\n# See accompanying files LICENSE.\n\nimport os\nimport tabugen.typedef as types\nimport tabugen.predef as predef\nimport tabugen.lang as lang\nimport tabugen.version as version\nimport tabugen.util.strutil as strutil\n\n\n# Generates C++ code that loads CSV file data\nclass CppCsvLoadGenerator:\n TAB_SPACE = ' '\n\n def __init__(self):\n pass\n\n def setup(self, name):\n pass\n\n # Generate the assignment statements for an array field\n def gen_array_field_assign(self, prefix: str, typename: str, name: str, value_text: str, tabs: int) -> str:\n space = self.TAB_SPACE * tabs\n content = ''\n content += '%sauto arr = SplitString(%s, \"%s\");\\n' % (space, value_text, predef.PredefDelim1)\n content += '%sfor (size_t i = 0; i < arr.size(); i++)\\n' % space\n content += '%s{\\n' % space\n expr = lang.map_cpp_parse_expr(types.array_element_type(typename), 'arr[i]')\n content += '%s auto val = %s;\\n' % (space, expr)\n content += '%s %s%s.emplace_back(val);\\n' % (space, prefix, name)\n content += '%s}\\n' % space\n return content\n\n # Generate the assignment statements for a map field\n def gen_map_field_assign(self, prefix: str, typename: str, name: str, value_text: str, tabs: int) -> str:\n key_type, val_type = types.map_key_value_types(typename)\n space = self.TAB_SPACE * tabs\n content = ''\n content += '%sauto kvs = SplitString(%s, \"%s\");\\n' % (space, value_text, predef.PredefDelim1)\n content += '%sfor (size_t i = 0; i < kvs.size(); i++)\\n' % space\n content += '%s{\\n' % space\n content += '%s auto kv = SplitString(kvs[i], \"%s\");\\n' % (space, predef.PredefDelim2)\n content += '%s ASSERT(kv.size() == 2);\\n' % space\n content += '%s if(kv.size() == 2)\\n' % space\n content += '%s {\\n' % space\n content += '%s auto key = %s;\\n' % (space, lang.map_cpp_parse_expr(key_type, 'kv[0]'))\n content += '%s auto val = %s;\\n' % (space, lang.map_cpp_parse_expr(val_type, 'kv[1]'))\n content += '%s ASSERT(%s%s.count(key) == 0);\\n' % (space, prefix, name)\n content += '%s %s%s.emplace(std::make_pair(key, val));\\n' % (space, prefix, name)\n content += '%s }\\n' % space\n content += '%s}\\n' % space\n return content\n\n # Generate the assignment statement for a single field\n def gen_field_assign(self, prefix: str, origin_typename: str, name: str, value_text: str, tabs: int) -> str:\n content = ''\n space = self.TAB_SPACE * tabs\n if origin_typename.startswith('array'):\n content += self.gen_array_field_assign(prefix, origin_typename, name, value_text, tabs)\n elif origin_typename.startswith('map'):\n content += self.gen_map_field_assign(prefix, origin_typename, name, value_text, tabs)\n elif origin_typename == 'string':\n content += '%s%s%s = StripWhitespace(%s).as_string();\\n' % (space, prefix, name, value_text)\n else:\n expr = lang.map_cpp_parse_expr(origin_typename, value_text)\n content += '%s%s%s = %s;\\n' % (space, prefix, name, expr)\n return content\n\n # Generate the assignments for the embedded inner class\n def gen_inner_fields_assign(self, struct, prefix: str, rec_name: str, tabs: int) -> str:\n inner_fields = struct['inner_fields']\n inner_class_type = struct[\"options\"][predef.PredefInnerTypeClass]\n inner_var_name = struct[\"options\"][predef.PredefInnerFieldName]\n assert len(inner_class_type) > 0 and len(inner_var_name) > 0\n\n start = inner_fields['start']\n end = inner_fields['end']\n step = inner_fields['step']\n assert start > 0 and end > 0 and step > 1\n\n space = self.TAB_SPACE * tabs\n col = start\n content = ''\n content += '%sfor (size_t i = 1; i <= %s.size(); i++)\\n' % (space, rec_name)\n content += '%s{\\n' % space\n content += '%s %s::%s val;\\n' % (space, 
struct['camel_case_name'], inner_class_type)\n for i in range(step):\n field = struct['fields'][col + i]\n origin_typename = field['original_type_name']\n field_name = strutil.remove_suffix_number(field['camel_case_name'])\n text = '%s {\\n' % space\n text += '%s auto key = StringPrintf(\"%s%%d\", i);\\n' % (space, field_name)\n text += '%s auto iter = %s.find(key);\\n' % (space, rec_name)\n text += '%s if (iter != %s.end()) {\\n' % (space, rec_name)\n text += self.gen_field_assign('val.', origin_typename, field_name, 'iter->second', tabs + 3)\n text += '%s } else {\\n' % space\n text += '%s break;\\n' % space\n text += '%s }\\n' % space\n text += '%s }\\n' % space\n content += text\n content += ' %s%s.push_back(val);\\n' % (prefix, inner_var_name)\n content += '%s}\\n' % space\n content += '%s%s%s.shrink_to_fit();\\n' % (space, prefix, inner_var_name)\n return content\n\n # Generate the `ParseFrom` method\n def gen_parse_method(self, struct) -> str:\n content = ''\n inner_start_col = -1\n inner_end_col = -1\n inner_field_done = False\n if 'inner_fields' in struct:\n inner_start_col = struct['inner_fields']['start']\n inner_end_col = struct['inner_fields']['end']\n\n content += '// parse %s from string fields\\n' % struct['name']\n content += 'int %s::ParseFrom(const std::unordered_map<std::string, std::string>& record, %s* ptr)\\n' % (struct['name'], struct['name'])\n content += '{\\n'\n content += ' ASSERT(ptr != nullptr);\\n'\n content += ' std::unordered_map<std::string, std::string>::const_iterator iter;\\n'\n for col, field in enumerate(struct['fields']):\n if inner_start_col <= col < inner_end_col:\n if not inner_field_done:\n inner_field_done = True\n content += self.gen_inner_fields_assign(struct, 'ptr->', 'record', 1)\n else:\n origin_typename = field['original_type_name']\n content += ' iter = record.find(\"%s\");\\n' % field['name']\n content += ' if (iter != record.end()) {\\n'\n content += self.gen_field_assign('ptr->', origin_typename, field['name'], 'iter->second', 2)\n content += ' }\\n'\n\n content += ' return 0;\\n'\n content += '}\\n\\n'\n return content\n\n # Generate the `ParseFrom` method for KV mode\n def gen_kv_parse_method(self, struct):\n keyidx = predef.PredefKeyColumn\n validx = predef.PredefValueColumn\n typeidx = predef.PredefValueTypeColumn\n assert keyidx >= 0 and validx >= 0 and typeidx >= 0\n\n rows = struct['data_rows']\n content = ''\n content += '// parse %s from string fields\\n' % struct['name']\n content += 'int %s::ParseFrom(const std::unordered_map<std::string, std::string>& fields, %s* ptr)\\n' % (\n struct['name'], struct['name'])\n content += '{\\n'\n content += ' ASSERT(ptr != nullptr);\\n'\n content += ' std::unordered_map<std::string, std::string>::const_iterator iter;\\n'\n for row in rows:\n name = row[keyidx].strip()\n origin_typename = row[typeidx].strip()\n content += ' iter = fields.find(\"%s\");\\n' % name\n content += ' if (iter != fields.end()) {\\n'\n content += self.gen_field_assign('ptr->', origin_typename, name, 'iter->second', 2)\n content += ' }\\n'\n content += ' return 0;\\n'\n content += '}\\n\\n'\n return content\n\n # Generate the source file definitions\n def gen_cpp_source(self, struct) -> str:\n if struct['options'][predef.PredefParseKVMode]:\n return self.gen_kv_parse_method(struct)\n else:\n return self.gen_parse_method(struct)\n\n # Static method declarations for the class\n def generate_method_declare(self, struct) -> str:\n content = ''\n if struct['options'][predef.PredefParseKVMode]:\n content += ' static int ParseFrom(const std::unordered_map<std::string, std::string>& fields, %s* ptr);\\n' % 
struct['name']\n return content\n content += ' static int ParseFrom(const std::unordered_map<std::string, std::string>& fields, %s* ptr);\\n' % struct['name']\n return content\n\n def generate(self, descriptors, args, headerfile) -> str:\n cpp_include_headers = [\n '#include \"%s\"' % os.path.basename(headerfile),\n '#include <stddef.h>',\n '#include <assert.h>',\n '#include <memory>',\n '#include <fstream>',\n ]\n extra_headers = args.extra_cpp_includes.split(',')\n for header in extra_headers:\n text = '#include \"%s\"' % header\n cpp_include_headers.append(text)\n\n cpp_content = '// This file is auto-generated by Tabular v%s, DO NOT EDIT!\\n\\n' % version.VER_STRING\n if args.cpp_pch is not None:\n pchfile = '#include \"%s\"' % args.cpp_pch\n cpp_include_headers = [pchfile] + cpp_include_headers\n\n cpp_content += '\\n'.join(cpp_include_headers) + '\\n\\n'\n cpp_content += 'using namespace std;\\n\\n'\n cpp_content += '#ifndef ASSERT\\n'\n cpp_content += '#define ASSERT assert\\n'\n cpp_content += '#endif\\n\\n'\n\n if args.package is not None:\n cpp_content += '\\nnamespace %s {\\n\\n' % args.package\n\n static_var_content = ''\n\n class_content = ''\n for struct in descriptors:\n class_content += self.gen_cpp_source(struct)\n\n cpp_content += static_var_content\n cpp_content += class_content\n if args.package is not None:\n cpp_content += '\\n} // namespace %s \\n' % args.package # namespace\n return cpp_content\n","sub_path":"tabugen/generator/cpp/gen_csv_load.py","file_name":"gen_csv_load.py","file_ext":"py","file_size_in_byte":10009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"628238644","text":"#inputs\nhrs = input(\"Enter Hours:\")\nh = float(hrs)\nrate = input(\"Enter Hrs rate:\")\nr=float(rate)\n#standard number of hours before overtime\nminh = 40\n#condition: time-and-a-half for hours beyond minh\nif h>minh:\n extrah=h-minh\n gp = extrah*1.5*r + minh*r\nelse:\n gp = h*r\n#output\nprint (gp)\n","sub_path":"ex3.1.py","file_name":"ex3.1.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"277083568","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport os\n\nimport sh\nimport yaml\nfrom behave import *\nfrom assertpy import assert_that\n\nfrom qingstor.sdk.config import Config\nfrom qingstor.sdk.service.qingstor import QingStor\n\nconfig = Config().load_user_config()\nqingstor = QingStor(config)\ntest_config_file_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__), os.path.pardir)\n)\nwith open(test_config_file_path + '/test_config.yaml') as f:\n test_data = yaml.safe_load(f)\nbucket = qingstor.Bucket(test_data['bucket_name'], test_data['zone'])\nbucket.put()\nqsctl = sh.Command(\"qsctl\")\n\n\n@given(u'a set of local files')\ndef step_impl(context):\n sh.mkdir(\"tmp\").wait()\n for row in context.table:\n sh.dd(\n \"if=/dev/zero\", \"of=tmp/\" + row[\"name\"], \"bs=1048576\",\n \"count=\" + row[\"count\"]\n )\n\n\n@when(u'copy to QingStor key')\ndef step_impl(context):\n for row in context.table:\n qsctl(\n \"cp\",\n \"tmp/{filename}\".format(filename=row[\"name\"]),\n \"qs://{bucket}/{filename}\".format(\n bucket=test_data['bucket_name'], filename=row[\"name\"]\n )\n ).wait()\n\n\n@then(u'QingStor should have key')\ndef step_impl(context):\n resp = bucket.list_objects()\n assert_that(sorted([i[\"key\"] for i in resp[\"keys\"]])\n ).is_equal_to(sorted([row[\"name\"] for row in context.table]))\n\n for row in context.table:\n 
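# Clean-up step: delete every key uploaded by this scenario so that later scenarios start against an empty bucket.\n 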
bucket.delete_object(row[\"name\"])\n\n\n@when(u'copy to QingStor keys recursively')\ndef step_impl(context):\n qsctl(\n \"cp\",\n \"tmp\",\n \"qs://{bucket}\".format(bucket=test_data['bucket_name']),\n \"-r\"\n ).wait()\n\n\n@then(u'QingStor should have keys')\ndef step_impl(context):\n resp = bucket.list_objects()\n assert_that(sorted([i[\"key\"] for i in resp[\"keys\"]])\n ).is_equal_to(sorted([row[\"name\"] for row in context.table]))\n\n sh.rm(\"-rf\", \"tmp\")\n\n\n@when(u'copy to local file')\ndef step_impl(context):\n for row in context.table:\n qsctl(\n \"cp\",\n \"qs://{bucket}/{filename}\".format(\n bucket=test_data['bucket_name'], filename=row[\"name\"]\n ),\n \"tmp/{filename}\".format(filename=row[\"name\"]),\n ).wait()\n\n\n@then(u'local should have file')\ndef step_impl(context):\n output = sh.ls(\"tmp\").stdout.decode(\"utf-8\")\n ok = True\n for row in context.table:\n if row[\"name\"] not in output:\n ok = False\n break\n assert_that(ok).is_equal_to(True)\n\n sh.rm(\"-rf\", \"tmp\")\n\n\n@when(u'copy to local files recursively')\ndef step_impl(context):\n qsctl(\n \"cp\",\n \"qs://{bucket}\".format(\n bucket=test_data[\"bucket_name\"],\n ),\n \"tmp\",\n \"-r\",\n ).wait()\n\n\n@then(u'local should have files')\ndef step_impl(context):\n output = sh.ls(\"tmp\").stdout.decode(\"utf-8\")\n ok = True\n for row in context.table:\n if row[\"name\"] not in output:\n ok = False\n break\n assert_that(ok).is_equal_to(True)\n\n sh.rm(\"-rf\", \"tmp\")\n\n for row in context.table:\n bucket.delete_object(row[\"name\"])\n","sub_path":"scenarios/steps/cp.py","file_name":"cp.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"527577788","text":"e2f = {\"dog\":\"chien\", \"cat\":\"chat\", \"walrus\":\"morse\"}\n#print(e2f[\"walrus\"])\n\nf2e = {}\ntuplist = e2f.items()\nfor k, v in tuplist:\n f2e[v] = k\n\n#print(f2e[\"chien\"])\nprint(set(e2f))","sub_path":"e2f.py","file_name":"e2f.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"252384896","text":"#\n# Copyright (c) 2008-2015 Thierry Florac <tflorac AT ulthar.net>\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n\n\"\"\"PyAMS_i18n.negotiator module\n\nThis module defines a I18n negotiator utility, which is responsible of decoding browser\nsettings to extract preferred languages.\n\nIt also provides Pyramid request properties to set locale.\n\"\"\"\n\nfrom persistent import Persistent\nfrom pyramid.interfaces import IRequest\nfrom zope.container.contained import Contained\nfrom zope.i18n.interfaces import INegotiator as IZopeNegotiator\nfrom zope.i18n.locales import locales\nfrom zope.interface import Interface, implementer\nfrom zope.schema.fieldproperty import FieldProperty\nfrom zope.traversing.interfaces import ITraversable\n\nfrom pyams_i18n.interfaces import INegotiator, LANGUAGE_CACHE_KEY\nfrom pyams_utils.adapter import ContextRequestAdapter, adapter_config\nfrom pyams_utils.i18n import get_browser_language\nfrom pyams_utils.registry import query_utility, utility_config\n\n\n__docformat__ = 'restructuredtext'\n\n\n@implementer(INegotiator)\nclass Negotiator(Persistent, Contained):\n \"\"\"Language negotiator utility\"\"\"\n\n policy = FieldProperty(INegotiator['policy'])\n server_language = FieldProperty(INegotiator['server_language'])\n offered_languages = FieldProperty(INegotiator['offered_languages'])\n cache_enabled = FieldProperty(INegotiator['cache_enabled'])\n\n def __init__(self):\n self.server_language = 'en'\n\n def get_language(self, request):\n # pylint: disable=too-many-branches,too-many-return-statements\n \"\"\"See :intf:`INegotiator`\"\"\"\n\n # lang parameter, if defined, is of higher priority\n if 'lang' in request.params:\n return request.params['lang']\n\n policies = self.policy.split(' --> ')\n for policy in policies:\n\n # check server policy\n if policy == 'server':\n if self.server_language:\n return self.server_language\n\n # check session policy\n elif policy == 'session':\n if self.cache_enabled:\n try:\n cached = request.annotations[LANGUAGE_CACHE_KEY]\n return cached\n except AttributeError:\n return self.server_language\n except KeyError:\n try:\n session = request.session\n lang = session.get('language')\n if lang is not None:\n request.annotations[LANGUAGE_CACHE_KEY] = lang\n return lang\n except (AttributeError, KeyError):\n return self.server_language\n else:\n try:\n session = request.session\n lang = session.get('language')\n if lang is not None:\n return lang\n except AttributeError:\n return self.server_language\n\n # check browser policy\n elif policy == 'browser':\n lang = get_browser_language(request)\n if lang is not None:\n return lang\n\n return self.server_language\n\n @staticmethod\n def clear_cache(request):\n \"\"\"Clear cached language value\"\"\"\n try:\n del request.annotations[LANGUAGE_CACHE_KEY]\n except (AttributeError, KeyError):\n pass\n\n\n@adapter_config(name='lang', required=(Interface, IRequest), provides=ITraversable)\nclass LangNamespaceTraverser(ContextRequestAdapter):\n \"\"\"++lang++ namespace traverser\n\n This traverser is mainly used for backward compatibility with previous Zope 3 websites.\n \"\"\"\n\n def traverse(self, name, furtherpath=None): # pylint: disable=unused-argument\n \"\"\"Traverse to set request parameter to given language attribute\"\"\"\n if name != '*':\n self.request.GET['lang'] = name\n return 
self.context\n\n\ndef locale_negotiator(request):\n \"\"\"Negotiate language based on server, browser, request and user settings\n\n Locale is extracted from request's \"lang\" parameter, from browser settings or from\n negotiator utility\n \"\"\"\n negotiator = query_utility(INegotiator)\n if negotiator is not None:\n locale_name = negotiator.get_language(request)\n else:\n locale_name = get_browser_language(request)\n if not locale_name:\n registry = request.registry\n locale_name = registry.settings.get('pyramid.default_locale_name', 'en')\n if '-' in locale_name:\n # remove 'sub-locale' to prevent Babel and Zope exceptions for unknown locales\n locale_name = locale_name.split('-')[0]\n return locale_name\n\n\ndef get_locale(request):\n \"\"\"Get zope.i18n \"locale\" attribute\"\"\"\n return locales.getLocale(request.locale_name)\n\n\n@utility_config(provides=IZopeNegotiator)\nclass ZopeNegotiator:\n \"\"\"Zope language negotiator\"\"\"\n\n def getLanguage(self, langs, env): # pylint: disable=invalid-name,unused-argument,no-self-use\n \"\"\"Get current language negotiator\"\"\"\n return locale_negotiator(env)\n","sub_path":"src/pyams_i18n/negotiator.py","file_name":"negotiator.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"579302893","text":"# 61. Given a linked list, rotate the list to the right by k places, where k is non-negative.\n\n# Example 1:\n# Input: 1->2->3->4->5->NULL, k = 2\n# Output: 4->5->1->2->3->NULL\n# Explanation:\n# rotate 1 steps to the right: 5->1->2->3->4->NULL\n# rotate 2 steps to the right: 4->5->1->2->3->NULL\n\n# Example 2:\n# Input: 0->1->2->NULL, k = 4\n# Output: 2->0->1->NULL\n# Explanation:\n# rotate 1 steps to the right: 2->0->1->NULL\n# rotate 2 steps to the right: 1->2->0->NULL\n# rotate 3 steps to the right: 0->1->2->NULL\n# rotate 4 steps to the right: 2->0->1->NULL\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def rotateRight(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n if head == None or head.next == None or k == 0:\n return head\n list_len = 1\n node = head\n while node.next != None:\n node = node.next\n list_len += 1\n if k % list_len == 0:\n return head\n node = head\n for i in range(list_len - k % list_len - 1):\n node = node.next\n tmp_head = node.next\n node.next = None\n node = tmp_head\n while node.next != None:\n node = node.next\n node.next = head\n return tmp_head\n\n\n# 62. A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).\n# The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).\n# How many possible unique paths are there?\n\n# Note: m and n will be at most 100.\n\n# Example 1:\n# Input: m = 3, n = 2\n# Output: 3\n# Explanation:\n# From the top-left corner, there are a total of 3 ways to reach the bottom-right corner:\n# 1. Right -> Right -> Down\n# 2. Right -> Down -> Right\n# 3. Down -> Right -> Right\n\n# Example 2:\n# Input: m = 7, n = 3\n# Output: 28\n\nclass Solution:\n def uniquePaths(self, m, n):\n \"\"\"\n :type m: int\n :type n: int\n :rtype: int\n \"\"\"\n pane = [[1] * n for i in range(m)]\n for i in range(1, m):\n for j in range(1, n):\n pane[i][j] = pane[i-1][j] + pane[i][j-1]\n return pane[-1][-1]\n\n\n# 63. 
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).\n# The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).\n# Now consider if some obstacles are added to the grids. How many unique paths would there be?\n# An obstacle and empty space are marked as 1 and 0 respectively in the grid.\n\n# Note: m and n will be at most 100.\n\n# Example 1:\n# Input:\n# [\n# [0,0,0],\n# [0,1,0],\n# [0,0,0]\n# ]\n# Output: 2\n# Explanation:\n# There is one obstacle in the middle of the 3x3 grid above.\n# There are two ways to reach the bottom-right corner:\n# 1. Right -> Right -> Down -> Down\n# 2. Down -> Down -> Right -> Right\n\nclass Solution:\n def uniquePathsWithObstacles(self, obstacleGrid):\n \"\"\"\n :type obstacleGrid: List[List[int]]\n :rtype: int\n \"\"\"\n # check for an empty grid before indexing into it\n if len(obstacleGrid) == 0:\n return 0\n if obstacleGrid[-1][-1] == 1 or obstacleGrid[0][0] == 1:\n return 0\n pane = [[1] * len(obstacleGrid[0]) for i in range(len(obstacleGrid))]\n for i in range(1, len(obstacleGrid)):\n pane[i][0] = 0 if obstacleGrid[i][0] == 1 or pane[i-1][0] == 0 else 1\n for i in range(1, len(obstacleGrid[0])):\n pane[0][i] = 0 if obstacleGrid[0][i] == 1 or pane[0][i-1] == 0 else 1\n \n for i in range(1, len(obstacleGrid)):\n for j in range(1, len(obstacleGrid[0])):\n if obstacleGrid[i][j] == 1:\n pane[i][j] = 0\n continue\n else:\n pane[i][j] = pane[i][j-1] + pane[i-1][j]\n return pane[-1][-1]","sub_path":"2018-08-26.py","file_name":"2018-08-26.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"189159859","text":"import joblib\nimport pandas as pd\nfrom sklearn.pipeline import Pipeline\n\nfrom gb_model.config import config as cfg\nfrom gb_model.processing import preprocessing as ppg\nfrom gb_model.processing import features as fts\nfrom gb_model.processing import prediction as pdn\n\n\n# Weather data preprocessing\nwthr_pipe = Pipeline(\n\t[\n\t\t('time_converter', ppg.TimeConverter(timezones=cfg.TZ_OFFSETS)),\n\t\t('time_reindexer', ppg.TimeReindexer()),\n\t\t('missing_imputer', ppg.MissingImputer(cub_vars=cfg.CUB_VARS, lin_vars=cfg.LIN_VARS)),\n\t\t('data_copier', ppg.DataCopier())\n\t]\n)\n\n\n# Feature engineering\nfeat_pipe = Pipeline(\n\t[\n\t\t('weather_extractor', fts.WeatherExtractor()),\n\t\t('time_extractor', fts.TimeExtractor()),\n\t\t('holiday_extractor', fts.HolidayExtractor(countries=cfg.COUNTRIES)),\n\t\t('feat_selector', fts.FeatSelector(feats=cfg.FEATS))\n\t]\n)\n\n\n# Prediction\ndef pred_pipe(df, rare_path, mean_path, sclr_path, model_path,\n use_xgb=True,\n sqft_var='square_feet',\n target_var='meter_reading'):\n\n\t\"\"\"\n\tMake predictions using LightGBM or XGBoost.\n\n\t:param df: (Pandas dataframe) preprocessed data with listed variables\n\t:param rare_path: (pathlib Path object) path to trained rare label categorical encoders\n\t:param mean_path: (pathlib Path object) path to trained mean categorical encoders\n\t:param sclr_path: (pathlib Path object) path to trained standard scalers\n\t:param model_path: (pathlib Path object) path to trained LightGBM models\n\t:param use_xgb: (boolean) whether or not to predict using an XGBoost model\n\t:param sqft_var: (String) name of square footage variable\n\t:param target_var: (String) name of target variable\n\n\t:return: predictions in a list\n\t\"\"\"\n\n\tdf = 
df.copy()\n\tdf.reset_index(inplace=True)\n\t# tmp = df[['index', 'site_id', 'meter']].copy()\n\tdf.drop(['index', 'site_id'], axis=1, inplace=True)\n\n\tmodel = joblib.load(model_path / 'lgb0.pkl')\n\tdf_list = pdn.split(df)\n\tpreds = []\n\n\tfor i in range(4):\n\t\tre = joblib.load(rare_path / f'rare_enc{str(i)}.pkl')\n\t\tme = joblib.load(mean_path / f'mean_enc{str(i)}.pkl')\n\t\tss = joblib.load(sclr_path / f'scaler{str(i)}.pkl')\n\t\tX = pdn.transform(df_list[i], re, me, ss)\n\n\t\ty_pred = pdn.predict(X, model=model, use_xgb=use_xgb)\n\t\t# y_pred = pdn.predict(X, model_path=(model_path / f'lgb{str(i)}.pkl'), use_xgb=use_xgb)\n\t\ty = df_list[i][[sqft_var]].copy()\n\t\ty[target_var] = y_pred\n\t\ty = pdn.inverse_transform(y)\n\t\tpreds.append(y)\n\n\tpred = pd.concat(preds).sort_index().reset_index()\n\t# pred = pd.merge(tmp, pred, on='index', how='left')\n\t# pred = pdn.convert_site0_units(pred)\n\t\n\tpred = pred[target_var].tolist()\n\treturn pred\n","sub_path":"packages/gb_model/gb_model/processing/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"228130541","text":"import time\nfrom player import HumanPlayer, RandomComputerPlayer, GeniusComputerPlayer # related to __name__ statement below\n\nclass TicTacToe:\n def __init__(self):\n self.board = [' ' for _ in range(9)] # a single list of length 9 to rep 3*3 board\n # the indexes we assign to this list will represent the board\n self.current_winner = None # to keep track of winner\n \n def print_board(self):\n for row in [self.board[i*3:(i+1)*3] for i in range(3)]: \n # the large bracketed statement is indexing into the len 9 list defined above\n # the result of each of those iterations (for i in range (3)) is a row \"chunk\" of the final board\n # the innermost is a slice (i.e., 'start':'end'); 'range(3)' is for each row, meaning indices of 0, 1, and 2 (upper-bound exclusive)\n # the range bracket creates three \"self-contained\" chunks of indices, like this:\n # 0:3 (0,1,2) <-index positions\n # 3:6 (3,4,5) <-index positions\n # 6:8 (6,7,8) <-index positions\n \n print('| ' + ' | '.join(row) + ' |')\n # 1.) print the first pipe; \n # 2.) the join() method intersperses the pipe(s or however many required?) \n # between the iterables present in [row] from above; \n # 3.) 
print the last pipe.\n \n @staticmethod # static methods are methods bound to the class, not the object\n def print_board_nums(): # this doesn't relate to any specific board, so we don't have to pass in self (makes sense).\n number_board = [[str(i) for i in range(j*3, (j+1)*3)] for j in range(3)]\n # iterating over 'j' feeds in exactly like printing the board, to manufacture all of the row \"chunks\"\n # \"this is saying 'give me the indices of the rows, for all of the rows'.\"\n # \"i\" is the \"x\" or \"o\" String that gets dropped into the (0,1,2) or (3,4,5) or (6,7,8)\n # that is created by the iteration over 'j'\n \n # now, we concatenate the strings and print the board, like above:\n for row in number_board:\n print('| ' + ' | '.join(row) + ' |')\n\n def available_moves(self):\n # the long way to do it:\n # moves = [] # initialize moves to an empty list.\n # for (i,spot) in enumerate(self.board): # basically, it attaches a value (\"x\" or \"o\") to its index place as a tuple.\n # # ['x','x','o'] --> [(0,'x'),(1,'x'),(2,'o')]\n # if spot == ' ': # meaning that it's empty and available for use.\n # moves.append(i) # we append that index to know that it's been taken. \n # return moves\n \n # the same thing, using list comprehension:\n return [i for i, spot in enumerate(self.board) if spot == ' ']\n # basically, this says: \"when enumerating through the tuples of (i, spot),\n # if 'spot' is empty, put it into this list (which is what enumerate() creates for us).\"\n # it then returns this list (makes it available outside of this function)\n \n def empty_squares(self):\n return ' ' in self.board # returns a boolean if the selection is an empty space\n\n # to count the number of empty squares\n def num_empty_squares(self):\n return self.board.count(' ')# returns the available_moves list, so we can count the available spaces\n\n # to actually make a move\n def make_move(self, square, letter):\n # to make a move, be sure it's valid. If valid, returns True; if not, then False.\n if self.board[square] == ' ': # if that square is empty ...\n self.board[square] = letter # ... then the letter goes in that space.\n # now you need to check if you actually won (we'll do this later):\n if self.winner(square, letter): # passes in last move (\"we'll come back to the winner function\")\n self.current_winner = letter # if that's true, then we can assign current_winner to that letter (X or O)\n return True\n return False\n\n def winner(self, square, letter):\n # we have to check all possibilities of three in a row.\n # check rows\n row_ind = square//3 # divide by three and then round down. 
e.g. square 5: 5 // 3 == 1, so it sits in row 1 (rows are zero-indexed)\n        row = self.board[row_ind*3 : (row_ind + 1) * 3] # slice out the full row containing this square\n        if all([spot == letter for spot in row]): # True only if every spot in the row holds the same letter\n            return True\n        \n        # check columns\n        col_ind = square % 3 # e.g. 8 % 3 == 2, so square 8 is in the third column (index 2)\n        column = [self.board[col_ind+i*3] for i in range(3)] # Ex.: [1 + (2*3)] = spot 7; collects all board indexes in this column\n        if all([spot == letter for spot in column]): # same reason as above.\n            return True\n\n        # check diagonals (we basically hard code these)\n        if square % 2 == 0: # these are the only spots possible for either diagonal\n            diagonal1 = [self.board[i] for i in [0,4,8]] # <i> is just a placeholder as the \"for\" statement traverses 0, 4, and 8 and puts them in a list.\n            if all([spot == letter for spot in diagonal1]): # a similar checker to above\n                return True\n            diagonal2 = [self.board[i] for i in [2,4,6]]\n            if all([spot == letter for spot in diagonal2]):\n                return True\n\n        # if all of these checks fail, then there's no winner, so it returns False\n        return False\n\n# notice that this function exists outside of the TicTacToe class\ndef play(game, x_player, o_player, print_game=True): # if a human is playing, show the board\n    if print_game: # meaning, if we want to see the board\n        game.print_board_nums() # 'game' is the TicTacToe instance passed in as the first argument\n\n    letter = 'X' # a starting letter\n    # iterate while the game has empty squares;\n    # a win returns from inside the loop, so the condition only needs to track empty squares\n\n    # the main play loop:\n    while game.empty_squares(): # to check if the game has empty squares\n        # while there are empty spaces, get the next move from the appropriate player:\n        if letter == 'O':\n            square = o_player.get_move(game)\n        else:\n            square = x_player.get_move(game)\n\n        # try to make the move; make_move returns False for an invalid (occupied) square\n        if game.make_move(square, letter): # meaning, \"is valid\":\n            if print_game:\n                print(letter + f' makes a move to square {square}')\n                game.print_board() # so that we can see what the user just did\n                print('') # just an empty next line.\n\n            if game.current_winner: # no longer None, so someone has won\n                if print_game: # only print when running interactively\n                    print(letter + ' wins!')\n                return letter\n\n        # after making a single move, we need to alternate letters\n        # here, we assign the letter to 'O' if it's currently 'X',\n        # otherwise, we just assign 'letter' to 'X'\n        letter = 'O' if letter == 'X' else 'X'\n        # another way to do this is below:\n        # if letter == 'X':\n        #     letter = 'O'\n        # else:\n        #     letter = 'X' \n        # let's introduce a pause of 0.8 seconds\n        time.sleep(.8)\n\n    if print_game: # falling out of the while loop means there was no winner (it was a tie)\n        print('It\\'s a tie!')\n\n
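# A hypothetical alternative matchup, using the classes already imported at the top of this file:\n#   play(TicTacToe(), RandomComputerPlayer('X'), GeniusComputerPlayer('O'), print_game=False)\n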
if __name__ == '__main__': # runs only when game.py is executed directly, not when it is imported\n    x_player = HumanPlayer('X') # player classes come from player.py, imported at the top of this file\n    o_player = GeniusComputerPlayer('O')\n    t = TicTacToe()\n    play(t, x_player, o_player, print_game=True)\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"17448990","text":"class MazeSolver:\n\n    START = 'S'\n    END = 'E'\n    WALL = 'X'\n    OPEN = ' '\n    VISITED = '.'\n    DEAD_END = 'D'\n\n    def __init__(self, maze):\n        self.maze = maze\n        self._solved = False\n\n    def solve(self):\n        for row_index in range(len(self.maze)):\n            for column_index in range(len(self.maze[row_index])):\n                if self.maze[row_index][column_index] == MazeSolver.START:\n                    self._solve(row_index, column_index)\n\n    def _is_open(self, row, column):\n        return 0 <= row < len(self.maze) \\\n               and 0 <= column < len(self.maze[row]) \\\n               and (self.maze[row][column] == MazeSolver.OPEN\n                    or self.maze[row][column] == MazeSolver.END) \\\n               and not self._solved\n\n    def _solve(self, row, column):\n        if self.maze[row][column] == MazeSolver.END:\n            self.print_maze()\n            self._solved = True\n        else:\n            self.maze[row][column] = MazeSolver.VISITED\n\n            # up\n            if self._is_open(row - 1, column):\n                self._solve(row - 1, column)\n\n            # left\n            if self._is_open(row, column - 1):\n                self._solve(row, column - 1)\n\n            # down\n            if self._is_open(row+1, column):\n                self._solve(row+1, column)\n\n            # right\n            if self._is_open(row, column+1):\n                self._solve(row, column+1)\n\n\n\n            if not self._solved:\n                self.maze[row][column] = MazeSolver.DEAD_END\n\n    def print_maze(self):\n        print(\"|\" + '-' * len(self.maze[0]) + \"|\")  # border width is the number of columns, not rows\n        for row in self.maze:\n            print(\"|\" + \"\".join(row) + \"|\")\n        print(\"|\" + '-' * len(self.maze[0]) + \"|\")\n\nmaze = [\n    ['S', 'X', ' ', ' ', ' '],\n    [' ', 'X', ' ', 'X', ' '],\n    [' ', ' ', ' ', 'X', ' '],\n    [' ', 'X', ' ', ' ', ' '],\n    [' ', 'X', 'X', 'X', 'E']\n]\n\nmaze_solver = MazeSolver(maze)\nmaze_solver.solve()\nmaze_solver.print_maze()\n","sub_path":"November26th-Recursion/mazesolver.py","file_name":"mazesolver.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"593108759","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 15 09:45:03 2018\n\n@author: 212566876\n\"\"\"\nimport numpy as np\n\nperfs_data = np.loadtxt('AC1H_Perfs.dat', dtype=float, delimiter='\\t', usecols=[0,1,2])\n\n#zData = perfs_data[:,0]\nfid = open('AC1H_perfs_md.txt', 'w')\nfor i in range(perfs_data.shape[0]):\n    fid.write(str(perfs_data[i,0]) + '\\t')\n    fid.write(str(perfs_data[i,1]) + '\\n')\n\nfid.close()\n","sub_path":"Rotation 3/DSWI/DSWI Interpretation/DSWI Inputs and Outputs - Various Wells/MARATHON_CHALLENGER_C_AC_1H_XMAC_POST/AC1H Fracturing Treatment Data/AC1H_perfs_writeMD.py","file_name":"AC1H_perfs_writeMD.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"56339758","text":"import logging\nimport os\nimport sys\nfrom subprocess import Popen, PIPE, STDOUT\nfrom django.core.files.storage import FileSystemStorage\nfrom django.http import JsonResponse\nfrom rest_framework import generics\nfrom rest_framework import status\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom 
django.views.generic import View\nfrom weasyprint import HTML, default_url_fetcher\n\nfrom .render import *\nfrom .serializers import *\nfrom .tasks import *\nfrom .frame_process import *\nfrom .models import VideoProcessing\nfrom package_profile.models import Package\n\n\nimport time\nerrlog = logging.getLogger(\"error_logger\")\ninfolog = logging.getLogger(\"info_logger\")\nwarnlog = logging.getLogger(\"warning_logger\")\n\n\n# change secret key length carefully - it has dependencies\nREPORT_SECRET = '389564578912452'\n\ndef get_client_ip(request):\n    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n    if x_forwarded_for:\n        ip = x_forwarded_for.split(',')[0]\n    else:\n        ip = request.META.get('REMOTE_ADDR')\n    return ip\n\nclass StreamProcess(APIView):\n    permission_classes = (IsAuthenticated,)\n    parser_classes = (MultiPartParser,)\n    def get(self, request):\n        content = {}\n        content['message'] = 'Only POST method allowed!'\n        return Response(content, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def post(self, request):\n\n        user = request.user\n        ip = get_client_ip(request)\n        if user.profile.package.frames_allowed:\n            Socket_Access.clear_old_sockets(user)\n            access_key = Socket_Access.create_access_key(user)\n            print ('IP', ip)\n            return Response({\n                'socket_url': \"ws://127.0.0.1:8000/ws/api/frame/{}/\".format(ip),\n                'socket_key': \"{}\".format(access_key)\n            }, status= status.HTTP_201_CREATED)\n\n\nclass FrameUpload(APIView):\n    permission_classes = (IsAuthenticated,)\n    parser_classes = (MultiPartParser,)\n\n    def get(self, request):\n        content = {}\n        content['message'] = 'Only POST method allowed!'\n        return Response(content, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def post(self, request):\n\n        user = request.user\n        ip = get_client_ip(request)\n\n        # membership tests avoid a KeyError when no 'frame' was attached\n        if (request.FILES and 'frame' in request.FILES) or 'frame' in request.data:\n            try:\n                frame = request.FILES['frame']\n            except :\n                frame = request.data['frame']\n            # Check if uploaded file is of valid type and format\n            filename = frame.name\n            if user.profile.package.frames_allowed:\n                with open('temp_image', 'wb') as f:\n                    for chunk in frame:\n                        f.write(chunk)\n                    f.close()\n\n                # try:\n                print('1', time.clock())\n                try:\n                    emos = process_frame('temp_image')\n                except Exception as a:\n                    print(a)\n                    return Response({'message': \"Image could not be processed\"},\n                                    status= status.HTTP_406_NOT_ACCEPTABLE)\n\n                print('2', time.clock())\n                # except Exception as e:\n                #     errlog.error(\"Frame Processing Error:\\n Filename: {0}, IP: {1} \\n{2}\".format(filename, ip, repr(e)))\n                #     return Response({'message': \"Please check if uploaded file is image of .jpg .bmp .png file format\"},\n                #                     status= status.HTTP_406_NOT_ACCEPTABLE)\n\n        else:\n            return Response({'message': \"Frame was not attached to request\"}, status=status.HTTP_204_NO_CONTENT)\n        return Response({'filename' : filename, 'emos': emos[1]})\n\n\n
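# A hypothetical client call for the endpoint above (the URL and token are assumptions, not defined in this module):\n#   import requests\n#   requests.post('https://<host>/api/frame/', files={'frame': open('face.jpg', 'rb')}, headers={'Authorization': 'Token <key>'})\n\n\n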
class VideoUpload(APIView):\n    \"\"\"\n    This class is responsible for video upload and processing\n    \"\"\"\n    permission_classes = (IsAuthenticated,)\n    parser_classes = (MultiPartParser,)\n\n    def get(self, request):\n        content = {}\n        content['message'] = 'Only POST method allowed!'\n        return Response(content, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def post(self, request):\n        try:\n            warnlog.warn(\"{}, NEW UPLOAD\".format(str(time.time()).split(\".\")[0]))\n            user = request.user\n            username = user.username\n\n            # User profile package limits validation\n            infolog.info (\"IP: {0}, Username: {1}, {2}:{3}\".format(get_client_ip(request), username, 'POST', 'upload' ))\n            validation = Package.validate_limits(user)\n            if not validation[0]:\n                infolog.info(\"_____ Limits Validation: {0}\".format(validation[1]))\n                return Response({\"message\": validation[1]}, status=status.HTTP_403_FORBIDDEN)\n\n            file = None\n\n            # raise Exception(\"Custom error thrown by newbie developer :D\")\n\n            # Check if file attached (membership tests avoid a KeyError when 'file' is missing)\n            if (request.FILES and 'file' in request.FILES) or 'file' in request.data:\n                try:\n                    file = request.FILES['file']\n                except:\n                    file = request.data['file']\n                # Check if uploaded file is of valid type and format\n\n                allowed_formats = ['mov', 'mp4']\n                file_format = (file.name).split('.')[-1].lower()\n                if file_format not in allowed_formats:\n                    message = \"Uploaded file format is not supported, please attach a 'mp4' or 'mov' file.\"\n                    infolog.info(\"_____ File error: {0}, Filename: {1}\".format(message, file.name))\n                    return Response({\"message\": message},\n                                    status=status.HTTP_406_NOT_ACCEPTABLE)\n                if user.profile.package.has_single_limit:\n                    limit = user.profile.package.size_limit_single\n                    if file.size > limit*1000000 and limit !=0:\n                        message = \"Uploaded file exceeds the allowed size of {} MB. Please upload a smaller video file.\".format(limit)\n                        infolog.info(\"_____ File error: {0}, Filename: {1}\".format(message, file.name))\n                        return Response(\n                            {\"message\": message},\n                            status=status.HTTP_406_NOT_ACCEPTABLE)\n\n                # Instantiating a new VideoProcessing object\n                infolog.info(\"_____ File requirements satisfied for size and format, Filename: {0}\".format(file.name))\n                processing_instance = VideoProcessing()\n                processing_instance.filename = username + '_' + file.name\n                processing_instance.filesize = file.size\n                processing_instance.fileformat = file_format\n                processing_instance.owner = user\n\n                # Check if uploaded file was not previously added, by filename and size\n                # if processing_instance.duplicate_check():\n                duplicate_handler = processing_instance.handle_duplicates()\n                if not duplicate_handler.is_duplicate:\n                    # Saving file to local storage\n\n                    fs = FileSystemStorage()\n                    # Use new filename if such a name already exists but with different file size\n                    if duplicate_handler.new_filename != \"\":\n                        filename = fs.save(duplicate_handler.new_filename, file)\n                    else:\n                        filename = fs.save(processing_instance.filename, file)\n\n                    infolog.info(\"_____ File saved as: {0}, Initial Filename: {1}\".format(filename, file.name))\n                    process = Popen(['/usr/bin/ffmpeg', '-i', settings.MEDIA_ROOT + '/' + filename], stdout=PIPE,\n                                    stderr=STDOUT)\n                    stdout, stderr = process.communicate()\n                    a = str(stdout)\n                    a = a.split(\"Duration:\")[1].split(',')[0].strip()\n                    h, m, s = a.split(':')\n                    duration = int(h) * 3600 + int(m) * 60 + float(s)\n                    infolog.info ('_____ Uploaded Video file duration: {}'.format(duration))\n\n                    if user.profile.package.has_single_limit:\n                        limit = user.profile.package.duration_limit_single\n\n                        if duration > limit*60 and limit !=0:\n                            fs.delete(filename)\n                            message = \"Uploaded file exceeds the allowed duration of {} minutes, please upload a shorter video file.\".format(limit)\n                            infolog.info('_____ File Error: {}'.format(message))\n                            return Response(\n                                {\"message\": message},\n                                status=status.HTTP_406_NOT_ACCEPTABLE)\n\n                    processing_instance.fileduration = duration\n                    uploaded_file_url = fs.url(filename)\n                    processing_instance.filelink = uploaded_file_url\n                    processing_instance.file_link = filename\n                    try:\n                        if request.data['group_id']:\n                            processing_instance.group_id = request.data['group_id']\n                    except Exception as e:\n                        errlog.error(\"Group ID error: \" + str(e))\n                    processing_instance.save()\n                    processing_instance_id 
= processing_instance.id\n\n                    warnlog.warn(\"{}, File {}, Delayed processing start\".format(str(time.time()).split(\".\")[0], filename))\n                    delayed_video_processing.delay(filename, processing_instance_id,processing_instance.file_link.url )\n                else:\n                    infolog.info('_____ Duplicate file: Retrieving from DB')\n                    if duplicate_handler.new_filename != \"\":\n                        filename = duplicate_handler.new_filename\n                    try:\n                        processing_instance = VideoProcessing.objects.get(id=duplicate_handler.duplicate_id, owner = user)\n                    except Exception as e:\n                        errlog.error(\"Retrieving Duplicate Data For Video Upload Error: \\n{0}\".format(repr(e)))\n                # except Exception as e:\n                #     errlog.error(\"Instantiating Video Processing Error: \\n{0}\".format(repr(e)))\n                serializer = UploadSerializer(processing_instance)\n                infolog.info(\"_____ SUCCESS\")\n                warnlog.warn(\"{}, ______UPLOAD\".format(str(time.time()).split(\".\")[0]))\n                return JsonResponse(serializer.data, status=201)\n\n            else:\n                infolog.info('_____ File Error: No file attached')\n                return Response({\"message\": \"This POST request requires 'file' to be attached\"}, status=status.HTTP_406_NOT_ACCEPTABLE )\n        except Exception as e:\n            errlog.error(\"VIDEO UPLOAD Common POST Error: \\n{0}\".format(repr(e)))\n            return Response({\"message\": \"Something went wrong on the server side; we received detailed info on the request and will fix this ASAP\"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass ProcessingResult(generics.RetrieveAPIView):\n    permission_classes = [IsAuthenticated]\n    serializer_class = ProcessingResultSerializer\n\n    def get_queryset(self):\n        user = self.request.user\n        queryset = VideoProcessing.objects.filter(owner=user)\n        return queryset\n\n\ndef modify(val):\n    number = float(val)\n    percent = number*100\n    return float(\"%.2f\" % round(percent,2))\n\n\ndef mapper(val):\n    if val < 20: score = \"Low\"\n    elif val < 40: score = \"Low To Medium\"\n    elif val < 60: score = \"Medium\"\n    elif val < 80: score = \"Medium To High\"\n    else: score = \"High\"\n    return score\n\ndef mapper_china(val):\n    if val < 20: score = \"低\"\n    elif val < 40: score = \"低到中等\"\n    elif val < 60: score = \"中\"\n    elif val < 80: score = \"中到高\"\n    else: score = \"高\"\n    return score\n\n\ndef riaseccer(identifier, val):\n    riasec_mapper = {\n        \"stress_reaction\": {\n            'low': 'Business Manager; Sales rep; CEO; Police Officer; Finance',\n            'medium': 'Business Consultant; Facilitator; HR',\n            'high': 'Social Worker; Customer Rep; Flight Cabin Crew'\n        },\n        'outgoing': {\n            'low': 'Writer; Researcher; Accountant; Engineer; Lab Technician; Clerical Worker; Data Entry; Admin; Lawyer; Bookkeeper; IT',\n            'medium': 'Teacher; Consultant; Receptionist',\n            'high': 'Sales rep; Business Leader; Barrister; Trainer; Hotel Manager; Agent; Public Relations; Marketing Manager'\n        },\n        'creativity': {\n            'low': 'Technician; Clerical Worker; Electrician; Plumber; Carpenter; Data Entry; Clerk; Doctor; IT support/repairs',\n            'medium': 'Team Leader; Human Resources; Facilitator',\n            'high': 'Consultant; Advertising Executive; Healthcare Worker; Writer; Artist; Designer'\n        },\n        'agreeableness': {\n            'low': 'Politician; Business Owner; Entrepreneur; Police Officer; Barrister; Judiciary',\n            'medium': 'Team Leader; Human Resources; Facilitator',\n            'high': 'Consultant; Advertising Executive; Healthcare Worker; Writer; Artist; Designer'\n        },\n        'conscientiousness': {\n            'low': 'Admin Support; Waiter/Waitress; Factory Worker; Postal Staff; Builder; Maintenance Worker',\n            'medium': 'Team Leader; Senior Business 
Manager',\n            'high': 'Supervisor; Customer Service/Sales Representative; Accountant; Proof-Reader; Editor; Statistician'\n        },\n    }\n    if val < 34: newval = 'low'\n    elif val < 67: newval = 'medium'\n    else: newval = 'high'\n\n    return riasec_mapper[identifier][newval]\n\n\nclass ProcessingReport(APIView):\n    permission_classes = [IsAuthenticated]\n    http_method_names = ['get']\n\n    def get_object(self, pk):\n        try:\n            return VideoProcessing.objects.get(pk=pk)\n        except VideoProcessing.DoesNotExist:\n            return Response({'message':'Processing object with a given pk not found'},status=status.HTTP_404_NOT_FOUND)\n\n    def get(self, request, pk, format=None):\n        processing = self.get_object(pk)\n        user = request.user\n        result_owner = processing.owner\n        if user != result_owner and user.username != 'igor_t':\n            return Response({'message': 'This data belongs to another user'},\n                            status=status.HTTP_403_FORBIDDEN)\n        if processing.processing_status != 'Processed':\n            return Response({'message': 'This video processing is not finished yet'},\n                            status=status.HTTP_404_NOT_FOUND)\n        video_name_mask = ''.join(processing.filename.split('.m')[:-1])\n        summary_location = settings.MEDIA_ROOT + \"/result/video_json_summary/\" +video_name_mask+\".json\"\n        import json\n\n        with open(summary_location) as f:\n            d = json.load(f)\n        try:\n            d=d[\"0\"]\n        except KeyError:\n            errlog.error(f\"Empty Json Summary for {video_name_mask}\")\n            return Response({\n                'filename': processing.filename,\n                'uploaded_at': processing.uploaded_at,\n                'video_summary': f\"Video Processing Summary is empty, check if faces are present on the uploaded video '{processing.filename}'\"\n            }, status=status.HTTP_200_OK)\n\n        emos = d[\"personality_data\"]\n        chars = d[\"personality_range\"]\n        emotions = {\n            \"angry\": modify(emos[\"anger\"]),\n            \"happines\": modify(emos[\"happiness\"]),\n            \"fear\": modify(emos[\"fear\"]),\n            \"disgust\": modify(emos[\"disgust\"]),\n            \"contempt\": modify(emos[\"contemplation\"]),\n            \"neutral\": modify(emos[\"neutral\"]),\n            \"surprise\": modify(emos[\"surprise\"]),\n            \"sadness\": modify(emos[\"sadness\"])\n\n        }\n        characteristics = {\n            \"honesty\": modify(emos[\"sincerity\"]),\n            \"confidence\": modify(emos[\"confidence\"]),\n            \"curiosity\": modify(emos[\"curiosity\"]),\n            \"passion\": modify(emos[\"passion\"]),\n            \"judging\": modify(emos[\"judgement\"]),\n            \"disagree\": modify(emos[\"disagreeing\"]),\n            \"nervousness\": modify(emos[\"nervousness\"])\n\n        }\n        # Mapping report block next\n        mapping = {\n            \"stress_reaction\": mapper(modify(d[\"personality_range\"][\"stresse_reaction\"])),\n            \"outgoing\": mapper(modify(d[\"personality_range\"][\"outgoing\"])),\n            \"creativity\": mapper(modify(d[\"personality_range\"][\"creativity\"])),\n            \"agreeableness\": mapper(modify(d[\"personality_range\"][\"agreeableness\"])),\n            \"conscientiousness\": mapper(modify(d[\"personality_range\"][\"conscientiousness\"])),\n        }\n\n        # Riasec model report block next\n        riasec = {\n            \"stress_reaction\": {\n                \"value\": modify(d[\"personality_range\"][\"stresse_reaction\"]),\n                \"result\": riaseccer(\"stress_reaction\", modify(d[\"personality_range\"][\"stresse_reaction\"]))\n            },\n            \"outgoing\": {\n                \"value\": modify(d[\"personality_range\"][\"outgoing\"]),\n                \"result\": riaseccer(\"outgoing\", modify(d[\"personality_range\"][\"outgoing\"]))\n            },\n            \"creativity\": {\n                \"value\": modify(d[\"personality_range\"][\"creativity\"]),\n                \"result\": riaseccer(\"creativity\", modify(d[\"personality_range\"][\"creativity\"]))\n            },\n            \"agreeableness\": {\n                \"value\": 
modify(d[\"personality_range\"][\"agreeableness\"]),\n \"result\": riaseccer(\"agreeableness\", modify(d[\"personality_range\"][\"agreeableness\"]))\n },\n \"conscientiousness\": {\n \"value\": modify(d[\"personality_range\"][\"conscientiousness\"]),\n \"result\": riaseccer(\"conscientiousness\", modify(d[\"personality_range\"][\"conscientiousness\"]))\n }\n }\n\n # create pages pdf\n report_path = settings.MEDIA_ROOT + \"/reports/{}_\".format(request.user.username)\n # template = get_template('2.html')\n # params = {\n # 'emotions': emotions,\n # 'characteristics': characteristics,\n # }\n # sourceHtml= template.render(context=params)\n # HTML(string=sourceHtml).write_pdf(report_path+'test.pdf')\n try:\n for i in range(1, 7):\n try:\n os.remove(report_path+'{}.pdf'.format(i))\n except:\n pass\n try:\n if settings.LOCA == \"China\":\n url = 'http://47.102.46.21/api/report/{}/pdf/{}'.format((str(pk)+REPORT_SECRET), str(i))\n else:\n url = 'http://127.0.0.1:7500/api/report/{}/pdf/{}'.format((str(pk)+REPORT_SECRET), str(i))\n HTML(url).write_pdf(report_path + '{}.pdf'.format(str(i)), presentational_hints=True)\n infolog.info(\"**** Passed {}\".format(i))\n except Exception as e:\n errlog.error(report_path + \"***Single page PDF Write ERROR***\" + repr(e))\n continue\n\n\n except Exception as e:\n errlog.error(report_path + \"***Single page PDF ERROR***\"+str(e))\n report_encoded = {'Error during rendering report PDF'}\n\n # merge pages into 1 file\n try:\n from PyPDF2 import PdfFileMerger\n pdfs = [report_path + str(i)+\".pdf\" for i in range(1,7)]\n merger = PdfFileMerger()\n for pdf in pdfs:\n try:\n merger.append(pdf)\n except Exception as e:\n errlog.error(report_path + \"***Merge page {} PDF ERROR***\".format(str(pdf)) + str(e))\n merger.write(report_path + 'result.pdf')\n # jsonify pdf for response\n import base64\n short_report = open(report_path+\"result.pdf\", 'rb')\n report_encoded = base64.b64encode(short_report.read())\n except Exception as e:\n errlog.error(report_path + \"***Merge PDF ERROR***\"+str(e))\n report_encoded = {'Error during merging report PDF'}\n\n return Response({\n 'filename': processing.filename,\n 'uploaded_at': processing.uploaded_at,\n 'video_summary': {\n \"live_emotions\": emotions,\n \"live_characteristics\": characteristics\n },\n 'emotions_average': emotions,\n 'characteristics_average': characteristics,\n 'mapping': mapping,\n 'RIASEC': riasec,\n 'PDF': report_encoded\n }, status=status.HTTP_200_OK)\n\n\nclass ProcessingStatus(generics.RetrieveAPIView):\n permission_classes = [IsAuthenticated]\n serializer_class = ProcessingStatusSerializer\n\n def get_queryset(self):\n user = self.request.user\n queryset = VideoProcessing.objects.filter(owner=user)\n return queryset\n\n\nclass UsingStat(APIView):\n \"\"\"\n This class responsible for video upload and processing\n \"\"\"\n permission_classes = (IsAuthenticated,)\n parser_classes = (MultiPartParser,)\n\n def get(self, request):\n group_id = request.query_params.get('group_id')\n from_date = request.query_params.get('from')\n to_date = request.query_params.get('to')\n try:\n period = ''\n if from_date:\n period += f\"From {from_date} \"\n if to_date:\n period += f\"To {to_date}\"\n if not from_date and not to_date:\n period = \"all\"\n to_date = request.query_params.get('to')\n user = self.request.user\n queryset = VideoProcessing.objects.filter(owner=user).all()\n if from_date:\n queryset = queryset.filter(uploaded_at__gte=from_date)\n if to_date:\n queryset = queryset.filter(uploaded_at__lte=to_date)\n 
if group_id:\n print(group_id)\n queryset = queryset.filter(group_id=group_id)\n print (len(queryset))\n personified = {}\n total_duration = 0\n total_size = 0\n groups_affected = []\n for video in queryset:\n identifier = video.group_id\n if not identifier:\n identifier = 'Not specified'\n else:\n identifier = str(identifier)\n total_duration += video.fileduration\n total_size += video.filesize\n if identifier not in groups_affected:\n groups_affected.append(identifier)\n personified.update({identifier: {\n 'duration': 0,\n 'size': 0,\n 'files_count': 0\n }})\n personified[identifier].update(\n {\n 'duration': personified[identifier]['duration']+video.fileduration,\n 'size': personified[identifier]['size'] + video.filesize/100000,\n 'files_count': personified[identifier]['files_count'] + 1\n }\n )\n content = {\n \"user\": user.username,\n \"period\": period,\n \"total_size\": str(total_size/100000)+ \"MB\",\n \"total_duration\": total_duration,\n \"total_duration_humanized\": str(datetime.timedelta(seconds=total_duration)),\n \"personified_data\": personified\n }\n except Exception as e:\n errlog.error('Statistics error: '+str(e))\n content= {'message': 'Something gone wrong. Please check if *from* and *to* parameters are in format YYYY-MM-DD or *group_id* is valid'}\n return Response(content, status=status.HTTP_200_OK)\n\n\n\nclass AllUploads(generics.ListAPIView):\n permission_classes = [IsAuthenticated]\n serializer_class = ProcessingStatusSerializer\n\n def get_queryset(self):\n user = self.request.user\n queryset = VideoProcessing.objects.filter(owner=user).order_by('-uploaded_at')\n return queryset\n\n\nfrom django.shortcuts import render\n\nclass PdfReport(View):\n\n def get(self, request, *args, **kwargs):\n params = {\n 'today': \"Wednesday\",\n 'sales': \"test\",\n }\n pk = self.kwargs['pk']\n rk = self.kwargs['rk']\n report_id = rk[:-15]\n if REPORT_SECRET not in rk:\n return HttpResponse(\"Unathorised trial to get report pdf\")\n print('user', request.user)\n print('page #', pk)\n print('processing_id+SECRET', rk)\n print('report_id', report_id)\n\n try:\n video = VideoProcessing.objects.get(pk=report_id)\n except VideoProcessing.DoesNotExist:\n return HttpResponse('Processing object with a given pk not found')\n video_name_mask = ''.join(video.filename.split('.m')[:-1])\n summary_location = settings.MEDIA_ROOT + \"/result/video_json_summary/\" + video_name_mask + \".json\"\n import json\n with open(summary_location) as f:\n d = json.load(f)\n d = d[\"0\"]\n mapping = {\n \"stress_reaction\": mapper(modify(d[\"personality_range\"][\"stresse_reaction\"])),\n \"outgoing\": mapper(modify(d[\"personality_range\"][\"outgoing\"])),\n \"creativity\": mapper(modify(d[\"personality_range\"][\"creativity\"])),\n \"agreeableness\": mapper(modify(d[\"personality_range\"][\"agreeableness\"])),\n \"conscientiousness\": mapper(modify(d[\"personality_range\"][\"conscientiousness\"])),\n }\n\n if pk == 1 or pk=='1':\n return render(request, '1.html', params)\n elif pk == 2 or pk=='2' or pk==3 or pk==\"3\" or pk==4 or pk==\"4\":\n emos = d[\"personality_data\"]\n emotions = {\n \"angry\": modify(emos[\"anger\"]),\n \"happines\": modify(emos[\"happiness\"]),\n \"fear\": modify(emos[\"fear\"]),\n \"disgust\": modify(emos[\"disgust\"]),\n \"contempt\": modify(emos[\"contemplation\"]),\n \"neutral\": modify(emos[\"neutral\"]),\n \"surprise\": modify(emos[\"surprise\"]),\n \"sadness\": modify(emos[\"sadness\"])\n\n }\n characteristics = {\n \"honesty\": modify(emos[\"sincerity\"]),\n 
\"confidence\": modify(emos[\"confidence\"]),\n \"curiosity\": modify(emos[\"curiosity\"]),\n \"passion\": modify(emos[\"passion\"]),\n \"judging\": modify(emos[\"judgement\"]),\n \"disagree\": modify(emos[\"disagreeing\"]),\n \"nervousness\": modify(emos[\"nervousness\"])\n\n }\n params = {\n 'filename': video.filename,\n 'date': video.uploaded_at,\n 'emotions': emotions,\n 'characteristics': characteristics,\n }\n if pk == 2 or pk == '2':\n return render(request, '2.html', params)\n if pk == 3 or pk == '3':\n return render(request, '3.html', params)\n if pk == 4 or pk == '4':\n return render(request, '4.html', params)\n\n elif pk == 5 or pk == '5' or pk == 6 or pk == '6':\n sizes = {\n \"stress_reaction\": modify(d[\"personality_range\"][\"stresse_reaction\"]) * 2,\n \"outgoing\": modify(d[\"personality_range\"][\"outgoing\"]) * 2,\n \"creativity\": modify(d[\"personality_range\"][\"creativity\"]) * 2,\n \"agreeableness\": modify(d[\"personality_range\"][\"agreeableness\"]) * 2,\n \"conscientiousness\": modify(d[\"personality_range\"][\"conscientiousness\"]) * 2,\n }\n params = {\n \"mapping\": mapping,\n 'sizes':sizes\n }\n if pk == 5 or pk == '5':\n mapping_china = {\n \"stress_reaction\": mapper_china(modify(d[\"personality_range\"][\"stresse_reaction\"])),\n \"outgoing\": mapper_china(modify(d[\"personality_range\"][\"outgoing\"])),\n \"creativity\": mapper_china(modify(d[\"personality_range\"][\"creativity\"])),\n \"agreeableness\": mapper_china(modify(d[\"personality_range\"][\"agreeableness\"])),\n \"conscientiousness\": mapper_china(modify(d[\"personality_range\"][\"conscientiousness\"])),\n }\n params = {\n \"mapping\": mapping,\n \"mapping_china\": mapping_china,\n 'sizes': sizes\n }\n return render(request, '5.html', params)\n if pk == 6 or pk == '6':\n sizes = {\n \"stress_reaction\": modify(d[\"personality_range\"][\"stresse_reaction\"]) * 4.5+200,\n \"outgoing\": modify(d[\"personality_range\"][\"outgoing\"]) * 4.5+200,\n \"creativity\": modify(d[\"personality_range\"][\"creativity\"]) * 4.5+200,\n \"agreeableness\": modify(d[\"personality_range\"][\"agreeableness\"]) * 4.5+200,\n \"conscientiousness\": modify(d[\"personality_range\"][\"conscientiousness\"]) * 4.5+200,\n }\n params = {\n \"mapping\": mapping,\n 'sizes': sizes\n }\n return render(request, '6.html', params)\n else:\n return HttpResponse(\"No page\")\n\nfrom django.http import JsonResponse\n\nclass VideoProcessed(View):\n\n def get(self, request, *args, **kwargs):\n\n try:\n pk = self.kwargs['pk']\n try:\n video = VideoProcessing.objects.get(pk=pk)\n except VideoProcessing.DoesNotExist:\n return JsonResponse ({\"message\":'Processed Video result with the given ID not found'})\n #if request.user != video.owner:\n # return JsonResponse({\"message\": \"Permission denied, check if access credentials provided and not expired, or this video may belong to another user\"})\n\n\n video_name = video.filename\n video_location = settings.MEDIA_ROOT + \"/result/video_processed/\" + video_name\n import base64\n try:\n\n # Load this source file and strip the header. 
You can try removing .split('#end_pymotw_header')[1] from end.\n initial_data = open(video_location, 'rb').read()\n encoded_data = base64.b64encode(initial_data)\n print (encoded_data)\n return JsonResponse({\"message\": str(encoded_data)})\n except Exception as e:\n errlog.error(f\"Error on retrieve processed video and encoding for user {request.user.username} and video {video_name}, {str(e)}\")\n return JsonResponse({\"message\": \"Error on encoding process, will fix this ASAP\"})\n\n except Exception as e:\n errlog.error(f\"Error on retrieve processed video and encoding for user {request.user.username} and video {video_name}, {str(e)} \")\n return JsonResponse({\"message\": \"Error on process, will fix this ASAP\"})\n","sub_path":"api/video_processing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":31340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"584341565","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-intel/egg/exactly_lib/test_case_file_structure/relativity_validation.py\n# Compiled at: 2017-05-02 08:48:26\n# Size of source mod 2**32: 444 bytes\nfrom exactly_lib.test_case_file_structure.path_relativity import SpecificPathRelativity, PathRelativityVariants\n\ndef is_satisfied_by(specific_relativity: SpecificPathRelativity, accepted_relativities: PathRelativityVariants) -> bool:\n if specific_relativity.is_absolute:\n return accepted_relativities.absolute\n else:\n return specific_relativity.relativity_type in accepted_relativities.rel_option_types","sub_path":"pycfiles/exactly-0.12.0-py3.5/relativity_validation.cpython-35.py","file_name":"relativity_validation.cpython-35.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"162670554","text":"from tkinter import *\r\nimport math\r\n\r\nclass Calculator_UI(Frame):\r\n# Main class for the calculator UI\r\n\r\n def __init__(self, master):\r\n # Frame of the calculator\r\n super(Calculator_UI, self).__init__(master)\r\n self.function = \"\"\r\n self.user_input = StringVar()\r\n self.grid()\r\n self.calculator_widgets()\r\n\r\n def calculator_widgets(self):\r\n # Creating the buttons of the calculator and design\r\n # GUI\r\n\r\n self.input = Entry(self, bg = \"gray\", bd = 29, \r\n insertwidth = 4, width = 30,\r\n font = (\"Arial\", 20, \"bold\"), textvariable = self.user_input, justify = RIGHT)\r\n self.input.grid(columnspan = 4)\r\n\r\n self.input.insert(0, \"0\")\r\n \r\n # Button for number 0\r\n self.num0 = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"0\", padx = 35, pady = 25,\r\n command = lambda : self.click_button(0), font = (\"Arial\", 20, \"bold\"))\r\n self.num0.grid(row = 5, column = 0, sticky = W)\r\n \r\n # Button for number 1\r\n self.num1 = Button(self, bg = \"white smoke\", bd = 12,\r\n text = \"1\", padx = 35, pady = 25, font = (\"Arial\", 20, \"bold\"), \r\n command = lambda : self.click_button(1))\r\n self.num1.grid(row = 2, column = 0, sticky = W)\r\n\r\n # Button for number 2\r\n self.num2 = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"2\", padx = 35, pady = 25, \r\n command = lambda : self.click_button(2), font = (\"Arial\", 20, \"bold\"))\r\n self.num2.grid(row = 2, column = 1, sticky = W)\r\n \r\n # Button for number 3\r\n self.num3 = Button(self, bg = \"white smoke\", 
bd = 12, \r\n text = \"3\", padx = 35, pady = 25,\r\n command = lambda : self.click_button(3), font = (\"Arial\", 20, \"bold\"))\r\n self.num3.grid(row = 2, column = 2, sticky = W)\r\n\r\n # Button for number 4\r\n self.num4 = Button(self, bg = \"white smoke\", bd = 12,\r\n text = \"4\", padx = 35, pady = 25,\r\n command = lambda : self.click_button(4), font = (\"Arial\", 20, \"bold\"))\r\n self.num4.grid(row = 3, column = 0, sticky = W)\r\n\r\n # Button for number 5\r\n self.num5 = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"5\", padx = 35, pady = 25,\r\n command = lambda : self.click_button(5), font = (\"Arial\", 20, \"bold\"))\r\n self.num5.grid(row = 3, column = 1, sticky = W)\r\n\r\n # Button for number 6\r\n self.num6 = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"6\", padx = 35, pady = 25,\r\n command = lambda : self.click_button(6), font = (\"Arial\", 20, \"bold\"))\r\n self.num6.grid(row = 3, column = 2, sticky = W)\r\n\r\n # Button for number 7\r\n self.num7 = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"7\", padx = 35, pady = 25, \r\n command = lambda : self.click_button(7), font = (\"Arial\", 20, \"bold\"))\r\n self.num7.grid(row = 4, column = 0, sticky = W)\r\n\r\n # Button for number 8\r\n self.num8 = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"8\", padx = 35, pady = 25,\r\n command = lambda : self.click_button(8), font = (\"Arial\", 20, \"bold\"))\r\n self.num8.grid(row = 4, column = 1, sticky = W)\r\n\r\n # Button for number 9\r\n self.num9 = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"9\", padx = 35, pady = 25,\r\n command = lambda : self.click_button(9), font = (\"Arial\", 20, \"bold\"))\r\n self.num9.grid(row = 4, column = 2, sticky = W)\r\n\r\n # Buttons for basic operations\r\n # Addition\r\n self.Addbutton = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"+\", padx = 32, pady = 25,\r\n command = lambda : self.click_button(\"+\"), font = (\"Arial\", 20, \"bold\"), fg = \"deep pink\")\r\n self.Addbutton.grid(row = 2, column = 3, sticky = W)\r\n\r\n # Subtraction\r\n self.Subbutton = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"-\", padx = 36, pady = 25,\r\n command = lambda : self.click_button(\"-\"), font = (\"Arial\", 20, \"bold\"), fg = \"deep pink\")\r\n self.Subbutton.grid(row = 3, column = 3, sticky = W)\r\n\r\n # Multiplication\r\n self.Multbutton = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"*\", padx = 35, pady = 25,\r\n command = lambda : self.click_button(\"*\"), font = (\"Arial\", 20, \"bold\"), fg = \"deep pink\")\r\n self.Multbutton.grid(row = 4, column = 3, sticky = W)\r\n\r\n # Division\r\n self.Divbutton = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"/\", padx = 36, pady = 25,\r\n command = lambda : self.click_button(\"/\"), font = (\"Arial\", 20, \"bold\"), fg = \"deep pink\")\r\n self.Divbutton.grid(row = 5, column = 3, sticky = W)\r\n\r\n # Decimal button\r\n self.Decimalbutton = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \".\", padx = 38, pady = 25,\r\n command = lambda : self.click_button(\".\"), font = (\"Arial\", 20, \"bold\"))\r\n self.Decimalbutton.grid(row = 5, column = 1, sticky = W)\r\n\r\n # Equal button\r\n self.Equalbutton = Button(self, bg = \"white smoke\", bd = 12, \r\n text = \"=\", font = (\"Arial\", 20, \"bold\"), padx = 35, pady = 25, command = self.Calculate)\r\n self.Equalbutton.grid(row = 5, column = 2, sticky = W, columnspan = 2)\r\n\r\n # Clear Button\r\n self.Clearbutton = Button(self, bg = 
\"white smoke\", bd = 12,\r\n text = \"AC\", font = (\"Arial\", 20, \"bold\"), fg = \"deep sky blue\", width = 5, padx = 6, command = self.Clear)\r\n self.Clearbutton.grid(row = 1, column = 3, sticky = W, columnspan = 2)\r\n\r\n # Delete Button\r\n self.Delbutton = Button(self, bg = \"white smoke\", bd = 12,\r\n text = \"Del\", font = (\"Arial\", 20, \"bold\"), fg = \"deep sky blue\", width = 5, padx = 8, command = self.DeleteNum)\r\n self.Delbutton.grid(row = 1, column = 2, sticky = W, columnspan = 2)\r\n\r\n # Exponent Button\r\n self.Expbutton = Button(self, bg = \"white smoke\", bd = 12,\r\n text = \"x^2\", font = (\"Arial\", 20, \"bold\"), fg = \"deep sky blue\", width = 5, padx = 8, command = self.ExpNum)\r\n self.Expbutton.grid(row = 1, column = 1, sticky = W, columnspan = 2)\r\n\r\n # Squareroot Button\r\n self.Sqrtbutton = Button(self, bg = \"white smoke\", bd = 12,\r\n text = \"√\", font = (\"Verdana\", 20, \"bold\"), fg = \"deep sky blue\", width = 5, padx = 4, command = self.SqrtNum)\r\n self.Sqrtbutton.grid(row = 1, column = 0, sticky = W, columnspan = 2)\r\n\r\n def click_button(self, number):\r\n self.function = str(self.function) + str(number)\r\n self.user_input.set(self.function)\r\n\r\n def displayResult(self, value):\r\n self.input.delete(0, END)\r\n self.input.insert(0, value)\r\n\r\n def Clear(self):\r\n self.function = \"\"\r\n self.input.delete(0, END)\r\n self.input.insert(0, \"0\")\r\n\r\n def DeleteNum(self):\r\n self.function = self.input.get()[:-1]\r\n self.input.delete(0, END)\r\n self.input.insert(0, self.function)\r\n\r\n def ExpNum(self):\r\n self.function = math.pow(float(self.input.get()), 2)\r\n self.input.delete(0, END)\r\n self.input.insert(0, self.function)\r\n \r\n def SqrtNum(self):\r\n self.function = math.sqrt(float(self.input.get()))\r\n self.input.delete(0, END)\r\n self.input.insert(0, self.function)\r\n \r\n def Calculate(self):\r\n self.Calculate = self.Calculate","sub_path":"CodeUI05.py","file_name":"CodeUI05.py","file_ext":"py","file_size_in_byte":7450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"522415154","text":"\"\"\"\nNome: Gabriel Lins Gontijo Borges\nRA: 21803169\n\"\"\"\nfrom flask import Flask, request, render_template\n\nfrom flaskext.mysql import MySQL\n\nfrom bd import *\n\n# Instanciando a app Flask\n\napp = Flask(__name__)\n\n# Instanciar o objeto MySQL\n\nmysql = MySQL()\n\n# Ligar o MYSQL ao Flask\n\nmysql.init_app(app)\n\n\n\n# Configurando o acesso ao MySQL\n\napp.config['MYSQL_DATABASE_USER'] = 'root'\n\napp.config['MYSQL_DATABASE_PASSWORD'] = 'root'\n\napp.config['MYSQL_DATABASE_DB'] = 'faculdade'\n\n\n\n# Rota para /\n@app.route('/')\ndef start():\n return render_template('start.html')\n@app.route('/listarProfessores')\ndef principal():\n cursor = mysql.get_db().cursor()\n idprofessor = get_professor(cursor)\n print(idprofessor)\n cursor.close()\n cursor = mysql.get_db().cursor()\n return render_template('index.html',professor=get_professor(cursor))\n cursor.close\n@app.route('/exibirprofessor/<idprofessor>')\ndef exibirProfessor(idprofessor):\n cursor = mysql.get_db().cursor()\n\n return render_template('professores.html',disciplinas=get_disciplinas(cursor,idprofessor))\n cursor.close\n@app.route('/consultarPorTitulacao')\ndef consultar_por_titulacao():\n return render_template('consultaTitulo.html')\n\n@app.route('/pegarPorTitulo',methods=['GET','POST'])\ndef pegar_por_titulo():\n titulo = request.form.get('titulo')\n print(titulo)\n cursor = 
@app.route('/listarProfessores')\ndef principal():\n    cursor = mysql.get_db().cursor()\n    professores = get_professor(cursor)\n    print(professores)\n    cursor.close()\n    return render_template('index.html', professor=professores)\n@app.route('/exibirprofessor/<idprofessor>')\ndef exibirProfessor(idprofessor):\n    cursor = mysql.get_db().cursor()\n    disciplinas = get_disciplinas(cursor, idprofessor)\n    cursor.close()\n    return render_template('professores.html', disciplinas=disciplinas)\n@app.route('/consultarPorTitulacao')\ndef consultar_por_titulacao():\n    return render_template('consultaTitulo.html')\n\n@app.route('/pegarPorTitulo',methods=['GET','POST'])\ndef pegar_por_titulo():\n    titulo = request.form.get('titulo')\n    print(titulo)\n    cursor = mysql.get_db().cursor()\n    professores = get_professor_titulo(cursor,titulo)\n    cursor.close()\n    return render_template('exibirProf.html',professor=professores)\n\n@app.route('/consultarApenasComputacao')\ndef consut_comp():\n    cursor = mysql.get_db().cursor()\n    professores = get_idprofessor_ciencia(cursor)\n    cursor.close()\n    return render_template('exibirProf.html',professor=professores)\n\nif __name__ == '__main__':\n\n    app.run(debug=True)\n","sub_path":"inicial.py","file_name":"inicial.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"9028663","text":"__version__ = '1.1.7'\n__description__ = 'PGC Interface'\n__url__ = 'https://github.com/UniversalDevicesInc/pgc-python-interface'\n__author__ = 'James Milne'\n__authoremail__ = 'milne.james@gmail.com'\n__license__ = 'MIT'\n\nfrom .pgc_interface import Interface, Node, Controller, LOGGER\n\nLOGGER.info('{} {} Starting...'.format(__description__, __version__))\n","sub_path":"pgc_interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"221366197","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom sales.models.transaction_model import Transactions\nfrom django.contrib.auth.decorators import login_required\n\n\n\n@login_required(login_url='/login/')\ndef transaction_detail_view(request, id):\n    \"\"\"\n    This view renders the Transaction Details page for the selected transaction\n    \"\"\"\n    transaction_obj = get_object_or_404(Transactions, transaction_id=id)\n    context = {\n        \"transaction\": transaction_obj,\n        \"title\": \"Transaction Details\"\n\n    }\n    return render(request, \"transaction_details.html\", context)\n\n","sub_path":"sales/views/transaction_details.py","file_name":"transaction_details.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"547348997","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils import data\nfrom torch.autograd import Variable\nfrom torch.nn import Sequential\nimport numpy as np\nimport time\nimport pdb\nimport os\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nimport listener\nimport speller\n\n# import ctc_model_final as ctc_model\nimport csv\nimport sys\n# import Levenshtein as L\n# from ctcdecode import CTCBeamDecoder\nfrom torch.utils.data import DataLoader, TensorDataset\n# from warpctc_pytorch import CTCLoss\nimport data_loader_final as data_loader\nfrom data_loader_final import ctc_Dataset\n# import all.phoneme_list as phonemes\n\nrun_id = str(int(time.time()))\nos.mkdir('./runs/%s' % run_id)\nprint(\"Saving models, predictions, and generated words to ./experiments/%s\" % run_id)\nwriter = SummaryWriter('runs/%s' % run_id)\n\n# class CTCCriterion(CTCLoss):\n#     def forward(self, prediction, target):\n#         acts = prediction[0]\n#         act_lens = prediction[1].int()\n#         label_lens = prediction[2].int()\n#         labels = (target).view(-1).int()\n#         return super(CTCCriterion, self).forward(\n#             acts=acts,\n#             labels=labels.cpu(),\n#             act_lens=act_lens.cpu(),\n#             label_lens=label_lens.cpu()\n#         )\n#\n# def final_test(args, model,test_loader,gpu,i):\n#\n#     label_map = [' '] + phonemes.PHONEME_MAP\n#     decoder = CTCBeamDecoder(labels=label_map, blank_id=0, 
beam_width=100)\n# epoch_ls = 0\n# model.eval()\n# prediction = []\n# with open('submission_basemodel_%d.csv' %(i), 'w', newline='') as csvfile:\n# fieldnames = ['Id', 'Predicted']\n# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n# writer.writeheader()\n# for batch_idx,(data,data_lengths) in enumerate(test_loader):\n#\n# data = torch.from_numpy(data).float()\n# data_lengths = torch.from_numpy(data_lengths).int()\n# data = data.view(-1,1,40) #bcs test collate returns 2d\n# if gpu is True:\n# data = data.cuda()\n# data_lengths = data_lengths.cuda()\n#\n# logits = model(data,data_lengths)\n# logits = torch.transpose(logits, 0, 1)\n# probs = F.softmax(logits, dim=2).data.cpu()\n#\n# output, scores, timesteps, out_seq_len = decoder.decode(probs=probs, seq_lens=data_lengths)\n# pred = \"\".join(label_map[o] for o in output[0, 0, :out_seq_len[0, 0]])\n# writer.writerow({'Id': batch_idx, 'Predicted': pred})\n#\n# def eval(args, model,dev_loader, epoch,gpu):\n#\n# label_map = [' '] + phonemes.PHONEME_MAP\n# decoder = CTCBeamDecoder(labels=label_map, blank_id=0, beam_width=100)\n# epoch_ls = 0\n# model.eval()\n# for batch_idx,(data, target,data_lengths,target_lengths) in enumerate(dev_loader):\n#\n# #pdb.set_trace()\n# data = torch.from_numpy(data).float()\n# data_lengths = torch.from_numpy(data_lengths).int()\n#\n# if gpu is True:\n# data = data.cuda()\n# data_lengths = data_lengths.cuda()\n#\n# target = np.concatenate(target)\n# target = torch.from_numpy(target).int()\n#\n# logits = model(data,data_lengths)\n# logits = torch.transpose(logits, 0, 1)\n# probs = F.softmax(logits, dim=2).data.cpu()\n#\n# output, scores, timesteps, out_seq_len = decoder.decode(probs=probs, seq_lens=data_lengths)\n#\n# pos = 0\n# ls = 0.\n# #pdb.set_trace()\n# for i in range(output.size(0)):\n# #pdb.set_trace()\n# pred = \"\".join(label_map[o] for o in output[i, 0, :out_seq_len[i, 0]])\n# true = \"\".join(label_map[l] for l in target[pos:pos + target_lengths[i]])\n# pos += target_lengths[i]\n# ls += L.distance(pred, true)\n# print(\" batch {} ls {}\".format(batch_idx,ls))\n# #pdb.set_trace()\n# assert pos == target.size(0)\n# epoch_ls += ls / output.size(0)\n# # print(ls/output.size(0))\n# epoch_ls = epoch_ls/len(dev_loader)\n# print('Test Epoch: {} \\t \\tL dist: {:.6f}'.format(epoch,epoch_ls))\n# niter = epoch*len(dev_loader)+batch_idx\n# writer.add_scalar('Train/L distance', epoch_ls, niter)\n#\n# return epoch_ls\n\n\ndef train(args, listener_model, speller_model, train_loader,optimizer, epoch,gpu):\n # listener_model.train()\n epoch_loss = 0\n correct = 0\n criterion = nn.CrossEntropyLoss(ignore_index=-1)\n if gpu is True:\n criterion = criterion.cuda()\n\n for batch_idx, (data, target,data_lengths,target_lengths,target_mask) in enumerate(train_loader):\n\n data = torch.from_numpy(data).float() # THIS HAS TO BE FLOAT BASED ON THE NETWORK REQUIREMENT\n data_lengths = torch.from_numpy(data_lengths).int() #THIS HAS TO BE LONG BASED ON THE NETWORK REQUIREMENT\n target = torch.from_numpy(target).long()\n target_lengths = torch.from_numpy(target_lengths).int()\n target_mask = torch.from_numpy(target_mask).long()\n\n if gpu is True:\n data = data.cuda()\n data_lengths = data_lengths.cuda()\n target = target.cuda()\n target_lengths = target_lengths.cuda()\n target_mask = target_mask.cuda()\n \n optimizer.zero_grad()\n attention_key, attention_val, attention_mask = listener_model(data,data_lengths,gpu) #comes out at float\n pred = speller_model(target, target_lengths, attention_key, attention_val, 
attention_mask,gpu) #batch*lenseq*vocab\n\n        target = torch.t(target) #batch size first\n\n        # mark padded positions with -1 so they match the criterion's ignore_index\n        # (note: multiplying by the mask would map padding onto class 0 instead of ignoring it)\n        target = target.masked_fill(target_mask == 0, -1)\n\n        batch_loss = criterion(pred, target.flatten())\n        batch_loss.backward()\n        # torch.nn.utils.clip_grad_norm_(model.parameters(), 0.20)\n        optimizer.step()\n        epoch_loss += batch_loss.item()\n        #\n        if batch_idx % args.log_interval == 0:\n            print('Train Epoch: {} \\tbatch {} \\tLoss: {:.6f}'.format(epoch,batch_idx,batch_loss.item()))\n            niter = epoch*len(train_loader)+batch_idx\n            writer.add_scalar('Train/ctcLoss', batch_loss.item(), niter)\n\ndef save_checkpoint(state,is_best,model_name,dir):\n    \n    filename=dir+'/' + model_name\n    torch.save(state, filename)\n    if is_best:\n        filename=dir + '/best/model_best.pth.tar'\n        torch.save(state, filename)\n\ndef main():\n    # Training settings\n    parser = argparse.ArgumentParser(description='PyTorch ctc speech')\n    parser.add_argument('--batch_size', type=int, default=5, metavar='N',\n                        help='input batch size for training (default: 64)')\n    parser.add_argument('--test_batch_size', type=int, default=50, metavar='N',\n                        help='input batch size for testing (default: 100)')\n    parser.add_argument('--epochs', type=int, default=50, metavar='N',\n                        help='number of epochs to train (default: 10)')\n\n    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',\n                        help='learning rate (default: 0.01)')\n    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n                        help='SGD momentum (default: 0.5)')\n    parser.add_argument('--use_gpu', type=bool, default=False,\n                        help='decides CUDA training')\n    parser.add_argument('--seed', type=int, default=1, metavar='S',\n                        help='random seed (default: 1)')\n    parser.add_argument('--log_interval', type=int, default=10, metavar='N',\n                        help='how many batches to wait before logging training status')\n    parser.add_argument('--workers', type=int, default=6)\n    parser.add_argument('--ctx', type=int, default=14000)\n    parser.add_argument('--eval', type=bool, default=False)\n    args = parser.parse_args()\n\n    if torch.cuda.is_available() and args.use_gpu is True:\n        gpu = True\n    else:\n        gpu = False\n\n    torch.manual_seed(args.seed)\n    if gpu is True:\n        torch.cuda.manual_seed(args.seed)\n\n    print('gpu {}'.format(gpu))\n\n    best_eval = None\n\n    os.mkdir('./models/%s' % run_id)\n    os.mkdir('./models/%s/best' % run_id)\n    with open('./models/%s/commandline_args.txt' %run_id, 'w') as f:\n        f.write('\\n'.join(sys.argv[1:]))\n\n    print('Starting data loading')\n    # model.apply(init_randn)\n    training_set = ctc_Dataset('train', batch_size=args.batch_size)\n    params = {'batch_size': args.batch_size, 'num_workers': args.workers, 'shuffle': True,\n              'collate_fn': data_loader.collate}  # if use_cuda else {}\n    train_loader = data.DataLoader(training_set, **params)\n\n    validation_set = ctc_Dataset('dev', batch_size=args.test_batch_size)\n    params = {'batch_size': args.test_batch_size, 'num_workers': args.workers, 'shuffle': False,\n              'collate_fn': data_loader.collate}\n    validation_loader = data.DataLoader(validation_set, **params)\n\n    print('Done data loading, starting training')\n\n    listener_model = listener.listenerModel(40,256,128,embed_drop=0,lock_dropi=0.0,lock_droph=0,lock_dropo=0.0)\n    speller_model = speller.SpellerModel(training_set.vocab_size,256,512,128)\n    if gpu is True:\n        listener_model = listener_model.cuda()\n        speller_model = speller_model.cuda()\n\n    if args.eval is False:\n\n    \n        # optimize both networks; the speller's weights would otherwise never be updated\n        optimizer = optim.Adam(list(listener_model.parameters()) + list(speller_model.parameters()), lr=args.lr)\n\n        # dir = './models/%s' % run_id\n        for epoch in range(1,2):\n            train(args, 
listener_model,speller_model, train_loader,optimizer, epoch,gpu)\n            #model_name = 'model_best.pth.tar'\n            #filepath = os.getcwd()+'/models/1541143617/best/' + model_name\n            #filepath = os.getcwd()+'/models/1541143617/best/' + model_name\n            #state = torch.load(filepath)\n            #model.load_state_dict(state['state_dict'])\n            #print(model)\n            # avg_ldistance = eval(args, model, validation_loader,epoch,gpu)\n            ## remember best acc and save checkpoint\n            # is_best = False\n            # if best_eval is None or best_eval>avg_ldistance:\n            #     is_best = True\n            #     best_eval = avg_ldistance\n            # model_name = 'model_%d.pth.tar' %(epoch)\n            # save_checkpoint({\n            #     'epoch': epoch + 1,\n            #     'state_dict': model.state_dict(),\n            #     'best_acc': best_eval,\n            #     'optimizer' : optimizer.state_dict(),\n            # }, is_best,model_name,dir)\n    # else:\n    #     print('Testing started')\n    #     model_name = '/model_best.pth.tar'\n    #     filepath = os.getcwd() + '/models/1541263511/best/'+model_name\n    #     state = torch.load(filepath)\n    #     model.load_state_dict(state['state_dict'])\n    #     print(model)\n    #     test_set = ctc_Dataset('test',batch_size=1)\n    #     params = {'batch_size': 1,'num_workers': args.workers, 'shuffle': False,'collate_fn':data_loader.test_collate } # if use_cuda else {}\n    #     test_loader = data.DataLoader(test_set, **params)\n    #     final_test(args,model,test_loader,gpu,1)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main_final.py","file_name":"main_final.py","file_ext":"py","file_size_in_byte":11407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"525185724","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2020-07-03\n\nCode for figure 4D\n- Population size vs strategy along top diagonal of parameter space\nat high mutation rate for 3,4 species communities\n\nRuns model to steady state for different mutation rates\nOutputs population size as well as other data.\n\n\n@author: Simon van Vliet & Gil Henriques\nDepartment of Zoology\nUniversity of British Columbia\nvanvliet@zoology.ubc.ca\nhenriques@zoology.ubc.ca\n\n============================================================================\nRun Model and plot results\n============================================================================\"\"\"\n\nimport sys\nsys.path.insert(0, '..')\n\n#load code\nfrom mainCode import MlsGroupDynamics_main as mls\nimport pandas as pd\nimport numpy as np\nfrom joblib import Parallel, delayed\n\n\"\"\"============================================================================\nSET MODEL SETTINGS\n============================================================================\"\"\"\n\n#SET nr of cores to use\nnCore = 40\n\n#SET OUTPUT FILENAME\nfileName = 'transectsHiMu'\n\n#SET mutation rates to scan\nmutR_vec = np.array([0.025, 0.05,0.075])\n\n#SET number of species to scan\nindv_NType_vec = np.array([3,4])\n\n#SET X Coordinates along top diagonal of parameter space, y is set to 1-x\nxLoc_vec = np.linspace(0.01,0.5,50)\n\n#SET nr of replicates\nnReplicate = 5\n\n#SET rest of model parameters\nmodel_par = {\n    #time and run settings\n    \"maxT\": 5000,  # total run time\n    \"maxPopSize\": 1000000,  #stop simulation if population exceeds this number\n    \"minT\": 2500,  # min run time\n    \"sampleInt\": 1,  # sampling interval\n    \"mav_window\": 200,  # average over this time window\n    \"rms_window\": 200,  # calc rms change over this time window\n    \"rms_err_trNCoop\": 1E-2,  # when to stop calculations\n    \"rms_err_trNGr\": 5E-2,  # when to stop calculations\n    # settings for initial condition\n    
\"init_groupNum\": 100, # initial # groups\n \"init_fCoop\": 1,\n \"init_groupDens\": 50, # initial total cell number in group\n # settings for individual level dynamics\n # complexity\n \"indv_NType\": 1,\n \"indv_asymmetry\": 1, # difference in growth rate b(j+1) = b(j) / asymmetry\n # mutation load\n \"indv_cost\": 0.01, # cost of cooperation\n \"indv_mutR\": 1E-3, # mutation rate to cheaters\n \"indv_migrR\": 0, # mutation rate to cheaters\n # group size control\n \"indv_K\": 100, # total group size at EQ if f_coop=1\n \"delta_indv\": 1, # zero if death rate is simply 1/k, one if death rate decreases with group size\n # setting for group rates\n # fission rate\n 'gr_CFis': 0.05,\n 'gr_SFis': 0, # measured in units of 1 / indv_K\n 'grp_tau': 1, # constant multiplies group rates\n # extinction rate\n 'delta_grp': 0, # exponent of density dependence on group #\n 'K_grp': 0, # carrying capacity of groups\n 'delta_tot': 1, # exponent of density dependence on total #individual\n 'K_tot': 1E5, # carrying capacity of total individuals\n 'delta_size': 0, # exponent of size dependence\n # settings for fissioning\n 'offspr_size': 0.01, # offspr_size <= 0.5 and\n 'offspr_frac': 0.01, # offspr_size < offspr_frac < 1-offspr_size'\n # extra settings\n 'run_idx': 1,\n 'replicate_idx': 1,\n 'perimeter_loc': 0\n }\n\n\n\"\"\"============================================================================\nCODE TO MAKE FIGURE\n============================================================================\"\"\"\n\n\n#set model parameters for fission mode\ndef set_model_par(model_par, settings):\n #copy model par (needed because otherwise it is changed in place)\n model_par_local = model_par.copy()\n\n #set model parameters\n for key, val in settings.items():\n model_par_local[key] = val\n\n return model_par_local\n\n# run model\ndef create_model_par_list(model_par):\n #create model paremeter list for all valid parameter range\n modelParList = []\n run_idx = 0\n\n for mutR in mutR_vec:\n for indv_NType in indv_NType_vec:\n run_idx += 1\n for xloc in xLoc_vec:\n for repIdx in range(nReplicate):\n #implement local settings\n settings = {'indv_mutR' : mutR,\n 'indv_NType' : indv_NType,\n 'run_idx' : run_idx,\n 'replicate_idx' : repIdx+1,\n 'offspr_size' : xloc,\n 'offspr_frac' : 1-xloc}\n\n curPar = set_model_par(model_par, settings)\n modelParList.append(curPar)\n\n return modelParList\n\n# run model code\ndef run_model():\n #get model parameters to scan\n modelParList = create_model_par_list(model_par)\n\n # run model, use parallel cores\n nJobs = min(len(modelParList), nCore)\n print('starting with %i jobs' % len(modelParList))\n results = Parallel(n_jobs=nJobs, verbose=9, timeout=1.E9)(\n delayed(mls.run_model_steadyState_fig)(par) for par in modelParList)\n\n #store output to disk\n fileNameTemp = fileName + '_temp' + '.npy'\n np.save(fileNameTemp, results)\n\n #convert to pandas dataframe and export\n fileNameFull = fileName + '.pkl'\n dfSet = [pd.DataFrame.from_records(npa) for npa in results]\n df = pd.concat(dfSet, axis=0, ignore_index=True)\n df.to_pickle(fileNameFull)\n\n return None\n\n#run parscan\nif __name__ == \"__main__\":\n run_model()\n","sub_path":"python_model_code/manuscriptFigureCode/mlsFig_transectsHigherMu.py","file_name":"mlsFig_transectsHigherMu.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"594225290","text":"#!/usr/bin/env python2\n\n# author: tmv\n# for voice control\n# requires: 
SpeechRecognition, pyaudio\n\nimport ConfigParser as configparser\nimport speech_recognition as sr\nimport pyaudio, wave\nimport logging\nimport re, subprocess\nimport CommandProcessor\nimport sys, traceback, time\nimport GoogleTTS\nimport thread\n\nfrom collections import deque\n\ndef shutil_which(pgm):\n \"\"\"\n python2 backport of python3's shutil.which()\n \"\"\"\n path = os.getenv('PATH')\n for p in path.split(os.path.pathsep):\n p = os.path.join(p, pgm)\n if os.path.exists(p) and os.access(p, os.X_OK):\n return p\n\nclass SpeechCommander:\n \n def __init__(self, configFile=\"conf/HtRoomControl.conf\"):\n self.config = configparser.ConfigParser()\n self.config.read(configFile)\n \n # determine the device index to use for mic\n self.pyaudio = pyaudio.PyAudio()\n self.mic_device_name = self.config.get(\"recognizer\", \"mic_device\")\n self.out_device_name = self.config.get(\"recognizer\", \"out_device\")\n self.mic_device = self._get_device_index(self.mic_device_name)\n self.out_device = self._get_device_index(self.out_device_name)\n\n # process the matches\n self.matches = { }\n wildcard_str = \".*\"\n for match in self.config.items(\"matches\"):\n # convert the matches to regular expressions\n reg_ex_str = \".*(\" + match[1] + \").*\" # put outer group\n self.matches[match[0]] = re.compile(reg_ex_str, re.IGNORECASE)\n \n self.keywords = self.config.get(\"recognizer\", \"keywords\").split(\"|\")\n\n # initialize command processor\n self.cmdProcessor = CommandProcessor.CommandProcessor(self.config)\n self.commands = dict(self.config.items(\"commands\"))\n\n self.recognizer = sr.Recognizer()\n self.recognizer.energy_threshold = self.config.getint(\"recognizer\", \"energy_threshold\")\n self.recognizer.pause_threshold = self.config.getfloat(\"recognizer\", \"pause_threshold\")\n self.command_duration = self.config.getint(\"recognizer\", \"command_duration\")\n self.force_command = self.config.getboolean(\"recognizer\", \"force_command\")\n\n def __del__(self):\n self.pyaudio.terminate()\n\n def _get_device_index(self, name):\n ''' returns the index for the specified name '''\n p = self.pyaudio\n result = -1\n for idx in range(p.get_device_count()):\n if p.get_device_info_by_index(idx) == name:\n result = idx\n break\n \n return result\n\n def playMp3(self, something):\n mp3_player = self.config.get(\"recognizer\", \"mp3_player\").split()\n process = subprocess.Popen(mp3_player, stdin=subprocess.PIPE)\n process.communicate(something)\n\n def saySomething(self, something):\n self.playMp3(GoogleTTS.audio_extract(something))\n \n \n def playSound(self, waveFile):\n chunk = 1024\n p = self.pyaudio\n wf = wave.open(waveFile, \"rb\")\n \n stream = p.open(format = p.get_format_from_width(wf.getsampwidth()),\n channels = wf.getnchannels(),\n rate = wf.getframerate(),\n output_device_index = self.out_device,\n output = True)\n \n data = wf.readframes(chunk)\n \n while data != '':\n stream.write(data)\n data = wf.readframes(chunk)\n \n wf.close()\n stream.close()\n\n def _captureVoice(self):\n logging.info(\"Waiting for voice command...\")\n with sr.Microphone(self.mic_device) as source:\n audio = self.recognizer.listen(source)\n \n logging.info(\"A voice is detected.\")\n \n return audio\n\n def captureVoice(self):\n \"\"\" this is the thread which captures voice input from microphone \"\"\"\n \n while True:\n audio = self._captureVoice()\n\n # put the captured audio into the queue\n with self.thread_lock:\n self.voiceQueue.append(audio)\n \n\n def _processVoice(self, audio, keyword_mode):\n \n 
logging.info(\"Processing in {0} mode...\".format(\"keyword\" if keyword_mode else \"command\"))\n\n # process the retrieved audio\n phrases = []\n try:\n predictions = self.recognizer.recognize(audio, True)\n for prediction in predictions:\n phrases.append(prediction[\"text\"])\n logging.info(\"Recognized phrases: {0}\".format(str(phrases)))\n\n # special handling for thank you\n if \"thank you\" in phrases:\n self.saySomething(\"You're welcome\")\n keyword_mode = True and not self.force_command\n return keyword_mode\n\n if len(phrases) > 0:\n if keyword_mode: # looking for keyword\n for keyword in self.keywords:\n if keyword in phrases:\n logging.info(\"'{0}' keyword found.\".format(keyword))\n keyword_mode = False\n self.playMp3(self.keyword_ack_response)\n break\n else:\n # check and execute the command\n command_ref = None\n for phrase in phrases:\n for match_key in self.matches.keys():\n reg_ex = self.matches[match_key]\n # current match can be used as parameter on the commands\n reg_ex_match = reg_ex.match(phrase)\n if reg_ex_match:\n self.match_groups = reg_ex_match.groups()\n command_ref = match_key\n break\n if command_ref:\n break\n \n if command_ref is None:\n raise LookupError(\"No valid commands\")\n\n logging.info(\"Executing '{0}'\".format(command_ref))\n command = self.commands[command_ref]\n if command:\n full_cmd = command.split(\"|\")\n if len(full_cmd) > 1:\n # there's a custom response\n if len(full_cmd[1]) > 0:\n self.saySomething(full_cmd[1])\n else:\n self.playMp3(self.command_ack_response)\n \n # replace any parameterized command with values retrieved from reg ex\n for param_number in range(len(self.match_groups)):\n param_str = \"$\" + str(param_number)\n match_group = self.match_groups[param_number]\n if match_group:\n full_cmd[0] = full_cmd[0].replace(param_str, match_group)\n\n self.cmdProcessor.process_command(full_cmd[0]) \n \n keyword_mode = True and not self.force_command\n \n except LookupError:\n logging.info(\"No recognize words\")\n if not keyword_mode and not self.force_command:\n self.playMp3(self.lookup_error_response)\n\n except:\n e = sys.exc_info()[0]\n logging.error(traceback.format_exc())\n keyword_mode = True and not self.force_command\n self.saySomething(\"There's an error. 
Please check the log.\")\n\n return keyword_mode\n\n def processVoice(self):\n \"\"\" process the captured voices \"\"\"\n keyword_mode = True and not self.force_command\n while True:\n try:\n with self.thread_lock:\n audio = self.voiceQueue.popleft()\n except IndexError:\n # no data to process in the queue\n time.sleep(0.5) # delay for half a sec\n continue\n \n keyword_mode = self._processVoice(audio, keyword_mode) \n \n def listen_multi(self):\n \"\"\" multi-threading option for listening to commands \"\"\"\n # retrieve from google possible responses\n self.ready_response = GoogleTTS.audio_extract(self.config.get(\"recognizer\", \"ready_response\"))\n self.keyword_ack_response = GoogleTTS.audio_extract(self.config.get(\"recognizer\", \"keyword_ack\"))\n self.command_ack_response = GoogleTTS.audio_extract(self.config.get(\"recognizer\", \"command_ack\"))\n self.lookup_error_response = GoogleTTS.audio_extract(self.config.get(\"recognizer\", \"lookup_error_response\"))\n self.playMp3(self.ready_response)\n self.voiceQueue = deque() # initialize to an empy queue\n self.thread_lock = thread.allocate_lock()\n \n # starts the 2 threads\n thread.start_new_thread(self.captureVoice, ())\n thread.start_new_thread(self.processVoice, ())\n\n # wait forever\n while True:\n time.sleep(1)\n \n\n def listen(self):\n # retrieve from google possible responses\n self.ready_response = GoogleTTS.audio_extract(self.config.get(\"recognizer\", \"ready_response\"))\n self.keyword_ack_response = GoogleTTS.audio_extract(self.config.get(\"recognizer\", \"keyword_ack\"))\n self.command_ack_response = GoogleTTS.audio_extract(self.config.get(\"recognizer\", \"command_ack\"))\n self.lookup_error_response = GoogleTTS.audio_extract(self.config.get(\"recognizer\", \"lookup_error_response\"))\n self.playMp3(self.ready_response)\n \n # wait forever\n keyword_mode = True and not self.force_command\n while True:\n audio = self._captureVoice()\n keyword_mode = self._processVoice(audio, keyword_mode)\n \nif __name__ == \"__main__\":\n import os\n\n # change the working directory to where the script was located\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(abspath)\n os.chdir(dname) \n \n logging.basicConfig(level=logging.INFO)\n\n speechCommander = SpeechCommander()\n speechCommander.listen()\n","sub_path":"control/SpeechCommander.py","file_name":"SpeechCommander.py","file_ext":"py","file_size_in_byte":10333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"270763190","text":"# -*- coding: UTF-8 -*-\r\nimport requests\r\nimport base64\r\n\r\ndef token():\r\n AK='Runpjy2QX7h5XKi3mz8nGKVm'\r\n SK='cG4HKfw5H6BqSRTOpGFXG5CPMKdGsKQP'\r\n request_url = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id='+AK+'&client_secret='+SK\r\n response = requests.get(request_url)\r\n try:\r\n access_token = response.json()['access_token']\r\n echo=\"成功获取access_token\\n\"\r\n except:\r\n echo=\"access_token获取失败\\n\"\r\n finally:\r\n print(echo)\r\n return access_token\r\n\r\n\r\n\r\ndef baiduace(access_token):\r\n #识别图片,图片为py文件根目录的temp.jpg\r\n request_url = \"https://aip.baidubce.com/rest/2.0/image-classify/v2/advanced_general\"\r\n # 二进制方式打开图片文件\r\n f = open('temp.jpg', 'rb')\r\n img = base64.b64encode(f.read())\r\n f.close()\r\n params = {\"image\":img}\r\n request_url = request_url + \"?access_token=\" + str(access_token)\r\n headers = {'content-type': 'application/x-www-form-urlencoded'}\r\n response = requests.post(request_url, data=params, 
headers=headers)\r\n try:\r\n garbage1=response.json()['result'][0]['keyword']\r\n percent1=str(round(response.json()['result'][0]['score']*100,2))+'%'\r\n garbage2=response.json()['result'][1]['keyword']\r\n percent2=str(round(response.json()['result'][1]['score']*100,2))+'%'\r\n garbage3=response.json()['result'][2]['keyword']\r\n percent3=str(round(response.json()['result'][2]['score']*100,2))+'%'\r\n garbage4=response.json()['result'][3]['keyword']\r\n percent4=str(round(response.json()['result'][3]['score']*100,2))+'%'\r\n garbage5=response.json()['result'][4]['keyword']\r\n percent5=str(round(response.json()['result'][4]['score']*100,2))+'%'\r\n #data=[garbage1,garbage2,garbage3,garbage4,garbage5]\r\n data={garbage1:percent1,garbage2:percent2,garbage3:percent3,garbage4:percent4,garbage5:percent5}\r\n echo='该图片显示的可能是:\\n'+garbage1+',概率:'+percent1+'\\n'+garbage2+',概率:'+percent2+'\\n'+garbage3+',概率:'+percent3+'\\n'+garbage4+',概率:'+percent4+'\\n'+garbage5+',概率:'+percent5+'\\n'\r\n except:\r\n echo=\"图像识别失败\"\r\n finally:\r\n print(echo)\r\n return data\r\n\r\n\r\n\r\ndef classify(data):\r\n for i in data:\r\n try:\r\n request_url = 'https://api.muxiaoguo.cn/api/lajifl?api_key=9319f855d710556c&m='+i\r\n response = requests.get(request_url)\r\n type=response.json()['data']['type']#垃圾类别\r\n concept=response.json()['data']['description']['Concept']#垃圾基本概念\r\n including=response.json()['data']['description']['Including']#垃圾主要包括\r\n release_requirement=response.json()['data']['description']['Release_requirement']#投放要求\r\n echo='垃圾类别:'+type+'\\n垃圾基本概念:'+concept+'\\n垃圾主要包括:'+including+'\\n投放要求:'+release_requirement\r\n\r\n if concept is not None:\r\n break\r\n except:\r\n echo='垃圾分类失败,可能是图片过于离谱,或者服务器故障'\r\n\r\n print(echo)\r\n return type\r\n\r\n# token=token()\r\n# data=baiduace(token)\r\n# type=classify(data)\r\n# print(type)","sub_path":"Garbage_classification.py","file_name":"Garbage_classification.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"543125777","text":"#!/usr/bin/env python\r\n'''\r\nCondicionales [Python]\r\nEjercicios de práctica\r\n---------------------------\r\nAutor: Inove Coding School\r\nVersion: 1.1\r\n \r\nDescripcion:\r\nPrograma creado para que practiquen los conocimietos adquiridos durante la semana\r\n'''\r\n\r\n__author__ = \"Inove Coding School\"\r\n__email__ = \"alumnos@inove.com.ar\"\r\n__version__ = \"1.1\"\r\n\r\ndef ej1():\r\n # Ejercicios de práctica con números\r\n\r\n '''\r\n Realice un programa que solicite por consola 2 números\r\n Calcule la diferencia entre ellos e informe por pantalla\r\n si el resultado es positivo, negativo o cero.\r\n '''\r\n numero_1 = float(input(\"Ingrese el primer número: \"))\r\n numero_2 = float(input(\"Ingrese el segundo número: \"))\r\n\r\n resta = numero_1 - numero_2\r\n\r\n if resta > 0:\r\n print(\"El resultado es positivo\")\r\n elif resta < 0:\r\n print(\"El resultado es negativo\")\r\n else:\r\n print(\"El resultado es 0\")\r\n\r\ndef ej2():\r\n# Ejercicios de práctica con números\r\n\r\n '''\r\n Realice un programa que solicite el ingreso de tres números\r\n enteros, y luego en cada caso informe si el número es par\r\n o impar.\r\n Para cada caso imprimir el resultado en pantalla.\r\n '''\r\n numero_1 = int(input(\"Ingrese el primer número entero: \"))\r\n numero_2 = int(input(\"Ingrese el segundo número entero: \"))\r\n numero_3 = int(input(\"Ingrese el tercer número entero: \"))\r\n\r\n if (numero_1 % 2) == 0:\r\n 
print(f\"{numero_1} es par\")\r\n else:\r\n print(f\"{numero_1} es impar\")\r\n\r\n if (numero_2 % 2) == 0:\r\n print(f\"{numero_2} es par\")\r\n else:\r\n print(f\"{numero_2} es impar\")\r\n \r\n if (numero_3 % 2) == 0:\r\n print(f\"{numero_3} es par\")\r\n else:\r\n print(f\"{numero_3} es impar\")\r\n\r\ndef ej3():\r\n # Ejercicios de práctica con números\r\n\r\n '''\r\n Realice una calculadora, se ingresará por línea de comando dos números\r\n Luego se ingresará como tercera entrada al programa el símbolo de la operación\r\n que se desea ejecutar\r\n - Suma (+)\r\n - Resta (-)\r\n - Multiplicación (*)\r\n - División (/)\r\n - Exponente/Potencia (**)\r\n\r\n Se debe efectuar el cálculo correcto según la operación ingresada por consola\r\n Imprimir en pantalla la operación realizada y el resultado\r\n \r\n '''\r\n numero_1 = float(input(\"Ingrese el primer número: \"))\r\n numero_2 = float(input(\"Ingrese el segundo número: \"))\r\n print(\" - Suma (+)\")\r\n print(\" - Resta (-)\")\r\n print(\" - Multiplicación (*)\")\r\n print(\" - División (/)\")\r\n print(\"- Exponente/Potencia (**)\")\r\n simbolo = input(\"Ingrese la operación a realizar: \")\r\n\r\n if simbolo == '+':\r\n print(f\"La suma de {numero_1} y {numero_2} es {numero_1 + numero_2}\")\r\n elif simbolo == '-':\r\n print(f\"La resta de {numero_1} y {numero_2} es {numero_1 - numero_2}\")\r\n elif simbolo == '*':\r\n print(f\"La multiplicación de {numero_1} y {numero_2} es {numero_1 * numero_2}\")\r\n elif simbolo == '/':\r\n print(f\"La división de {numero_1} y {numero_2} es {numero_1 / numero_2}\")\r\n elif simbolo == '**':\r\n print(f\"La potencia de base {numero_1} y exponente {numero_2} es {numero_1 ** numero_2}\")\r\n else:\r\n print(\"Esta calculadora no realiza la operación pedida. ¡Lo siento!\")\r\n\r\ndef ej4():\r\n # Ejercicios de práctica con cadenas\r\n \r\n '''\r\n Realice un programa que solicite por consola 3 palabras cualesquiera\r\n Luego el programa debe consultar al usuario como quiere ordenar las palabras\r\n 1 - Ordenar por orden alfabético (usando el operador \">\")\r\n 2 - Ordenar por cantidad de letras (longitud de la palabra)\r\n\r\n Si se ingresa \"1\" por consola se deben ordenar las 3 palabras por orden alfabético\r\n e imprimir en pantalla de la mayor a la menor\r\n\r\n Si se ingresa \"2\" por consola se deben ordenar las 3 palabras por cantidad de letras\r\n e imprimir en pantalla de la mayor a la menor\r\n\r\n '''\r\n palabra_1 = input(\"Ingrese la primer palabra: \")\r\n palabra_2 = input(\"Ingrese la segunda palabra: \")\r\n palabra_3 = input(\"Ingrese la tercer palabra: \")\r\n orden_palabras = \" \" # inicio variable para ordenamiento\r\n opcion = 0\r\n \r\n # Comparo las palabras introducidas para saber que operaciones de\r\n # ordenamiento se puden realizar. Si hay al menos dos palabras repetidas \r\n # no se podrá hacer ordenamiento alguno, y si hay palabras diferentes \r\n # pero con igual cantidad de letras no se podrá hacer ordenamiento por\r\n # cantidad de letras.\r\n if (palabra_1 == palabra_2) or (palabra_1 == palabra_3) or (palabra_2 == palabra_3):\r\n print(\"¡ATENCIÓN!... Ha introducido palabras iguales\")\r\n print(\"No puede hacerse ordenamiento. 
El programa ha terminado\")\r\n else:\r\n if (len(palabra_1) == len(palabra_2)) or (len(palabra_1) == len(palabra_3)) or (len(palabra_2) == len(palabra_3)):\r\n print(\"¡ATENCIÓN!...palabras con igual cantidad de letras\")\r\n print(\"solo se podrá hacer ordenamiento alfabético\")\r\n opcion = 1\r\n else:\r\n print(\"-\" * 50)\r\n print(\"Ingrese 1 para orden alfabético\")\r\n print(\"Ingrese 2 para ordenar por cantidad de letras\")\r\n print(\"-\" * 50)\r\n opcion = int(input(\"Ingrese su opción: \"))\r\n # Ejecución de ordenamiento por órden alfabético de mayor a menor\r\n if opcion == 1:\r\n if (palabra_1 > palabra_2) and (palabra_1 > palabra_3):\r\n orden_palabras += palabra_1 + \" \"\r\n if palabra_2 > palabra_3:\r\n orden_palabras += palabra_2 + \" \"\r\n orden_palabras += palabra_3\r\n else:\r\n orden_palabras += palabra_3 + \" \"\r\n orden_palabras += palabra_2\r\n if (palabra_2 > palabra_1) and (palabra_2 > palabra_3):\r\n orden_palabras += palabra_2 + \" \"\r\n if palabra_1 > palabra_3:\r\n orden_palabras += palabra_1 + \" \"\r\n orden_palabras += palabra_3\r\n else:\r\n orden_palabras += palabra_3 + \" \"\r\n orden_palabras += palabra_1\r\n if (palabra_3 > palabra_1) and (palabra_3 > palabra_2):\r\n orden_palabras += palabra_3 + \" \"\r\n if palabra_1 > palabra_2:\r\n orden_palabras += palabra_1 + \" \"\r\n orden_palabras += palabra_2\r\n else:\r\n orden_palabras += palabra_2 + \" \"\r\n orden_palabras += palabra_1\r\n \r\n # Ejecución de ordenamiento de mayor a menor por cantidad de letras.\r\n if opcion == 2:\r\n if (len(palabra_1) > len(palabra_2)) and (len(palabra_1) > len(palabra_3)):\r\n orden_palabras += palabra_1 + \" \"\r\n if len(palabra_2) > len(palabra_3):\r\n orden_palabras += palabra_2 + \" \"\r\n orden_palabras += palabra_3\r\n else:\r\n orden_palabras += palabra_3 + \" \"\r\n orden_palabras += palabra_2\r\n if (len(palabra_2) > len(palabra_1)) and (len(palabra_2) > len(palabra_3)):\r\n orden_palabras += palabra_2 + \" \"\r\n if len(palabra_1) > len(palabra_3):\r\n orden_palabras += palabra_1 + \" \"\r\n orden_palabras += palabra_3\r\n else:\r\n orden_palabras += palabra_3 + \" \"\r\n orden_palabras += palabra_1\r\n if (len(palabra_3) > len(palabra_1)) and (len(palabra_3) > len(palabra_2)):\r\n orden_palabras += palabra_3 + \" \"\r\n if len(palabra_1) > len(palabra_2):\r\n orden_palabras += palabra_1 + \" \"\r\n orden_palabras += palabra_2\r\n else:\r\n orden_palabras += palabra_2 + \" \"\r\n orden_palabras += palabra_1\r\n print(orden_palabras)\r\n\r\ndef ej5():\r\n # Ejercicios de práctica con números\r\n \r\n '''\r\n Realice un programa que solicite ingresar tres valores de temperatura\r\n De las temperaturas ingresadas por consola determinar:\r\n 1 - ¿Cuáles de ellas es la máxima temperatura ingresada?\r\n 2 - ¿Cuáles de ellas es la mínima temperatura ingresada?\r\n 3 - ¿Cuál es el promedio de las temperaturas ingresadas?\r\n\r\n En cada caso imprimir en pantalla el resultado \r\n\r\n '''\r\n temp_1 = int(input(\"Ingrese el primer valor de temperatura: \"))\r\n temp_2 = int(input(\"Ingrese el segundo valor de temperatura: \"))\r\n temp_3 = int(input(\"Ingrese el tercer valor de temperatura: \"))\r\n\r\n if (temp_1 == temp_2) or (temp_1 == temp_3) or (temp_2 == temp_3):\r\n print(\"¡ATENCION!... 
Ha introducido valores repetidos\")\r\n print(\"Programa terminado\")\r\n else:\r\n if temp_1 > temp_2 and temp_1 > temp_3:\r\n print(f\"{temp_1} es la mayor temperatura ingresada\")\r\n elif temp_2 > temp_1 and temp_2 > temp_3:\r\n print(f\"{temp_2} es la mayor temperatura ingresada\")\r\n else:\r\n print(f\"{temp_3} es la mayor temperatura ingresada\")\r\n \r\n if temp_1 < temp_2 and temp_1 < temp_3:\r\n print(f\"{temp_1} es la menor temperatura ingresada\")\r\n elif temp_2 < temp_1 and temp_2 < temp_3:\r\n print(f\"{temp_2} es la menor temperatura ingresada\")\r\n else:\r\n print(f\"{temp_3} es la menor temperatura ingresada\")\r\n\r\n promedio = (temp_1 + temp_2 + temp_3) / 3\r\n print(\"El promedio de temperaturas es\",round(promedio, 2))\r\n\r\nif __name__ == '__main__':\r\n print(\"Ejercicios de práctica\")\r\n #ej1()\r\n #ej2()\r\n #ej3()\r\n #ej4()\r\n ej5()\r\n","sub_path":"ejercicios_practica.py","file_name":"ejercicios_practica.py","file_ext":"py","file_size_in_byte":9547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"117822892","text":"a = 'bbbbbbbbbbbbbbb'\r\nstr_ = []\r\nlenth = 0\r\nfor i in a:\r\n if i not in str_:\r\n str_.append(i)\r\n else:\r\n str_ = str_[str_.index(i)+1:]\r\n str_.append(i)\r\n lenth_update = len(str_)\r\n if lenth_update > lenth:\r\n lenth = lenth_update\r\nprint(lenth)\r\n\r\n\r\n","sub_path":"leetcode/str_len.py","file_name":"str_len.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"479477370","text":"import os\nimport subprocess\n\nfrom typing import List # noqa: F401\n\nfrom libqtile import bar, layout, widget, hook\nfrom libqtile.config import Click, Drag, Group, Key, Screen\nfrom libqtile.lazy import lazy\nfrom wmenv import colors\n\nmod = \"mod4\"\nterminal = \"st\"\nlauncher = \"rofi -show run -matching fuzzy\"\n\nkeys = [\n # Switch between windows in current stack pane\n Key([mod], \"k\", lazy.layout.down(),\n desc=\"Move focus down in stack pane\"),\n Key([mod], \"j\", lazy.layout.up(),\n desc=\"Move focus up in stack pane\"),\n\n # Move windows up or down in current stack\n Key([mod, \"control\"], \"k\", lazy.layout.shuffle_down(),\n desc=\"Move window down in current stack \"),\n Key([mod, \"control\"], \"j\", lazy.layout.shuffle_up(),\n desc=\"Move window up in current stack \"),\n\n # Switch window focus to other pane(s) of stack\n Key([mod], \"space\", lazy.layout.next(),\n desc=\"Switch window focus to other pane(s) of stack\"),\n\n # Swap panes of split stack\n Key([mod, \"shift\"], \"space\", lazy.layout.rotate(),\n desc=\"Swap panes of split stack\"),\n\n # Toggle between split and unsplit sides of stack.\n # Split = all windows displayed\n # Unsplit = 1 window displayed, like Max layout, but still with\n # multiple stack panes\n Key([mod, \"shift\"], \"Return\", lazy.layout.toggle_split(),\n desc=\"Toggle between split and unsplit sides of stack\"),\n Key([mod], \"Return\", lazy.spawn(terminal), desc=\"Launch terminal\"),\n\n # Toggle between different layouts as defined below\n Key([mod], \"Tab\", lazy.next_layout(), desc=\"Toggle between layouts\"),\n Key([mod, \"shift\"], \"c\", lazy.window.kill(), desc=\"Kill focused window\"),\n\n Key([mod], \"t\", lazy.window.toggle_floating(), desc=\"Toggle tiled/floating\"),\n Key([mod], \"f\", lazy.window.toggle_fullscreen(), desc=\"Toggle fullscreen\"),\n\n Key([mod, \"control\"], \"r\", lazy.restart(), desc=\"Restart qtile\"),\n 
Key([mod, \"control\"], \"q\", lazy.shutdown(), desc=\"Shutdown qtile\"),\n Key([mod], \"r\", lazy.spawn(launcher), desc=\"Open rofi\"),\n Key([mod], \"w\", lazy.to_screen(0), desc=\"Focus screen 0\"),\n Key([mod], \"e\", lazy.to_screen(1), desc=\"Focus screen 1\"),\n Key([mod, \"shift\"], \"w\", lazy.window.toscreen(0), desc=\"Move window to screen 0\"),\n Key([mod, \"shift\"], \"e\", lazy.window.toscreen(1), desc=\"Move window to screen 1\"),\n]\n\ngroup_labels = [(\"[1] term\", {'layout': 'monadtall'}),\n (\"[2] emacs\", {'layout': 'monadwide'}),\n (\"[3] browser\", {'layout': 'monadtall'}),\n (\"[4] debug\", {'layout': 'monadtall'}),\n (\"[5] music\", {'layout': 'floating'}),\n (\"[6] game\", {'layout': 'monadtall'})]\n\ngroups = [Group(name, **kwargs) for name, kwargs in group_labels]\n\nfor i, (name, kwargs) in enumerate(group_labels, 1):\n keys.append(Key([mod], str(i), lazy.group[name].toscreen(toggle=False))) # Switch to another group\n keys.append(Key([mod, \"shift\"], str(i), lazy.window.togroup(name))) # Send current window to another group\n\n# Add help key\nkeys_str = \"\"\nfor key in keys:\n keypress = key.modifiers + [key.key]\n keypress_str = \"-\".join(keypress)\n keys_str += keypress_str + \": \" + key.desc + \"\\n\"\n\nhelp_desc = \"Show qtile keys in rofi\"\nhelp_key = \"p\"\nkeys_str += f\"{mod}-{help_key}: {help_desc}\"\n\ndef get_rofi_command(s, prompt):\n return {\n \"shell\": True,\n \"cmd\":f\"echo -en '{s}' | rofi -dmenu -p '{prompt}'\",\n }\n\nkeys.extend([\n Key([mod], help_key, lazy.spawn(**get_rofi_command(keys_str, \"Qtile keys\")), desc=help_desc),\n])\n\ndefault_layout_settings = {\n \"margin\": 4,\n \"ratio\": 0.6,\n \"border_width\": 2,\n \"border_focus\": colors[\"border-focus\"],\n \"border_normal\": colors[\"border-normal\"],\n}\n\nlayouts = [\n layout.MonadTall(**default_layout_settings),\n layout.MonadWide(**default_layout_settings),\n layout.TreeTab(**default_layout_settings),\n layout.Floating(**default_layout_settings),\n]\n\nwidget_defaults = dict(\n margin=8,\n font='Iosevka Nerd Font',\n fontsize=16,\n padding=0,\n)\nextension_defaults = widget_defaults.copy()\n\nhas_battery = False\n\nif len(os.listdir('/sys/class/power_supply')) > 0:\n has_battery = True\n\ndef init_base_widgets():\n return [\n widget.CurrentLayoutIcon(\n scale=0.6,\n padding=8,\n ),\n widget.GroupBox(\n hide_unused=True,\n disable_drag=True,\n active=colors[\"bar-widget-group-active\"],\n inactive=colors[\"bar-widget-group-inactive\"],\n this_current_screen_border=colors[\"bar-accent\"],\n highlight_color=[colors[\"bar-bg\"], colors[\"bar-bg\"]],\n highlight_method=\"line\",\n )\n ]\n\nwidgets = init_base_widgets() + [\n widget.Prompt(),\n widget.Spacer(),\n widget.Chord(\n chords_colors={\n 'launch': (\"#ff0000\", \"#ffffff\"),\n },\n name_transform=lambda name: name.upper(),\n ),\n widget.Pomodoro(\n fontsize=24,\n background=colors[\"bar-widget-pomodoro\"],\n color_active=colors[\"bar-bg\"],\n color_break=colors[\"bar-bg\"],\n color_inactive=colors[\"bar-bg\"],\n timer_visible=False,\n prefix_active=\" \",\n prefix_break=\" \",\n prefix_inactive=\"\",\n prefix_long_break=\" \",\n prefix_paused=\"\",\n padding=10,\n ),\n widget.TextBox(\n text=\"  \",\n background=colors[\"bar-widget-time\"],\n foreground=colors[\"bar-bg\"],\n fontsize=28,\n ),\n widget.Clock(\n background=colors[\"bar-widget-time\"],\n foreground=colors[\"bar-bg\"],\n format='%Y-%m-%d %a %I:%M %p '),\n]\nif has_battery:\n widgets += [\n widget.TextBox(\n text=\"  \",\n 
background=colors[\"bar-widget-bat\"],\n foreground=colors[\"bar-bg\"],\n fontsize=28,\n ),\n widget.Battery(\n background=colors[\"bar-widget-bat\"],\n foreground=colors[\"bar-bg\"],\n format='{percent:2.0%} '\n )\n ]\n\nwidgets += [\n widget.TextBox(\n text=\"  \",\n foreground=colors[\"bar-bg\"],\n background=colors[\"bar-widget-pin\"],\n fontsize=28,\n ),\n widget.Systray(\n background=colors[\"bar-widget-pin\"],\n ),\n widget.TextBox(\n background=colors[\"bar-widget-pin\"],\n text=\" \",\n )\n]\n\nscreens = [\n Screen(\n top=bar.Bar(widgets,\n 28,\n background=colors[\"bar-bg\"],\n foreground=colors[\"bar-fg\"],\n margin=[0, 0, 0, 0]),\n ),\n Screen(\n top=bar.Bar(init_base_widgets(),\n 28,\n background=colors[\"bar-bg\"],\n foreground=colors[\"bar-fg\"],\n margin=[0, 0, 0, 0]),\n ),\n]\n\n# Drag floating layouts.\nmouse = [\n Drag([mod], \"Button1\", lazy.window.set_position_floating(),\n start=lazy.window.get_position()),\n Drag([mod], \"Button3\", lazy.window.set_size_floating(),\n start=lazy.window.get_size()),\n Click([mod], \"Button2\", lazy.window.bring_to_front())\n]\n\ndgroups_key_binder = None\ndgroups_app_rules = [] # type: List\nmain = None # WARNING: this is deprecated and will be removed soon\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nauto_fullscreen = True\nfocus_on_window_activation = \"smart\"\n\nwmname = \"qtile\"\n\n@hook.subscribe.startup_once\ndef start_once():\n home = os.path.expanduser(\"~\")\n subprocess.call([home + \"/.config/qtile/autostart.sh\"])\n","sub_path":"home/wm/qtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"126537916","text":"# 此文件采集人脸信息数据\n# 采集过程包括(以含有人脸的“example.jpg”图片为例):\n# (1)将“example.jpg”图片face_data.py所在的文件夹\n# (2)增加识别example.jpg的代码,并生成相应的人脸数据(example_face_encoding)\n# example_image = face_recognition.load_image_file(\"example.jpg\")\n# example_face_encoding = face_recognition.face_encodings(example_image)[0]\n# (3)将生成的 example_face_encoding数据存入known_face_encodings数据列表中\n# (4)将图片中存在的人的名字存入known_face_names数据列表中(以字符串形式存入)\n\nimport face_recognition\nimport cv2\nimport numpy as np\n\n# 识别obama图片\nobama_image = face_recognition.load_image_file(\"obama.jpg\")\nobama_face_encoding = face_recognition.face_encodings(obama_image)[0]\n\n# 识别biden图片\nbiden_image = face_recognition.load_image_file(\"biden.jpg\")\nbiden_face_encoding = face_recognition.face_encodings(biden_image)[0]\n\n# #可将下面的XX_image.jpg改成任意你所添加图片的名称,\n# #并更改known_face_encodings和known_face_names中相应的XX,即可添加新的人脸数据\n# # 识别XX图片,并编码成特征向量\n# XX_image = face_recognition.load_image_file(\"XX_image.jpg\")\n# XX_image_face_encoding = face_recognition.face_encodings(XX_image)[0]\n\n# 创建已识别的人脸信息编码向量和相应的姓名,编码向量和姓名的顺序保持一致\nknown_face_encodings = [\n obama_face_encoding,\n biden_face_encoding,\n XX\n]\nknown_face_names = [\n \"Barack Obama\",\n \"Joe Biden\",\n \"XX\"\n]\n\n# 初始化一些变量参数\nface_locations = []\nface_encodings = []\nface_names = []\n\n\n","sub_path":"face_data.py","file_name":"face_data.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"239195511","text":"import numpy\nfrom scipy import stats\n\n\nclass Sampler:\n def __init__(self):\n self._configs = {}\n\n self._weight = 1\n\n self._sample_map = {\n 'laplace': numpy.random.laplace,\n 'normal': numpy.random.normal\n }\n\n self._pdf_map = {\n 'laplace': 
stats.laplace.pdf,\n 'normal': stats.norm.pdf\n }\n\n def weight(self):\n return self._weight\n\n def add_config(self, old, new, f=lambda x: x, g=lambda x: x):\n assert isinstance(old, str) and old in self._sample_map, old + ' sampling mechanism not supported.'\n assert isinstance(old, str) and new in self._sample_map, new + ' sampling mechanism not supported.'\n self._configs[old] = (new, f, g)\n\n def _sample(self, mech, loc=0.0, scale=1.0):\n assert isinstance(mech, str) and mech in self._sample_map\n\n if mech in self._configs:\n new_mech, f, g = self._configs[mech]\n new_loc, new_scale = f(loc), g(scale)\n res = self._sample_map[new_mech](new_loc, new_scale)\n self._weight *= \\\n self._pdf_map[mech](res, new_loc, new_scale) / self._pdf_map[new_mech](res, new_loc, new_scale)\n return res\n else:\n return self._sample_map[mech](loc, scale)\n\n def laplace(self, loc=0.0, scale=1.0):\n return self._sample('laplace', loc, scale)\n\n def normal(self, loc=0.0, scale=1.0):\n return self._sample('normal', loc, scale)\n","sub_path":"statdp/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"48910037","text":"import os\nimport io\nfrom log import setup_logger\nimport subprocess\nimport datetime\nimport configparser\nimport shutil\nimport logging\nfrom multiprocessing import Manager, Lock, Process\nfrom threading import Thread\nfrom qlf_models import QLFModels\n\nqlf_root = os.getenv('QLF_ROOT')\ncfg = configparser.ConfigParser()\n\ncfg.read('%s/framework/config/qlf.cfg' % qlf_root)\nqlconfig = cfg.get('main', 'qlconfig')\nlogmain = cfg.get('main', 'logfile')\nlogpipeline = cfg.get('main', 'logpipeline')\ndesi_spectro_redux = cfg.get('namespace', 'desi_spectro_redux')\n\nlogger = logging.getLogger(\"main_logger\")\npipe_logger = setup_logger('logpipeline', logpipeline)\n\n\nclass QLFProcess(object):\n \"\"\" Class responsible for managing Quick Look pipeline process. \"\"\"\n\n def __init__(self, data):\n self.pipeline_name = 'Quick Look'\n self.data = data\n self.models = QLFModels()\n\n output_dir = os.path.join(\n 'exposures',\n self.data.get('night'),\n self.data.get('zfill')\n )\n\n output_full_dir = os.path.join(desi_spectro_redux, output_dir)\n\n # Remove old dir\n if os.path.isdir(output_full_dir):\n shutil.rmtree(output_full_dir)\n\n # Make output dir\n os.makedirs(output_full_dir)\n\n self.data['output_dir'] = output_dir\n\n def start_process(self):\n \"\"\" Start pipeline. \"\"\"\n\n self.data['start'] = datetime.datetime.now().replace(microsecond=0)\n\n # create process in database and obtain the process id\n process = self.models.insert_process(\n self.data,\n self.pipeline_name\n )\n\n self.data['process_id'] = process.id\n self.data['status'] = process.status\n\n # TODO: ingest configuration file used, this should be done by process\n # self.models.insert_config(process.id)\n\n pipe_logger.info('...{}'.format('\\n' * 20))\n pipe_logger.info('Process ID {}'.format(process.id))\n pipe_logger.info('ExpID {} started.'.format(self.data.get('expid')))\n\n return process.id\n\n def finish_process(self):\n \"\"\" Finish pipeline. 
\"\"\"\n\n self.data['end'] = datetime.datetime.now().replace(microsecond=0)\n\n self.data['duration'] = self.data.get('end') - self.data.get('start')\n\n pipe_logger.info(\"ExpID {} ended (runtime: {}).\".format(\n self.data.get('expid'),\n str(self.data.get('duration'))\n ))\n\n proc = Thread(target=self.ingest_parallel_qas)\n proc.start()\n\n\nclass Jobs(QLFProcess):\n\n def __init__(self, data):\n\n super().__init__(data)\n self.num_cameras = len(self.data.get('cameras'))\n\n # TODO: improvements - get stages/steps in database\n self.stages = [\n {\n \"display_name\": \"Pre Processing\",\n \"start\": {\"regex\": \"Starting to run step Preproc\", \"count\": 0},\n \"end\": {\"regex\": \"Starting to run step BoxcarExtract\", \"count\": 0}\n },\n {\n \"display_name\": \"Spectral Extraction\",\n \"start\": {\"regex\": \"Starting to run step BoxcarExtract\", \"count\": 0},\n \"end\": {\"regex\": \"Starting to run step ApplyFiberFlat_QL\", \"count\": 0}\n },\n {\n \"display_name\": \"Fiber Flattening\",\n \"start\": {\"regex\": \"Starting to run step ApplyFiberFlat_QL\", \"count\": 0},\n \"end\": {\"regex\": \"Starting to run step SkySub\", \"count\": 0}\n },\n {\n \"display_name\": \"Sky Subtraction\",\n \"start\": {\"regex\": \"Starting to run step SkySub\", \"count\": 0},\n \"end\": {\"regex\": \"Pipeline completed\", \"count\": 0}\n }\n ]\n\n def start_jobs(self):\n \"\"\" Distributes the cameras for parallel processing. \"\"\"\n\n procs = list()\n return_cameras = Manager().list()\n resumelog_lock = Lock()\n\n for camera in self.data.get('cameras'):\n camera['start'] = datetime.datetime.now().replace(\n microsecond=0\n )\n\n logname = os.path.join(\n self.data.get('output_dir'),\n \"run-%s.log\" % camera.get('name')\n )\n\n camera['logname'] = logname\n\n job = self.models.insert_job(\n process_id=self.data.get('process_id'),\n camera=camera.get('name'),\n start=camera.get('start'),\n logname=camera.get('logname')\n )\n\n camera['job_id'] = job.id\n\n args = (\n self.data,\n camera,\n return_cameras,\n resumelog_lock,\n )\n\n proc = Thread(target=self.start_parallel_job, args=args)\n proc.start()\n procs.append(proc)\n\n for proc in procs:\n proc.join()\n\n self.data['cameras'] = return_cameras\n\n def start_parallel_job(self, data, camera, return_cameras, lock):\n \"\"\" Execute QL Pipeline by camera \"\"\"\n\n cmd = [\n 'desi_quicklook',\n '-i', qlconfig,\n '-n', data.get('night'),\n '-c', camera.get('name'),\n '-e', str(data.get('expid')),\n '--rawdata_dir', data.get('desi_spectro_data'),\n '--mergeQA',\n '--specprod_dir', desi_spectro_redux\n ]\n\n logname = io.open(os.path.join(\n desi_spectro_redux,\n camera.get('logname')\n ), 'wb')\n\n cwd = os.path.join(\n desi_spectro_redux,\n data.get('output_dir')\n )\n\n with subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, cwd=cwd) as process:\n while process.poll() is None:\n line = process.stdout.readline()\n if not line:\n break\n self.resume_log(line, camera.get('name'), lock)\n logname.write(line)\n logname.flush()\n\n retcode = process.wait()\n\n logname.close()\n\n camera['end'] = datetime.datetime.now().replace(microsecond=0)\n camera['status'] = 0\n camera['duration'] = str(\n camera.get('end') - camera.get('start')\n )\n\n if retcode < 0:\n camera['status'] = 1\n\n return_cameras.append(camera)\n\n def ingest_parallel_qas(self):\n pipe_logger.info('Ingesting QAs...')\n start_ingestion = datetime.datetime.now().replace(microsecond=0)\n\n proc_qas = list()\n\n for camera in self.data.get('cameras'):\n 
output_path = os.path.join(\n desi_spectro_redux,\n self.data.get('output_dir'),\n 'ql-*-%s-%s.yaml' % (\n camera.get('name'),\n self.data.get('zfill')\n )\n )\n\n args = (\n camera.get('job_id'),\n camera.get('end'),\n camera.get('status'),\n output_path\n )\n\n proc = Process(target=self.models.update_job, args=args)\n proc.start()\n proc_qas.append(proc)\n\n for proc in proc_qas:\n proc.join()\n\n self.models.update_process(\n process_id=self.data.get('process_id'),\n end=self.data.get('end'),\n process_dir=self.data.get('output_dir'),\n status=self.data.get('status')\n )\n\n duration_ingestion = datetime.datetime.now().replace(microsecond=0) - start_ingestion\n\n pipe_logger.info(\"(ExpID {}) Ingestion complete: {}.\".format(\n self.data.get('expid'), str(duration_ingestion)))\n pipe_logger.info(\"Total runtime: %s.\" % (self.data.get('duration') + duration_ingestion))\n pipe_logger.info(\"ExpID {} is ready for analysis\".format(self.data.get('expid')))\n\n def resume_log(self, line, camera, lock):\n \"\"\" \"\"\"\n\n lock.acquire()\n\n try:\n line = line.decode(\"utf-8\").replace('\\n', '')\n line_str = line.split(':')[-1]\n\n if line.find('ERROR') > -1:\n pipe_logger.error(\"ERROR: Camera {}: {}\".format(camera, line_str))\n elif line.find('CRITICAL') > -1:\n pipe_logger.critical(\"CRITICAL: Camera {}: {}\".format(camera, line_str))\n else:\n for stage in self.stages:\n stage_start = stage.get('start')\n stage_end = stage.get('end')\n\n if line.find(stage_end.get('regex')) > -1:\n stage_end['count'] += 1\n\n if stage_end.get('count') == self.num_cameras:\n stage_end['time'] = datetime.datetime.now().replace(microsecond=0)\n pipe_logger.info(\n '{} ended (runtime: {}).'.format(\n stage.get('display_name'),\n stage_end.get('time') - stage_start.get('time')\n )\n )\n\n if line.find(stage_start.get('regex')) > -1:\n stage_start['count'] += 1\n\n if 'time' not in stage_start:\n stage_start['time'] = datetime.datetime.now().replace(microsecond=0)\n pipe_logger.info('{} started.'.format(stage.get('display_name')))\n\n except Exception as err:\n pipe_logger.info(err)\n\n lock.release()\n\n\nif __name__ == \"__main__\":\n print('Standalone execution...')\n","sub_path":"backend/framework/bin/qlf_pipeline.py","file_name":"qlf_pipeline.py","file_ext":"py","file_size_in_byte":9639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"395619741","text":"# Copyright 2021 Zilliz. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport unittest\n\nfrom towhee.models.multiscale_vision_transformers.create_multiscale_vision_transformers \\\n import create_multiscale_vision_transformers\n\n\nclass MultiscaleVisionTransformersTest(unittest.TestCase):\n def test_multiscale_vision_transformers(self):\n # Test MViT with 3D case.\n num_head = 100\n batch_size = 1\n fake_input = torch.rand(batch_size, 3, 4, 28, 28)\n model = create_multiscale_vision_transformers(\n spatial_size=28,\n temporal_size=4,\n patch_embed_dim=12,\n depth=1,\n head_num_classes=num_head,\n pool_kv_stride_adaptive=[1, 2, 2],\n )\n output = model(fake_input)\n gt_shape_tensor = torch.rand(batch_size, num_head)\n self.assertTrue(output.shape == gt_shape_tensor.shape)\n # Test MViT with 3D case with pool first.\n num_head = 100\n batch_size = 1\n fake_input = torch.rand(batch_size, 3, 4, 28, 28)\n model = create_multiscale_vision_transformers(\n spatial_size=28,\n temporal_size=4,\n patch_embed_dim=12,\n depth=1,\n head_num_classes=num_head,\n pool_first=True,\n pool_q_stride_size=[[0, 1, 2, 2]],\n )\n output = model(fake_input)\n gt_shape_tensor = torch.rand(batch_size, num_head)\n self.assertTrue(output.shape == gt_shape_tensor.shape)\n\n # Test MViT with 2D case for images.\n conv_patch_kernel = (7, 7)\n conv_patch_stride = (4, 4)\n conv_patch_padding = (3, 3)\n num_head = 100\n batch_size = 1\n fake_input = torch.rand(batch_size, 3, 28, 28)\n model = create_multiscale_vision_transformers(\n spatial_size=(28, 28),\n temporal_size=1,\n patch_embed_dim=12,\n depth=1,\n head_num_classes=num_head,\n use_2d_patch=True,\n conv_patch_embed_kernel=conv_patch_kernel,\n conv_patch_embed_stride=conv_patch_stride,\n conv_patch_embed_padding=conv_patch_padding,\n )\n output = model(fake_input)\n gt_shape_tensor = torch.rand(batch_size, num_head)\n self.assertTrue(output.shape == gt_shape_tensor.shape)\n\n # Test MViT without patch_embed.\n num_head = 100\n batch_size = 1\n fake_input = torch.rand(batch_size, 8, 12)\n model = create_multiscale_vision_transformers(\n spatial_size=(8, 1),\n temporal_size=1,\n patch_embed_dim=12,\n depth=1,\n enable_patch_embed=False,\n head_num_classes=num_head,\n )\n output = model(fake_input)\n gt_shape_tensor = torch.rand(batch_size, num_head)\n self.assertTrue(output.shape == gt_shape_tensor.shape)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"towhee/tests/models/mvit/test_multiscale_vision_transformers.py","file_name":"test_multiscale_vision_transformers.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"540525409","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: D:\\djangosubscribe\\src\\djangosubscribe\\migrations\\0001_initial.py\n# Compiled at: 2020-02-06 02:55:55\n# Size of source mod 2**32: 945 
bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [\n migrations.CreateModel(name='SubscriberModel',\n fields=[\n (\n 'id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n (\n 'first_name', models.CharField(blank=True, max_length=12, null=True)),\n (\n 'last_name', models.CharField(blank=True, max_length=12, null=True)),\n (\n 'username', models.CharField(blank=True, max_length=15, null=True)),\n (\n 'age', models.PositiveIntegerField(blank=True, null=True)),\n (\n 'mobile_number', models.PositiveIntegerField(blank=True, null=True)),\n (\n 'email', models.EmailField(max_length=75))])]","sub_path":"pycfiles/djangosubscribe-1.0.2-py3-none-any/0001_initial.cpython-37.py","file_name":"0001_initial.cpython-37.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"9772992","text":"\"\"\"Contains the models for the form_creator app\"\"\"\nfrom django.db import models\nfrom django.db.models import Q\n\n# Create your models here.\nclass Form(models.Model):\n \"Model for an form\"\n name = models.CharField(unique=True, max_length=50, error_messages={'unique':'Ya existe un formulario registrado con este nombre.'})\n\nclass Question(models.Model):\n \"Model for questions\"\n text = models.TextField()\n \"\"\"\n 1. Falso o verdadero\n 2. Texto\n 3. Multiples opciones, una respuesta\n 4. Multiples opciones, multiples respuestas\n 5. Fecha hora\n \"\"\"\n question_type = models.IntegerField()\n empty = models.BooleanField()\n form = models.ForeignKey(Form, on_delete=models.CASCADE, null=False)\n position = models.IntegerField(null=False)\n\n class Meta:\n constraints = [\n models.CheckConstraint(\n check=Q(question_type__lte=5),\n name=\"question_type_upper\"),\n models.CheckConstraint(\n check=Q(question_type__gte=1),\n name=\"question_type_lower\"),\n ]\n\nclass Answer(models.Model):\n \"Model for answers\"\n question = models.ForeignKey(Question, on_delete=models.CASCADE)\n text = models.TextField()\n\nclass PollCampaign(models.Model):\n \"\"\"Model for poll campaigns \"\"\"\n issabel_campaign = models.IntegerField(null=False, unique=True)\n form = models.ForeignKey(Form, on_delete=models.CASCADE, null=False)\n\nclass QuestionAnswers(models.Model):\n \"Model for question answers\"\n campaign = models.ForeignKey(PollCampaign, on_delete=models.CASCADE, null=False)\n client = models.CharField(max_length=20, null=False)\n question = models.TextField()\n asnwer = models.TextField()\n text_answer = models.TextField(null=True)\n class Meta:\n constraints = [\n models.CheckConstraint(\n check=(Q(asnwer__isnull=False) | Q(text_answer__isnull=False)),\n name=\"answer_not_null\")\n ]\n","sub_path":"form_creator/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"432627487","text":"class Solution:\n def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:\n # 自定义比较函数\n # d = {v: i for i, v in enumerate(arr2)}\n # return sorted(arr1, key=lambda v: d.get(v, len(arr2) + v))\n\n d = Counter(arr1)\n res = []\n for v in arr2:\n res.extend([v] * d.pop(v))\n # res.extend(sorted(v for v, i in d.items() for _ in range(i)))\n res.extend(v for v, n in sorted(d.items()) for _ in range(n))\n return 
res","sub_path":"Week_08/leetcode1122.py","file_name":"leetcode1122.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"236160936","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 18 13:48:15 2018\r\n\r\n@author: h.muhammed\r\n\"\"\"\r\n\r\nimport threading\r\nfrom queue import Queue\r\nfrom spider import Spider\r\nfrom domain import *\r\nfrom general import *\r\n\r\n\r\nPROJECT_NAME = 'file_name'\r\nHOMEPAGE = 'https://www.example.com/'\r\nDOMAIN_NAME = get_domain_name(HOMEPAGE)\r\nQUEUE_FILE = PROJECT_NAME + '/queue.txt'\r\nCRAWLED_FILE = PROJECT_NAME + '/crawled.txt'\r\nNUMBER_OF_THREADS = 8\r\n\r\nqueue = Queue()\r\n\r\nSpider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)\r\n\r\n#start create the worker\r\ndef create_workers():\r\n for _ in range(NUMBER_OF_THREADS):\r\n t = threading.Thread(target=work)\r\n t.daemon = True\r\n t.start()\r\n \r\n#give job to them\r\ndef work():\r\n while True:\r\n url = queue.get()\r\n Spider.crawl_page(threading.current_thread().name, url)\r\n queue.task_done()\r\n\r\n\r\n# each queue link is new job (this is like a to do list)\r\ndef create_jobs():\r\n for link in file_to_set(QUEUE_FILE):\r\n queue.put(link)\r\n queue.join()\r\n crawl()\r\n\r\n\r\n\r\n#check if there any files in the queue and start crawl them\r\ndef crawl():\r\n queued_links = file_to_set(QUEUE_FILE)\r\n if len(queued_links) > 0:\r\n print(str(len(queued_links)) + ' links in the queue')\r\n create_jobs()\r\n \r\n \r\n\r\ncreate_workers()\r\ncrawl()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"37908605","text":"import time\r\nimport pandas as pd\r\nfrom pymongo import MongoClient\r\n\r\nclient = MongoClient()\r\ndb = client.practica_mongo\r\npublications = db.publications\r\n\r\n# Pregunta 1.- Listado de todas las publicaciones de un autor determinado.\r\nstart = time.time()\r\npipeline_answer_1 = [{\"$unwind\" : \"$authors\"},{ \"$match\" : { \"authors\" : \"Joachim Biskup\" }}, {\"$project\" : { \"title\" : 1 }}]\r\nanswer_1 = db.publications.aggregate(pipeline_answer_1)\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 1 is ', end - start)\r\nprint ('El listado de las publicaciones de Joachim Biskup es:')\r\nlist_answer_1 = []\r\nfor line in answer_1:\r\n list_answer_1.append(line)\r\nanswer_1_pd = pd.DataFrame.from_records(list_answer_1)\r\nprint(answer_1_pd)\r\n\r\n# Pregunta 2.- Numero de publicaciones de un autor determinado.\r\nstart = time.time()\r\npipeline_answer_2 = [{\"$unwind\" : \"$authors\"},{ \"$match\" : { \"authors\" : \"Joachim Biskup\" }}, {\"$project\" : { \"title\" : 1 }}, {\"$count\": \"title\"}]\r\nanswer_2 = db.publications.aggregate(pipeline_answer_2)\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 2 is ', end - start)\r\nprint ('El numero de publicaciones de Joachim Biskup es ')\r\nlist_answer_2 = []\r\nfor line in answer_2:\r\n list_answer_2.append(line)\r\nanswer_2_pd = pd.DataFrame.from_records(list_answer_2)\r\nprint(answer_2_pd)\r\n\r\n\r\n# Pregunta 3.- Numero de articulos en revista para el año 2017.\r\nstart = time.time()\r\nanswer_3 = db.publications.find({\"$and\": [{\"date\" : {\"$regex\": \"2017\"}}, {\"type\" :\"article\"}]}).count()\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 3 is ', end - start)\r\nprint ('El 
numero de articulos en revista para el anyo 2017 es ',answer_3)\r\n\r\n# Pregunta 4.- Numero de autores ocasionales, es decir, que tengan menos de 5 publicaciones en total.\r\npipeline_answer_4 = [{\"$unwind\": \"$authors\"},\r\n {\"$sortByCount\":\"$authors\"},\r\n { \"$match\": {\"count\": { \"$lt\":5}}},\r\n {\"$count\": \"authors\"}]\r\nstart = time.time()\r\nanswer_4 = db.publications.aggregate(pipeline_answer_4, allowDiskUse=True).hint({\"authors_1\"})\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 4 is ', end - start)\r\nprint ('El numero de autores con menos de 5 publicaciones es ')\r\nlist_answer_4 = []\r\nfor line in answer_4:\r\n list_answer_4.append(line)\r\nanswer_4_pd = pd.DataFrame.from_records(list_answer_4)\r\nprint(answer_4_pd)\r\n\r\n# Pregunta 5.- Numero de articulos de revista (article) y numero de articulos en congresos\r\n# (inproceedings) de los diez autores con mas publicaciones totales.\r\npipeline_answer_5 =[{\"$unwind\": \"$authors\"},\r\n {\"$group\":{\"_id\": \"$authors\",\r\n \"count_all_publications\": {\"$sum\":1},\r\n \"count_article\": {\"$sum\" : {\"$cond\" : { \"if\": { \"$eq\": [\"$type\", \"article\"]}, \"then\": 1, \"else\": 0}}},\r\n \"count_inproceedings\": {\"$sum\" : {\"$cond\" : { \"if\": { \"$eq\": [\"$type\", \"inproceedings\"]}, \"then\": 1, \"else\": 0}}}}},\r\n {\"$sort\": {\"count_all_publications\": -1}},\r\n {\"$limit\": 10}]\r\nstart = time.time()\r\nanswer_5 = db.publications.aggregate(pipeline_answer_5, allowDiskUse=True).hint({\"authors_1\"})\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 5 is ', end - start)\r\nprint ('El numero de articulos de revista y numero de articulos en congresos de los diez autores con mas publicaciones totales viene dado a continuacion:')\r\npd.set_option('display.max_columns', 10)\r\nlist_answer_5 = []\r\nfor line in answer_5:\r\n list_answer_5.append(line)\r\nanswer_5_pd = pd.DataFrame.from_records(list_answer_5)\r\nprint(answer_5_pd)\r\n\r\n# Pregunta 6.- Numero medio de autores de todas las publicaciones que tenga en su conjunto de datos.\r\npipeline_answer_6 =[{\"$project\": { \"numAuthors\": { \"$size\": \"$authors\" }}},\r\n {\"$group\":{\"_id\": \"null\",\"MeanOfAuthors\": {\"$avg\": \"$numAuthors\"}}},\r\n {\"$project\" : {\"MeanOfAuthors\":1, \"_id\":0}}]\r\nstart = time.time()\r\nanswer_6 = db.publications.aggregate(pipeline_answer_6).hint({\"$natural\" :1})\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 6 is ', end - start)\r\nprint ('El numero medio de autores de todas las mublicaciones del conjunto de datos es:')\r\nlist_answer_6 = []\r\nfor line in answer_6:\r\n list_answer_6.append(line)\r\nanswer_6_pd = pd.DataFrame.from_records(list_answer_6)\r\nprint(answer_6_pd)\r\n\r\n# Pregunta 7.- Listado de coautores de un autor (Se denomina coautor a cualquier persona que haya\r\n# firmado una publicacion).\r\npipeline_answer_7 = [{\"$project\": {'authors': 1}},\r\n {\"$match\": {\"authors\": 'Joachim Biskup'}},\r\n {\"$unwind\" : '$authors' },\r\n {\"$group\": {\"_id\":\"$authors\",\r\n \"coauthors\": { \"$addToSet\": {\"$cond\" : { \"if\": { \"$eq\": [\"$authors\", \"Joachim Biskup\"]}, \"then\": \"null\", \"else\": \"$authors\"}}}}},\r\n {\"$project\" : {\"_id\":0,\r\n \"coauthors\": { \"$cond\": {\"if\": {\"$eq\": [ \"[null]\", \"$coauthors\" ] },\"then\": \"$$REMOVE\",\"else\": \"$coauthors\"}}}}]\r\nstart = time.time()\r\nanswer_7 = db.publications.aggregate(pipeline_answer_7)\r\nend = 
time.time()\r\nprint('The time taken, in seconds, for query number 7 is ', end - start)\r\nprint ('El listado de los coautores de Joachim Biskup es:')\r\nlist_answer_7 = []\r\nfor line in answer_7:\r\n    list_answer_7.append(line)\r\nanswer_7_pd = pd.DataFrame.from_records(list_answer_7)\r\nprint(answer_7_pd)\r\n\r\n# Pregunta 8.- Edad de los 5 autores con un periodo de publicaciones mas largo (Se considera la Edad\r\n# de un autor al numero de anyos transcurridos desde la fecha de su primera publicacion\r\n# hasta la ultima registrada).\r\npipeline_answer_8 = [{\"$unwind\" : '$authors' },\r\n                     {\"$group\" : {\"_id\":\"$authors\",\r\n                                  \"max_year\" : {\"$max\" : {\"$substr\": [ \"$date\", 0, 4 ]}},\r\n                                  \"min_year\" :{\"$min\" : {\"$substr\": [ \"$date\", 0, 4 ]}}}},\r\n                     {\"$addFields\": {\"max_year_int\": {\"$toInt\": \"$max_year\"},\r\n                                     \"min_year_int\": {\"$toInt\": \"$min_year\"}}},\r\n                     {\"$project\" : {\"max_year\":0, \"min_year\":0}},\r\n                     {\"$addFields\": {\"ageAuthor\": {\"$subtract\": [\"$max_year_int\",\"$min_year_int\"]}}},\r\n                     {\"$sort\": {\"ageAuthor\": -1}},{\"$limit\": 5}]\r\nstart = time.time()\r\nanswer_8 = db.publications.aggregate(pipeline_answer_8, allowDiskUse=True, hint=\"date_1_authors_1\")\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 8 is ', end - start)\r\nprint ('La edad de los 5 autores con un periodo de publicacion mas largo es:')\r\nlist_answer_8 = []\r\nfor line in answer_8:\r\n    list_answer_8.append(line)\r\nanswer_8_pd = pd.DataFrame.from_records(list_answer_8)\r\nprint(answer_8_pd)\r\n\r\n# Pregunta 9.- Numero de autores novatos, es decir, que tengan una Edad menor de 5 anyos. Se\r\n# considera la Edad de un autor al numero de anyos transcurridos desde la fecha de su\r\n# primera publicacion hasta la ultima registrada\r\npipeline_answer_9 = [{\"$unwind\" : '$authors' },\r\n                     {\"$group\" :\r\n                         {\"_id\":\"$authors\",\r\n                          \"max_year\" : {\"$max\" : {\"$substr\": [ \"$date\", 0, 4 ]}},\r\n                          \"min_year\" :{\"$min\" : {\"$substr\": [ \"$date\", 0, 4 ]}}}},\r\n                     {\"$addFields\": { \"max_year_int\": {\"$toInt\": \"$max_year\"},\r\n                                      \"min_year_int\": {\"$toInt\": \"$min_year\"}}},\r\n                     {\"$project\" : {\"max_year\":0, \"min_year\":0}},\r\n                     {\"$addFields\": {\"ageAuthor\": {\"$subtract\": [\"$max_year_int\",\"$min_year_int\"]}}},\r\n                     {\"$match\": { \"ageAuthor\": { \"$lt\": 5} }},{\"$count\": \"ageAuthor\"}]\r\nstart = time.time()\r\nanswer_9 = db.publications.aggregate(pipeline_answer_9, allowDiskUse=True, hint=\"date_1_authors_1\")\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 9 is ', end - start)\r\nprint ('El numero de autores novatos es el siguiente:')\r\nlist_answer_9 = []\r\nfor line in answer_9:\r\n    list_answer_9.append(line)\r\nanswer_9_pd = pd.DataFrame.from_records(list_answer_9)\r\nprint(answer_9_pd)\r\n\r\n# Pregunta 10.- Porcentaje de publicaciones en revistas con respecto al total de publicaciones.\r\npipeline_answer_10 = [{\"$project\": {\"type\": 1}},\r\n                      {\"$group\":{\"_id\":\"null\",\r\n                                 \"count_article\": {\"$sum\": { \"$cond\" : [{ \"$eq\" : [\"$type\", \"article\"]}, 1, 0]}},\r\n                                 \"count_total\" : {\"$sum\":1}}},{\"$project\": { \"article_percentage\" :{\"$multiply\": [100,{ \"$divide\": [ \"$count_article\", \"$count_total\"]}]}}},\r\n                      {\"$project\": {\"_id\":0}}]\r\nstart = time.time()\r\nanswer_10 = db.publications.aggregate(pipeline_answer_10)\r\nend = time.time()\r\nprint('The time taken, in seconds, for query number 10 is ', end - start)\r\nprint('El porcentaje de 
publicaciones en revistas (articles) con respecto al total de publicaciones es:')\r\nlist_answer_10 = []\r\nfor line in answer_10:\r\n list_answer_10.append(line)\r\nanswer_10_pd = pd.DataFrame.from_records(list_answer_10)\r\nprint(answer_10_pd)\r\n\r\n\r\n","sub_path":"queries_mongodb.py","file_name":"queries_mongodb.py","file_ext":"py","file_size_in_byte":9295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"256258754","text":"import numpy as np\nfrom scipy import fftpack\nfrom scipy.io import wavfile\n\n## These are helper functions\ndef loadAudioFile(filePath):\n \"\"\"\n load the audio file\n :param path: path of the wav file\n :return: a dictionary with sampling freq. and data inside the file\n \"\"\"\n samplingFrequency, data = wavfile.read(filePath)\n dimensions = data.shape\n print(dimensions)\n if len(dimensions) == 2:\n data = data[:, 0]\n print(\"de channeled \")\n dimensions = (dimensions[0],)\n signalDict = {'frequency':samplingFrequency, 'data':data, 'dim': dimensions}\n return signalDict\n\n\ndef fourierTransform(signalDict):\n \"\"\"\n apply fourier transform on the data;\n :param signalDict: a dictionary with:\n 'frequency'-- Sampling frequency\n 'data' -- Signal Data\n :return: Data Transformed with:\n 'transformedData' --- absolute of the fourier transform\n 'dataFrequencies'--- Frequencies of the signal\n \"\"\"\n signal = signalDict['data']\n samplingFrequency = signalDict['frequency']\n dim = signalDict['dim']\n if len(dim) == 2 :\n data_ft = fftpack.fft2(signal)\n data_freqs = fftpack.fftfreq(len(signal), d= 1/samplingFrequency)\n else:\n data_ft = fftpack.rfft(signal)\n data_freqs = fftpack.rfftfreq(len(signal), d=1 / samplingFrequency)\n dataDict = {'transformedData': data_ft, 'dataFrequencies': data_freqs}\n print(\"the data\", data_ft)\n\n return dataDict\n\n\ndef inverseFourierTransform(transfomerdData, dim):\n \"\"\"\n apply inverse Fourier Transform\n :param: transformedData: the fourier transformed data\n :return: Real inverse transform data\n \"\"\"\n if len(dim) == 2:\n print(\"2 dimensional inverse\")\n dataInverse = np.real(fftpack.ifft2(transfomerdData))\n else:\n dataInverse = (fftpack.irfft(transfomerdData))\n print(\"inverse\", dataInverse)\n\n return dataInverse\n\n\ndef createBands(dataDict):\n \"\"\"\n create bands for the signal\n :param dataDict: a dictionary with:\n 'transformedData' -- the absolute of the fourier transform\n 'dataFrequencies' -- frequencies present in the signal\n\n :return: array of bands within the signal\n \"\"\"\n\n freqs = dataDict['dataFrequencies']\n data = dataDict['transformedData']\n # N = len(data) // 10\n freqBands = (0, 62.5, 125, 250, 500, 10**3, 2*10**3, 4*10**3, 8*10**3, 16*10**3, len(data))\n # freqBands = [N*i for i in range(10)]\n dataBands = []\n for i in range(len(freqBands)-1):\n # bands = [indx for indx, val in enumerate(freqs) if val >= freqBands[i] and val < freqBands[i+1]] ## equal sign هه\n\n bands = []\n for indx, val in enumerate(freqs):\n if val >= freqBands[i] and val < freqBands[i + 1]:\n bands.append(indx)\n\n dataBands.append(data[bands])\n return dataBands\n\ndef windowModification(dataModified, bandIndx, gains):\n \"\"\"\n a helper function to apply window\n :param dataModified: the data to be modified\n :param bandIndx: the indicies of the band\n :param gain: the gain desired\n :return: array data\n \"\"\"\n data = np.copy(dataModified)\n for slider, value in gains.items():\n if type(value) != type(...) 
:\n data[slider] = np.multiply(np.array(data[slider]), value)\n else: pass\n data = np.concatenate(data)\n return data\n\n\ndef applyWindowFunction(sliderID, sliderVal, dataBands, windowType = \"Rectangle\"):\n \"\"\"\n take the value from slider and apply the window given\n\n :param sliderID: integer representing the id of slider\n :param sliderVal: the gain value\n :param dataBands: list of arrays containing the bands os signal\n :param windowType: window mode\n :return: data modified with the mode and gain\n \"\"\"\n bandIndx = sliderID\n gain = sliderVal\n dataModified = np.copy(dataBands)\n if windowType == 'Rectangle':\n dataModified = windowModification(dataModified, bandIndx, gain)\n\n if windowType == 'Hanning':\n for slider, value in gain.items():\n if type(value) != type(...) :\n hanning = np.hanning(len(dataModified[slider])*2)\n low = len(hanning) // 4\n high = 3 * low\n gain[slider] = value * hanning[low : high]\n\n dataModified = windowModification(dataModified, bandIndx, gain)\n\n if windowType == 'Hamming':\n for slider, value in gain.items():\n if type(value) != type(...) :\n # if value is int :\n hamming = np.hamming(len(dataModified[slider])*2)\n low = len(hamming) // 4\n high = 3 * low\n gain[slider] = value * hamming[low : high]\n dataModified = windowModification(dataModified, bandIndx, gain)\n return dataModified\n\n\nif __name__ == '__main__':\n # audioFile = loadAudioFile('audio/Casio-MT-45-16-Beat.wav')\n # # print(audioFile['data'])\n # fourierDict= fourierTransform(audioFile)\n # # print(fourierDict['transformedData'])\n # dataBands = createBands(fourierDict)\n # # print(dataBands)\n # mod = applyWindowFunction(1, 5, dataBands)\n # # print(mod)\n # inv = inverseFourierTransform(mod, audioFile['dim'])\n # print(type(inv))\n # for i in dataBands:\n # print(i)\n # print(np.real(np.concatenate(dataBands)))\n # print(np.real(fourierDict['transformedData']))\n #\n # print(dataBands['dataBands'])\n # dataBands[1] = applyWindowFunction(1, 2, dataBands)\n # dataBands[1] = applyWindowFunction(1, 3, dataBands)\n # dataBands[1] = applyWindowFunction(1, 4, dataBands)\n\n\n array1 = np.array([10, 100, 20])\n array2 = np.array([3, 30, 1])\n result = array1 - array2\n print(result)\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"67831489","text":"#\n# @lc app=leetcode.cn id=72 lang=python3\n#\n# [72] 编辑距离\n#\n# https://leetcode.cn/problems/edit-distance/description/\n#\n# algorithms\n# Hard (62.29%)\n# Likes: 2412\n# Dislikes: 0\n# Total Accepted: 262.6K\n# Total Submissions: 421.5K\n# Testcase Example: '\"horse\"\\n\"ros\"'\n#\n# 给你两个单词 word1 和 word2, 请返回将 word1 转换成 word2 所使用的最少操作数  。\n# \n# 你可以对一个单词进行如下三种操作:\n# \n# \n# 插入一个字符\n# 删除一个字符\n# 替换一个字符\n# \n# \n# \n# \n# 示例 1:\n# \n# \n# 输入:word1 = \"horse\", word2 = \"ros\"\n# 输出:3\n# 解释:\n# horse -> rorse (将 'h' 替换为 'r')\n# rorse -> rose (删除 'r')\n# rose -> ros (删除 'e')\n# \n# \n# 示例 2:\n# \n# \n# 输入:word1 = \"intention\", word2 = \"execution\"\n# 输出:5\n# 解释:\n# intention -> inention (删除 't')\n# inention -> enention (将 'i' 替换为 'e')\n# enention -> exention (将 'n' 替换为 'x')\n# exention -> exection (将 'n' 替换为 'c')\n# exection -> execution (插入 'u')\n# \n# \n# \n# \n# 提示:\n# \n# \n# 0 <= word1.length, word2.length <= 500\n# word1 和 word2 由小写英文字母组成\n# \n# \n#\n\n# @lc code=start\nINF = float('inf')\n\n\nclass Solution:\n def minDistance(self, word1: str, word2: str) -> int:\n l1, l2 = 
len(word1), len(word2)\n dp = [[INF for _ in range(l2 + 1)] for _ in range(l1 + 1)]\n\n for i in range(l1 + 1):\n for j in range(l2 + 1):\n if i == 0 and j == 0:\n dp[i][j] = 0\n continue\n\n if i != 0:\n dp[i][j] = min(dp[i][j], dp[i - 1][j] + 1)\n if j != 0:\n dp[i][j] = min(dp[i][j], dp[i][j - 1] + 1)\n if i != 0 and j != 0:\n change_move = 0 if word1[i - 1] == word2[j - 1] else 1\n dp[i][j] = min(dp[i][j], dp[i - 1][j - 1] + change_move)\n\n return dp[l1][l2]\n# @lc code=end\n","sub_path":"5/72.编辑距离.py","file_name":"72.编辑距离.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"167806423","text":"# Matthew Hopwood\n# EDF Project\n# Finding optimum Bypass ratio for max efficiency\n\nimport matplotlib.pyplot as plt\nimport isentropic as isn\nimport Turbojet as tj\nimport Turbofan as tf\n\n# Assume Ideal Expansion\n# Ambient Conditions for Sea Level\nPa = 101325\nTa = 300\n\n# Additional Givens\nM1 = 0.1711\nTo4 = 1500 # Tmax\nhc = 45752000 # j/kg -- of butane\ny1 = 1.4 # inlet and comp\ny2 = 1.35 # burner and turbine and nozzle\nR = 287 # J/kgK\nfst = 0.06\ncp1 = R / (1 - (1 / y1))\ncp2 = R / (1 - (1 / y2))\n\n# Efficiencies/ratios\nnb = 1\nrc = 1.5\nrb = 0.97\n\n'''\nnd = 0.94\nnc = 0.87\n'''\n\n# Initialize iterators/ other variables\ndelta = 0.01\nrc_list = [0]\nnmax = 0\nI_array = [0]\nnth_array = [0]\nnp_array = [0]\nno_array = [0]\n\nprint(\"Walk Through:\")\n\nwhile rc <= 10:\n # Reset To4 = 1500\n To5 = 1500\n\n # Initial Conditions\n a1 = (y1 * R * Ta) ** 0.5\n v1 = M1 * a1\n\n # Inlet/Diffuser\n To2 = Ta * isn.T_ratio(M1, y1)\n Po2 = Pa * isn.P_ratio(M1, y1)\n\n # Compressor\n Po3 = rc * Po2\n To3 = To2 * (rc ** ((y1 - 1) / y1))\n Wc_in = cp1 * (To3 - To2)\n\n # Air Straightener\n To4 = To3\n Po4 = Po3\n\n # Burner\n # Check if fb > fst\n fb = tj.fb_test(To4, To5, hc, cp2, nb)\n if fb > fst:\n print('burner Tmax change')\n fb = fst\n To5 = tj.newTo4(To4, hc, cp2, nb, fb)\n Po5 = Po4 * rb\n\n # Nozzle\n Po6 = Po5\n To6 = To5\n T6 = To5 / ((Po5 / Pa) ** ((y2 - 1) / y2))\n\n # Exit Calculations\n ae = (y2 * R * T6) ** 0.5\n Me = (((To6 / T6) - 1) * (2 / (y2 - 1))) ** 0.5\n ve = Me * ae\n\n\n # Specific Thrust\n I = tj.I_ie(fb, ve, v1)\n\n '''\n # Efficiencies\n np = tj.np\n nth = tj.nth\n no = tj.no\n '''\n\n # Add Values to arrays\n I_array.append(I)\n rc_list.append(rc)\n '''\n nth_array.append(nth)\n np_array.append(np)\n no_array.append(no)\n '''\n\n rc += delta\n\n# Remove initial zero values\nI_array.pop(0)\nrc_list.pop(0)\n'''\nnth_array.pop(0)\nnp_array.pop(0)\nno_array.pop(0)\n'''\n\nprint(ve)\n\n# I vs rc\nplt.plot(rc_list, I_array)\nplt.suptitle('I vs rc')\nplt.xlabel('rc')\nplt.ylabel('I')\nplt.show()\n\n\n'''\n# np, nth, no vs rc\nplt.plot(B_list, nth_array, label='nth', linestyle='dashed', color='red')\nplt.plot(B_list, np_array, label='np', linestyle='dashed', color='green') # marker='o'\nplt.plot(B_list, no_array, label='no', color='blue')\nplt.suptitle('nth, np, & no vs B')\nplt.xlabel('B')\nplt.ylabel('Efficiencies')\nplt.legend()\nplt.show()\n'''","sub_path":"main_tmax.py","file_name":"main_tmax.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"590106609","text":"# valueIterationAgents.py\n# -----------------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# 
solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n#\n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nimport mdp, util\n\nfrom learningAgents import ValueEstimationAgent\n\n\nclass ValueIterationAgent(ValueEstimationAgent):\n \"\"\"\n * Please read learningAgents.py before reading this.*\n A ValueIterationAgent takes a Markov decision process\n (see mdp.py) on initialization and runs value iteration\n for a given number of iterations using the supplied\n discount factor.\n \"\"\"\n\n def __init__(self, mdp, discount=0.9, iterations=100):\n \"\"\"\n Your value iteration agent should take an mdp on\n construction, run the indicated number of iterations\n and then act according to the resulting policy.\n Some useful mdp methods you will use:\n mdp.getStates()\n mdp.getPossibleActions(state)\n mdp.getTransitionStatesAndProbs(state, action)\n mdp.getReward(state, action, nextState)\n mdp.isTerminal(state)\n \"\"\"\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.values = util.Counter() # A Counter is a dict with default 0\n\n # Write value iteration code here\n \"*** YOUR CODE HERE ***\"\n\n iteration = 0\n\n while (iteration < self.iterations): # loop for k iterations\n\n states = self.mdp.getStates() # get states of mdp for current iteration\n current_iteration_values = util.Counter() # final values for this iteration\n\n for i in range(len(states)): # loop on states\n\n current_state = states[i]\n\n actions = self.mdp.getPossibleActions(current_state) # get possible actions from current state\n q_value = util.Counter()\n\n if (not self.mdp.isTerminal(current_state)): # if it's not a terminal state\n\n for action in actions: # loop on the possible actions\n\n # get Q_star for each action and store in a dict\n q_value[action] = self.computeQValueFromValues(current_state, action)\n\n current_iteration_values[current_state] = max(q_value.values())\n\n self.values = current_iteration_values # apply values to each state in the current iteration\n iteration += 1 # get to the next iteration\n\n def getValue(self, state):\n \"\"\"\n Return the value of the state (computed in __init__).\n \"\"\"\n return self.values[state]\n\n def computeQValueFromValues(self, state, action):\n \"\"\"\n Compute the Q-value of action in state from the\n value function stored in self.values.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n\n possible_transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n\n Q_star = 0\n\n for i in range(len(possible_transitions)):\n Q_star += possible_transitions[i][1] * (\n self.mdp.getReward(state, action, possible_transitions[i][0]) + self.discount * self.values[\n possible_transitions[i][0]])\n\n return Q_star\n\n\n def computeActionFromValues(self, state):\n \"\"\"\n The policy is the best action in the given state\n according to the values currently stored in self.values.\n You may break ties any way you see fit. 
Note that if\n there are no legal actions, which is the case at the\n terminal state, you should return None.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n\n actions = self.mdp.getPossibleActions(state)\n\n if (self.mdp.isTerminal(state) or len(actions) == 0):\n return None\n\n q_values = util.Counter()\n\n for action in actions:\n q_values[action] = self.computeQValueFromValues(state, action)\n\n return q_values.argMax()\n\n\n def getPolicy(self, state):\n return self.computeActionFromValues(state)\n\n def getAction(self, state):\n \"Returns the policy at the state (no exploration).\"\n return self.computeActionFromValues(state)\n\n def getQValue(self, state, action):\n return self.computeQValueFromValues(state, action)","sub_path":"valueIterationAgents.py","file_name":"valueIterationAgents.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"561653240","text":"from PyQt5.QtWidgets import (QApplication, QPushButton,QWidget,QGridLayout,\n QSizePolicy,QLineEdit,\n QMainWindow,QAction,QVBoxLayout\n ,QDockWidget,QListView,\n QAbstractItemView,QLabel,QFileDialog,QTextEdit,\n QInputDialog)\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import (\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\nimport sys\nimport numpy as np\nfrom scipy import signal\nclass Resolution(QMainWindow):\n def __init__(self,spectrum,width,distance,font):\n '''Expects the spectrum to be a 2d list having counts in the \n first column and energy in the second, width of the peaks to detect\n in channels and distance from peak to peak in channels. Font is a\n QFont'''\n super().__init__()\n self.spectrum=spectrum\n # self.peaks,self.eres,self.l,self.r=self.find_peaks(spectrum[0],\n # width,distance)\n self.menu()\n self.showMaximized()\n self.geometry()\n self.setWindowTitle('Peak Detection and Energy Resolution')\n self.font=font\n self.show()\n \n def geometry(self):\n self.calibrated_plot=QWidget()\n layout1=QVBoxLayout()\n self.figure1=Figure()\n self.canvas1=FigureCanvas(self.figure1)\n self.toolbar1=NavigationToolbar(self.canvas1,self)\n layout1.addWidget(self.toolbar1)\n layout1.addWidget(self.canvas1)\n self.calibrated_plot.setLayout(layout1)\n self.ax1=self.canvas1.figure.subplots()\n self.ax1.set_yscale('log')\n self.ax1.set_xlabel('Energy (MeV)')\n self.ax1.set_ylabel('Counts')\n self.ax1.set_title('Peak Finder')\n self.figure1.tight_layout()\n \n main=QWidget()\n main_lay=QVBoxLayout()\n main_lay.addWidget(self.calibrated_plot)\n main.setLayout(main_lay)\n self.setCentralWidget(main)\n \n def menu(self):\n self.menuFile=self.menuBar().addMenu('&File')\n self.save_image=QAction('&Save Image',self)\n self.save_image.triggered.connect(self.saver)\n self.menuFile.addAction(self.save_image)\n \n def saver(self):\n options='Portable Network Graphics (*.png);;'\n options_='Joint Photographic Experts Group(*.jpg)'\n options=options+options_\n file_name,ok=QFileDialog.getSaveFileName(self,'Spectrum Image Save',\"\"\n ,options)\n \n if file_name and ok:\n self.figure1.savefig(file_name[0],dpi=600,figsize=(10,10))\n\n\n def find_peaks(self,x,width,distance):\n peaks, properties = signal.find_peaks(x,width=width,distance=distance)\n e_res=[]\n widths=properties['widths'] #the fwhm of the peak\n left=properties['left_ips'] #left point of the fwhm\n right=properties['right_ips'] #right point of the fwhm\n sigma=[i/(2*np.sqrt(2*np.log(2))) for i in widths] #standard deviation\n left_sig=[]\n 
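# edges of a +/- 4*sigma window around each refined peak centre (filled in the loop below)\r\n        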
right_sig=[]\r\n        #recalculate the peak location based on the average of the left and right fwhm\r\n        for i in range(len(peaks)):\r\n            avg=(left[i]+right[i])/2\r\n            peaks[i]=avg\r\n            left_sig.append(avg-4*sigma[i])\r\n            right_sig.append(avg+4*sigma[i])\r\n            e_res.append(widths[i]/avg*100)\r\n        \r\n        return peaks,e_res,left_sig,right_sig\r\n    \r\nif __name__==\"__main__\":\r\n    app=QApplication(sys.argv)\r\n    ex=Resolution(10,1,1,1)\r\n    sys.exit(app.exec_())\r\n    ","sub_path":"Resolution.py","file_name":"Resolution.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"265439670","text":"# -*- coding: utf-8 -*-\n\n\nimport time\nimport logging\nimport logging.handlers\n\ndef retryLogger(maxRetryNbr, sleepTimeIncr):\n    '''\n    Décorateur pour logger proprement une fonction de maj.\n    \n    maxRetryNbr (int) : nombre max de retry possibles\n    sleepTimeIncr (int) : temps d'attente (incrémenté par lui même) entre 2 retry\n    \n    '''\n    \n    def func_retryLogger(func):\n        def func_wrapper(*args, **kwargs):\n            # Récupère le logger\n            logger = logging.getLogger(__name__)\n            retryNbr = 0 # Nombre d'essai\n            sleepTime = 0 # Temps de sleep en minutes\n            success = False\n            returnValue = None # Initialise la valeur de retour\n            while retryNbr < maxRetryNbr and not success:\n                try:\n                    retryNbr += 1\n                    returnValue = func(*args, **kwargs)\n                    success = True\n                except Exception as e:\n                    # Log l'erreur s'il y a eu maxRetryNbr essais infructueux\n                    if retryNbr == maxRetryNbr:\n                        logger.error('Erreur', exc_info=True)\n                    else:\n                        logger.warning(\"Début de sleep\")\n                        sleepTime += sleepTimeIncr\n                        time.sleep(sleepTime)\n            return returnValue\n        return func_wrapper\n    return func_retryLogger\n\n\nclass BufferingSMTPHandler(logging.handlers.BufferingHandler):\n    '''\n    Classe définissant un nouveau handler pour envoyer tout le log d'un \n    coup par mail.\n    \n    La méthode flush est appelée dès que le nombre de records dépasse la \n    capacity, un mail est ensuite envoyé.\n    \n    http://stackoverflow.com/questions/1610845/collate-output-in-python-logging-memoryhandler-with-smtphandler/1611958#1611958\n    https://gist.github.com/anonymous/1379446\n    \n    La classe a été modifiée depuis l'exemple pour fonctionner correctement\n    avec python 3 et ce qu'on voulait faire. 
\n    \n    '''\n    \n    def __init__(self, capacity, mailhost, fromaddr, toaddrs, subject, credentials=None, \n                 secure=None, timeout=5.0):\n        super().__init__(capacity)\n        \n        # Met en forme les arguments\n        if isinstance(mailhost, (list, tuple)):\n            self.mailhost, self.mailport = mailhost\n        else:\n            self.mailhost, self.mailport = mailhost, None\n        if isinstance(credentials, (list, tuple)):\n            self.username, self.password = credentials\n        else:\n            self.username = None\n        self.fromaddr = fromaddr\n        if isinstance(toaddrs, str):\n            toaddrs = [toaddrs]\n        self.toaddrs = toaddrs\n        self.subject = subject\n        self.secure = secure\n        self.timeout = timeout\n        \n\n    def flush(self):\n        if len(self.buffer) > 0:\n            try:\n                import smtplib\n                from email.message import EmailMessage\n                import email.utils\n                \n                port = self.mailport\n                if not port:\n                    port = smtplib.SMTP_PORT\n                smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)\n                msg = EmailMessage()\n                msg['From'] = self.fromaddr\n                msg['To'] = ','.join(self.toaddrs)\n                msg['Subject'] = self.subject\n                msg['Date'] = email.utils.localtime()\n                \n                content = ''\n                for record in self.buffer:\n                    s = self.format(record)\n                    content = content + s + \"\\r\\n\"\n                msg.set_content(content)\n                \n                if self.username:\n                    if self.secure is not None:\n                        smtp.ehlo()\n                        smtp.starttls(*self.secure)\n                        smtp.ehlo()\n                    smtp.login(self.username, self.password)\n                smtp.send_message(msg)\n                smtp.quit()\n            except Exception:\n                # Ligne commentée car elle génère une erreur\n                # self.handleError(None)\n                pass\n            self.buffer = []\n\n\n\n\n\n","sub_path":"stockvider/stockviderApp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"157819365","text":"#!/usr/bin/env python3\n\nimport logging\nimport os\nimport re\nimport shutil\nimport sys\n\nfrom datetime import datetime\nfrom scandir import scandir, walk\nfrom titlecase import titlecase\n\n# As part of refactoring, I found some issues with variable scope.\n# To address this, I renamed my global variables to start with g_.\n\ng_error_log = 'cuesheet.log'\ng_file_types = ('.html', 'htm', '.md', '.doc', '.docx', '.mht', '.mhtml', '.pdf', '.txt', '.rtf')\ng_input_folder = \"/Users/stephen/Music/sd mp3/cuesheets\"\ng_html_cuesheet_output = '/Users/stephen/Projects/sd-html-cuesheets'\n\n\nlogging.basicConfig(filename=g_error_log, \n                    filemode='w', \n                    format='%(levelname)s - %(message)s', \n                    level='INFO')\n\ndef clear():\n    os.system('cls' if os.name == 'nt' else 'clear')\n\ndef create_output_folder(folder_name):\n    if not os.path.exists(folder_name):\n        try:\n            os.makedirs(folder_name)\n        \n        except Exception as my_error:\n            print(f'Problem opening output folder. Error: {my_error}')\n            logging.error(f'Problem opening output folder. Error: {my_error}')\n            sys.exit()\n\ndef duplicate_checker(file_name, destination):\n    if ( os.path.exists(os.path.join(destination, file_name)) ):\n        destination = output_path(g_html_cuesheet_output, '_duplicates')\n        time_stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n        file_name = time_stamp + ' ' + file_name\n        \n        if not os.path.exists(destination):\n            os.makedirs(destination)\n\n    return file_name, destination\n\ndef get_folder_assignment(song_name):\n    \"\"\"Determines folder and creates the folder if needed.\"\"\"\n    # Put each file into a folder based on the first character of the\n    # song name.\n    #\n    # Letters will go into folders A through Z\n    # Numbers will go into a folder '0-9'\n    # Non-alphanumeric characters will be skipped. 
The first number/letter \n # will be used instead. For now, this will be the second character.\n # A future to-do is to change this to regex.\n\n folder_letter = song_name[0]\n\n if folder_letter == \"\\'\":\n folder_letter = song_name[1]\n \n if folder_letter.isalpha():\n target_folder = output_path( g_html_cuesheet_output, folder_letter.upper() )\n\n elif folder_letter.isnumeric():\n target_folder = output_path( g_html_cuesheet_output, \"0-9\" )\n\n else:\n target_folder = output_path( g_html_cuesheet_output, \"_unknown\" )\n logging.error( \"Destination unknown: {}\".format(target_folder) )\n\n if not os.path.exists(target_folder):\n os.makedirs(target_folder)\n\n return(target_folder)\n\ndef get_files(input_folder):\n \"\"\"Put all files in a folder into a list.\"\"\"\n files = []\n\n # Get all files in input directory.\n # Omit hidden linux files that start with a dot.\n for entry in scandir(input_folder):\n if (entry.is_file() and not entry.name.startswith('.')):\n if ( entry.name.endswith( g_file_types ) ):\n files.append(entry.path)\n\n return files\n\ndef get_song_info(my_filename):\n \"\"\"Return The Label, Number, and Title of a Song based on the Filename.\"\"\"\n \n # 2019-11-25 Opted to change the a-zA-Z0-9 to \\w\n\n if ( re.match( r'([\\w+&-]+)+\\s([\\w-]+)\\s-\\s([\\w0-9()\\W]*)', my_filename ) ):\n title = re.match(r'([\\w+&-]+)+\\s([\\w-]+)\\s-\\s([\\w0-9()\\W]*)', my_filename )\n\n song_name = titlecase(title.group(3)).strip()\n song_label = title.group(1)\n song_number = title.group(2).lstrip('0')\n \n elif ( re.match( r'([\\w\\d()\\W]*)-([\\w\\d()]*)-([\\d()\\W]*)', my_filename ) ):\n title = re.match(r'([\\w\\d()\\W]*)-([\\w\\d()]*)-([\\d()\\W]*)', my_filename )\n\n song_name = titlecase(title.group(1)).strip()\n song_label = title.group(2)\n song_number = title.group(3).lstrip('0')\n\n else:\n print('Filename format error: {}'.format(my_filename))\n logging.error('Filename format error: {}'.format(my_filename))\n song_name = \"Error\"\n song_label = \"Error\"\n song_number = \"Error\"\n\n return song_label, song_number, song_name\n\ndef main():\n \"\"\"Rename square dance cuesheet and move it to an appropriate folder.\"\"\"\n clear()\n\n files = get_files(g_input_folder)\n\n for file in files:\n\n # Matches: Label Number - Song Name\n # (Needs to be renamed)\n if ( re.match( r'([a-zA-Z0-9+&-]+)+\\s([a-zA-Z0-9-]+)\\s-\\s([\\w0-9()\\W]*)', os.path.basename(file)) ):\n move_and_rename(file, g_html_cuesheet_output)\n\n # Matches: Song Name-Label-Number\n # (Already formatted correctly)\n elif ( re.match( r'([\\w\\d()\\W]*)-([\\w\\d()]*)-([\\d()\\W]*)', os.path.basename(file) ) ):\n move_cuesheet(file, g_html_cuesheet_output)\n \n else:\n print(f\"File format error: {os.path.basename(file)}\")\n logging.error(f\"File format error: {os.path.basename(file)}\")\n \n print(\"Complete\")\n\ndef move_and_rename(file_in, folder_out):\n\n file_wo_ext = os.path.basename(os.path.splitext(file_in)[0])\n input_file_ext = os.path.basename(os.path.splitext(file_in)[1])\n my_label, my_label_number, my_song_title = get_song_info(file_wo_ext)\n\n # Some filenames had unexplained unicode characters in place of the standard\n # English alphabet.\n #\n # Also, titlecase did strange things to song names that were\n # abbreviations in them (like Y.M.C.A. 
and E.A.R.), words that should\n # be in all caps (like ABBA), and ordinal numbers such as 3rd and 4th.\n # \n # It was easier to simply put in specific use cases.\n\n # 2019-11-25: Changed these if statements to have parenthesis.\n\n if (\"∩\" in my_song_title):\n my_song_title = my_song_title.replace(\"∩\", \"'\")\n logging.warning(\"∩ found in: {}: \".format(my_song_title))\n \n if ('Abba' in my_song_title):\n my_song_title = my_song_title.replace('Abba', 'ABBA')\n logging.warning(\"Abba found in: {}: \".format(my_song_title))\n\n if (\"Y.m.c.A\" in my_song_title):\n my_song_title = my_song_title.replace(\"Y.m.c.A\", \"Y.M.C.A.\")\n logging.warning(\"Y.m.c.A found in: {}: \".format(my_song_title))\n\n if (\"Wr \" in my_song_title):\n my_song_title = my_song_title.replace('Wr ', 'We ')\n logging.warning(\"Wr found in: {}: \".format(my_song_title))\n\n if ('Ymca' in my_song_title):\n my_song_title = my_song_title.replace('Ymca', 'YMCA') \n logging.warning(\"Ymca found in: {}: \".format(my_song_title)) \n\n if (\"1,2,3, F\" in my_song_title):\n my_song_title = my_song_title.replace(\"1,2,3, F\", \"1, 2, 3 F\") \n logging.warning(\"1,2,3, F found in: {}: \".format(my_song_title)) \n\n if ('59Th' in my_song_title):\n my_song_title = my_song_title.replace('59Th', '59th')\n logging.warning(\"59Th found in: {}: \".format(my_song_title))\n\n # 2019-11-25 found more exceptions. Sad.\n\n if ('What+s' in my_song_title):\n my_song_title = my_song_title.replace(\"What+s\", \"What's\")\n logging.warning(\"What+s found in: {}: \".format(my_song_title))\n\n if (\"Can+'t\" in my_song_title):\n my_song_title = my_song_title.replace(\"Can+'t\", \"Can't\")\n logging.warning(\"What+s found in: {}: \".format(my_song_title))\n\n if ('I+m' in my_song_title):\n my_song_title = my_song_title.replace('I+m', \"I'm\")\n logging.warning(\"I+m found in: {}: \".format(my_song_title))\n \n if ('E.a.r.' in my_song_title):\n my_song_title = my_song_title.replace('E.a.r.', 'E.A.R.')\n logging.warning(\"E.a.r found in: {}: \".format(my_song_title))\n\n renamed_file = my_song_title + '-' + my_label.upper() + '-' + my_label_number + input_file_ext\n \n destination = get_folder_assignment(renamed_file)\n\n try:\n if ( os.path.exists(os.path.join(destination, renamed_file)) ):\n destination = output_path(g_input_folder, '_duplicates')\n time_stamp = datetime.now().strftime(\"(%Y-%m-%d %I-%M %p)\")\n \n renamed_file = time_stamp + ' ' + renamed_file\n \n if not os.path.exists(destination):\n os.makedirs(destination)\n\n shutil.move(file_in, output_path(destination, renamed_file))\n logging.info( f\"File moved:\\n\\t{file_in}\\nTo:\\n\\t{output_path(destination, renamed_file)}\")\n\n except Exception as my_file_error:\n print(f'Problem opening output folder. Error: {my_file_error}')\n logging.error(f'Problem opening output folder. 
Error: {my_file_error}')\n\ndef move_cuesheet(file_name, html_cuesheet_output):\n\n    my_label, my_label_number, my_song_title = get_song_info( os.path.basename(file_name) )\n\n    destination = get_folder_assignment(my_song_title)\n\n    renamed_file = os.path.basename(file_name)\n\n    try:\n        if ( os.path.exists(os.path.join(destination, file_name)) ):\n            destination = output_path(g_input_folder, '_duplicates')\n\n            time_stamp = datetime.now().strftime(\"(%Y-%m-%d %I-%M %p)\")\n            renamed_file = time_stamp + ' ' + os.path.basename(file_name)\n\n            if not os.path.exists(destination):\n                os.makedirs(destination)\n\n        shutil.move(file_name, output_path(destination, os.path.basename(renamed_file)))\n        logging.info(f\"File moved:\\n\\t{file_name}\\nTo:\\n\\t{output_path(destination, renamed_file)}\")\n\n    except Exception as my_file_error:\n        print(f'Problem opening output folder. Error: {my_file_error}')\n        logging.error(f'Problem opening output folder. Error: {my_file_error}')\n\n\ndef output_path(folder_path, appendage):\n    \"\"\"Appends filename to folder path.\"\"\"\n    return os.path.join(folder_path, appendage)\n\nif __name__ == '__main__':\n    main()","sub_path":"code/cuesheet-v5.py","file_name":"cuesheet-v5.py","file_ext":"py","file_size_in_byte":9689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"567941213","text":"import random\nHK=\"Hey kid,\"\nTH=\"is tooooooooooo big,your head hole is too big.\"\nwhile(233):\n    Play1= input(\"Wanna play the guessing game? Type yes or no.\")\n    if Play1==\"yes\":\n        M233=random.randint(0,1000000000000000000)\n        while(1):\n            LOL= int (input (\"Guess a number between -1~1000000000000000001\"))\n            if LOL>M233:\n                print (HK,LOL,TH)\n            elif LOL==M233:\n                print (HK,\"you found the number!\")\n                break\n            else:\n                print(HK,LOL,\"is toooooooooooooooooooo small.\")\n\n    elif Play1==\"no\":\n        break\n    else:\n        print (\"Please type yes or no,that's what my programmer said,or you owe my programmer 350 million and I will quit the program.\")\nprint(\"Thanks for playing this game.\")\n","sub_path":"Game/Display.py","file_name":"Display.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"379214715","text":"'''\nGiven an integer n, return the number of trailing zeroes in n!.\n\nNote: Your solution should be in logarithmic time complexity.\n\nCredits:\n    Special thanks to @ts for adding this problem and creating all test cases.\n'''\n'''\n总结各种规律,并加以试错后,发现其实就是找出n比几个5的n次方大,数学游戏……不过如果像acm不给出bad case的话,这个题就不好做了\n'''\nclass Solution:\n    # @return an integer\n    def trailingZeroes(self, n):\n        sum = 0\n        i = 1\n        factor = 5 ** i\n        while factor <= n:\n            sum += n // factor\n            i += 1\n            factor = 5 ** i\n        return sum\n","sub_path":"FactorialTrailingZeroes.py","file_name":"FactorialTrailingZeroes.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"61529555","text":"\n# # Implement helper classes here\n\n# class LRCMap:\n#     class Node:\n#         def __init__(self, left = None, right = None, center = None, data = None):\n#             self.left = left\n#             self.right = right\n#             self.center = center\n#             self.data = data\n    \n\n#     def __init__(self, build = False):\n\n\n#     def put_data(self, key, data):\n#         pass # REMOVE THIS LINE WHEN YOU START IMPLEMENTING\n\n#     def get_data(self, key): # returns data for that key or None if non-existant\n#         pass # REMOVE THIS LINE WHEN YOU START IMPLEMENTING\n\n\nclass Item:\n    def __init__(self, key = None, data = None):\n        
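# minimal container for one key/value pair stored in a hash-table bucket\n        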
self.key = key\n        self.data = data\n\nclass HashMap:\n    def __init__(self):\n        self.array_length = 16\n        self.hash_table = [ [ ] for _ in range(self.array_length) ]\n        self.item_count = 0\n\n    def __setitem__(self, key, data): # overrides/updates if already there\n        index = hash(key) % self.array_length\n        for item in self.hash_table[index]:\n            if item.key == key:\n                item.data = data\n                return\n        self.hash_table[index].append(Item(key, data))\n        self.item_count += 1\n\n    def __getitem__(self, key): # returns data - returns None if nothing there\n        index = hash(key) % self.array_length\n        for item in self.hash_table[index]:\n            if item.key == key:\n                return item.data\n        return None\n\n    def __len__(self):\n        return self.item_count\n\n\n# NO IMPLEMENTATION OF EXAM SOLUTIONS BELOW THIS LINE\nif __name__ == \"__main__\":\n\n    # MAKE ALL TEST CODE BELOW THIS LINE\n    # AND AT THIS INDENT LEVEL!!\n\n    # tm = LRCMap()\n    # tm.put_data(\"lrl\", \"THIS IS THE DATA FOR KEY lrl\")\n    # tm.put_data(\"lc\", \"THIS IS THE DATA FOR KEY lc\")\n    # print(tm.get_data(\"lrl\"))\n    # print(tm.get_data(\"lrcclc\"))\n    # print(tm.get_data(\"lc\"))\n\n    # tm = LRCMap(True)\n    # tm.put_data(\"lrlrccr\", \"THIS IS THE DATA FOR KEY lrlrccr\")\n    # tm.put_data(\"lrlrcclc\", \"THIS IS THE DATA FOR KEY lrlrcclc\")\n    # print(tm.get_data(\"lrlrcclc\"))\n    # print(tm.get_data(\"lrlclc\"))\n    # print(tm.get_data(\"lrlrccr\"))\n\n\n    hm = HashMap()\n    hm[\"key_value:345\"] = \"THIS IS THE DATA FOR KEY: key_value:345\"\n    hm[345] = \"THIS IS THE DATA FOR KEY: 345\"\n    print(hm[345])\n    print(hm[346])\n    print(hm[\"key_value:345\"])\n    print(len(hm))\n    hm[345] = \"THIS IS THE NEW DATA FOR KEY: 345\"\n    print(hm[345])\n    print(len(hm))\n\n    ","sub_path":"Midterms/Hlutaprof3_2020/Hlutaprof3_2020/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"425493504","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nimport re\nfrom urllib.parse import urljoin\nimport requests\nimport lxml.html\nfrom pymongo import MongoClient\n\nURL_ARTIST_DEFAULT = 'http://www.uta-net.com/artist/684/'\n\n\ndef main():\n    \"\"\"\n    scrape_list_page(): artist の楽曲一覧から song ページの URL を取得\n    scrape_song()     : 1つの song のページから曲名、アーティスト名、歌詞を取得\n    extract_key()     : URL からキーを取り出す\n    \"\"\"\n    # 楽曲一覧ページの指定\n    if len(sys.argv) > 1:\n        root_url = sys.argv[1]\n    else:\n        root_url = URL_ARTIST_DEFAULT\n\n    client = MongoClient('localhost', 27017) # 第2引数はポート番号\n    # scraping データベースの songs コレクションを得る(ない場合は新規作成)\n    collection = client.scraping.songs \n    collection.create_index('key', unique=True)\n\n    session = requests.Session() # Session によって複数ページを効率よくクローリング\n    response = requests.get(root_url)\n    urls = scrape_list_page(response)\n    \n    for url in urls:\n        key = extract_key(url) # URL からキーを取得\n        #print(key)\n        song = collection.find_one({'key': key}) # MongoDB から key に該当するデータを検索\n        if not song:\n            time.sleep(1)\n            response = session.get(url)\n            song = scrape_song(response)\n            collection.insert_one(song)\n\n        #print(song)\n        #print(song['lyric'].replace('\\n', ' '))\n        print(song['lyric'])\n\n    \n    \ndef scrape_list_page(response):\n    \"\"\"\n    パーマリンク一覧の中で、楽曲に関するものを抽出するジェネレータ関数\n    ex. 
<td class=\"side td1\"><a href=\"/song/69260/\">...\n    \"\"\"\n    root = lxml.html.fromstring(response.content)\n    #root.make_links_absolute(response.url) # 相対パスをすべて絶対パスに変換\n    \n    for a in root.cssselect('td.side.td1 a[href^=\"/song/\"]'): \n        url = urljoin(response.url, a.get('href'))\n        #print(url, a.text)\n        yield url\n\n    \n\ndef scrape_song(response):\n    \"\"\"\n    引数 response から曲名、アーティスト、作詞者、作曲者、歌詞を取得\n    return: dict 型\n    TODO: 編曲者がいる場合の対応\n    \"\"\"\n    root = lxml.html.fromstring(response.content)\n    song = {'url'      : response.url,\n            'key'      : extract_key(response.url),\n            'title'    : root.cssselect('div.title h2')[0].text,\n            'artist'   : root.cssselect('div.kashi_artist span[itemprop=\"byArtist name\"]')[0].text,\n            'lyricist' : root.cssselect('div.artist_etc.clearfix h4')[0].text,\n            'composer' : root.cssselect('div.artist_etc.clearfix h4')[1].text,\n            }\n\n    # 歌詞に <br /> を残すための処理\n    # 一旦 str 型の html に戻してから <br>, <br /> を \"\\n\" に置換\n    # Note: \"<br />\" は \"<br>\" にすでに変換されている\n    item = lxml.html.tostring(root.cssselect('#kashi_area')[0]).decode('utf-8').replace(u\"<br>\", \"\\n\")\n    lyric = lxml.html.fromstring(item).text_content()\n    song['lyric'] = lyric.replace('\\u3000', ' ') # 全角スペースを半角スペースに\n\n    return song\n\n\n\ndef extract_key(url):\n    # URL から末尾の曲IDを取り出しキーとする\n    return re.search(r'/song/([0-9]+)/$', url).group(1)\n\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scrape_song_2.py","file_name":"scrape_song_2.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"311212094","text":"'''\n   MFEM example 27p\n   See c++ version in the MFEM library for more detail \n'''\nimport os\nimport mfem.par as mfem\nfrom mfem.par import intArray\nfrom os.path import expanduser, join, dirname\nimport numpy as np\nfrom numpy import sin, cos, exp, sqrt, pi, abs, array, floor, log, arcsin\n\nfrom mpi4py import MPI\nnum_procs = MPI.COMM_WORLD.size\nmyid = MPI.COMM_WORLD.rank\nsmyid = '.'+'{:0>6d}'.format(myid)\n\n\ndef generate_serial_mesh(ref, a_):\n\n    mesh = mfem.Mesh(2, 29, 16, 24, 2)\n\n    for i in range(2):\n        o = 13 * i\n        mesh.AddQuad([o + 0, o + 3, o + 4, o + 1])\n        mesh.AddQuad([o + 1, o + 4, o + 5, o + 2])\n        mesh.AddQuad([o + 5, o + 8, o + 9, o + 2])\n        mesh.AddQuad([o + 8, o + 12, o + 15, o + 9])\n        mesh.AddQuad([o + 11, o + 14, o + 15, o + 12])\n        mesh.AddQuad([o + 10, o + 13, o + 14, o + 11])\n        mesh.AddQuad([o + 6, o + 13, o + 10, o + 7])\n        mesh.AddQuad([o + 0, o + 6, o + 7, o + 3])\n\n    mesh.AddBdrSegment([0, 6], 1)\n    mesh.AddBdrSegment([6, 13], 1)\n    mesh.AddBdrSegment([13, 19], 1)\n    mesh.AddBdrSegment([19, 26], 1)\n\n    mesh.AddBdrSegment([28, 22], 2)\n    mesh.AddBdrSegment([22, 15], 2)\n    mesh.AddBdrSegment([15, 9], 2)\n    mesh.AddBdrSegment([9, 2], 2)\n\n    for i in range(2):\n        o = 13 * i\n        mesh.AddBdrSegment([o+7, o+3], 3 + i)\n        mesh.AddBdrSegment([o+10, o+7], 3 + i)\n        mesh.AddBdrSegment([o+11, o+10], 3 + i)\n        mesh.AddBdrSegment([o+12, o+11], 3 + i)\n        mesh.AddBdrSegment([o+8, o+12], 3 + i)\n        mesh.AddBdrSegment([o+5, o+8], 3 + i)\n        mesh.AddBdrSegment([o+4, o+5], 3 + i)\n        mesh.AddBdrSegment([o+3, o+4], 3 + i)\n\n    a = a_ / sqrt(2)\n    mesh.AddVertex([-1.0, -0.5])\n    mesh.AddVertex([-1.0, 0.0])\n    mesh.AddVertex([-1.0, 0.5])\n\n    mesh.AddVertex([-0.5 - a, -a])\n    mesh.AddVertex([-0.5 - a, 0.0])\n    mesh.AddVertex([-0.5 - a, a])\n\n    mesh.AddVertex([-0.5, -0.5])\n    mesh.AddVertex([-0.5, -a])\n    mesh.AddVertex([-0.5, a])\n    mesh.AddVertex([-0.5, 0.5])\n\n    mesh.AddVertex([-0.5 + a, -a])\n    mesh.AddVertex([-0.5 + a, 0.0])\n    mesh.AddVertex([-0.5 + a, 
a])\n\n mesh.AddVertex([.0, -0.5])\n mesh.AddVertex([.0, 0.0])\n mesh.AddVertex([.0, 0.5])\n\n mesh.AddVertex([0.5 - a, -a])\n mesh.AddVertex([0.5 - a, 0.0])\n mesh.AddVertex([0.5 - a, a])\n\n mesh.AddVertex([0.5, -0.5])\n mesh.AddVertex([0.5, -a])\n mesh.AddVertex([0.5, a])\n mesh.AddVertex([0.5, 0.5])\n\n mesh.AddVertex([0.5 + a, -a])\n mesh.AddVertex([0.5 + a, 0.0])\n mesh.AddVertex([0.5 + a, a])\n\n mesh.AddVertex([1.0, -0.5])\n mesh.AddVertex([1.0, 0.0])\n mesh.AddVertex([1.0, 0.5])\n\n mesh.FinalizeTopology()\n\n mesh.SetCurvature(1, True)\n\n # Stitch the ends of the stack together\n # In Python, we just fill list like this\n v2v = [i for i in range(mesh.GetNV() - 3)]\n v2v.append(0)\n v2v.append(1)\n v2v.append(2)\n\n # renumber elements\n for i in range(mesh.GetNE()):\n el = mesh.GetElement(i)\n nv = el.GetNVertices()\n # We need to re-write the vertex data.\n # el.GetVertices returns *int. So we put it into intArray\n # to access its element\n v = mfem.intArray([el.GetVertices(), nv])\n for j in range(nv):\n v[j] = v2v[v[j]]\n\n # renumber boundary elements\n for i in range(mesh.GetNBE()):\n el = mesh.GetBdrElement(i)\n nv = el.GetNVertices()\n v = mfem.intArray([el.GetVertices(), nv])\n for j in range(nv):\n v[j] = v2v[v[j]]\n\n mesh.RemoveUnusedVertices()\n mesh.RemoveInternalBoundaries()\n\n mesh.SetCurvature(3, True)\n\n for l in range(ref):\n mesh.UniformRefinement()\n\n sdim = mesh.SpaceDimension()\n\n def quad_trans(u, v, write=False):\n a = a_\n d = 4.0 * a * (sqrt(2) - 2 * a) * (1.0 - 2.0 * v)\n v0 = ((1.0 + sqrt(2)) * (sqrt(2) * a - 2.0 * v) *\n ((4.0 - 3 * sqrt(2)) * a +\n (8.0 * (sqrt(2) - 1.0) * a - 2.0) * v) / d)\n\n r = (2.0 * ((sqrt(2) - 1.0) * a * a * (1.0 - 4.0 * v) +\n 2.0 * (1.0 + sqrt(2) *\n (1.0 + 2.0 * (2.0 * a - sqrt(2) - 1.0) * a)) * v * v\n ) / d)\n\n t = arcsin(v / r) * u / v\n if write:\n print(\"u, v, r, v0, t \" +\n \"{:g}\".format(u) + \" \" +\n \"{:g}\".format(v) + \" \" +\n \"{:g}\".format(r) + \" \" +\n \"{:g}\".format(v0) + \" \" +\n \"{:g}\".format(t))\n\n x = r * sin(t)\n y = r * cos(t) - v0\n\n return x, y\n\n class cTrans(mfem.VectorPyCoefficient):\n def __init__(self):\n mfem.VectorPyCoefficient.__init__(self, sdim)\n\n def EvalValue(self, u):\n tol = 1e-4\n x = u*0\n if u[1] > 0.5 - tol or u[1] < -0.5 + tol:\n return u\n if (u[0] > 1.0 - tol or u[0] < -1.0 + tol or abs(u[0]) < tol):\n return u\n if u[0] > 0.0:\n if u[1] > abs(u[0] - 0.5):\n x0, x1 = quad_trans(u[0] - 0.5, u[1])\n x[0] = x0 + 0.5\n x[1] = x1\n return x\n if u[1] < -abs(u[0] - 0.5):\n x0, x1 = quad_trans(u[0] - 0.5, -u[1])\n x[0] = x0 + 0.5\n x[1] = -x1\n return x\n if u[0] - 0.5 > abs(u[1]):\n x1, x0 = quad_trans(u[1], u[0] - 0.5)\n x[0] = x0+0.5\n x[1] = x1\n return x\n if u[0] - 0.5 < -abs(u[1]):\n x1, x0 = quad_trans(u[1], 0.5 - u[0])\n x[0] = -x0\n x[0] = x[0] + 0.5\n x[1] = x1\n return x\n else:\n pass\n else:\n if u[1] > abs(u[0] + 0.5):\n x0, x1 = quad_trans(u[0] + 0.5, u[1])\n x[0] = x0 - 0.5\n x[1] = x1\n return x\n if u[1] < -abs(u[0] + 0.5):\n x0, x1 = quad_trans(u[0] + 0.5, -u[1])\n x[0] = x0 - 0.5\n x[1] = -x1\n return x\n if u[0] + 0.5 > abs(u[1]):\n x1, x0 = quad_trans(u[1], u[0] + 0.5)\n x[0] = x0 - 0.5\n x[1] = x1\n return x\n if u[0] + 0.5 < -abs(u[1]):\n x1, x0 = quad_trans(u[1], -0.5 - u[0])\n x[0] = -x0\n x[0] = x[0] - 0.5\n x[1] = x1\n return x\n x = u\n return x\n\n mesh.Transform(cTrans())\n\n return mesh\n\n\ndef IntegrateBC(x, bdr, alpha, beta, gamma):\n nrm = 0.0\n avg = 0.0\n err = 0.0\n\n a_is_zero = (alpha == 0.0)\n b_is_zero = (beta == 
0.0)\n\n fes = x.ParFESpace()\n assert fes.GetVDim() == 1, \"\"\n\n mesh = fes.GetParMesh()\n shape = mfem.Vector()\n loc_dofs = mfem.Vector()\n w_nor = mfem.Vector()\n dshape = mfem.DenseMatrix()\n\n dof_ids = mfem.intArray()\n\n battrs = mesh.GetBdrAttributeArray()\n\n for i in range(mesh.GetNBE()):\n if bdr[battrs[i]-1] == 0:\n continue\n\n FTr = mesh.GetBdrFaceTransformations(i)\n if FTr is None:\n continue\n\n fe = fes.GetFE(FTr.Elem1No)\n assert fe.GetMapType() == mfem.FiniteElement.VALUE, \"\"\n\n int_order = 2*fe.GetOrder() + 3\n\n ir = mfem.IntRules.Get(FTr.GetGeometryType(), int_order)\n\n dof_ids = fes.GetElementDofs(FTr.Elem1No)\n\n x.GetSubVector(mfem.intArray(dof_ids), loc_dofs)\n if not a_is_zero:\n sdim = FTr.Face.GetSpaceDim()\n w_nor.SetSize(sdim)\n dshape.SetSize(fe.GetDof(), sdim)\n if not b_is_zero:\n shape.SetSize(fe.GetDof())\n\n for j in range(ir.GetNPoints()):\n ip = ir.IntPoint(j)\n eip = mfem.IntegrationPoint()\n FTr.Loc1.Transform(ip, eip)\n FTr.Face.SetIntPoint(ip)\n face_weight = FTr.Face.Weight()\n val = 0.0\n if not a_is_zero:\n FTr.Elem1.SetIntPoint(eip)\n fe.CalcPhysDShape(FTr.Elem1, dshape)\n mfem.CalcOrtho(FTr.Face.Jacobian(), w_nor)\n val += alpha * \\\n dshape.InnerProduct(w_nor, loc_dofs) / face_weight\n if not b_is_zero:\n fe.CalcShape(eip, shape)\n val += beta * (shape * loc_dofs)\n\n # Measure the length of the boundary\n nrm += ip.weight * face_weight\n\n # Integrate alpha * n.Grad(x) + beta * x\n avg += val * ip.weight * face_weight\n\n # Integrate |alpha * n.Grad(x) + beta * x - gamma|^2\n val -= gamma\n err += (val*val) * ip.weight * face_weight\n\n # Normalize by the length of the boundary\n glb_nrm = MPI.COMM_WORLD.allreduce(nrm, op=MPI.SUM)\n glb_avg = MPI.COMM_WORLD.allreduce(avg, op=MPI.SUM)\n glb_err = MPI.COMM_WORLD.allreduce(err, op=MPI.SUM)\n\n if abs(glb_nrm) > 0.0:\n glb_err /= glb_nrm\n glb_avg /= glb_nrm\n\n # Compute l2 norm of the error in the boundary condition (negative\n # quadrature weights may produce negative 'err')\n glb_err = sqrt(abs(glb_err))\n\n # Return the average value of alpha * n.Grad(x) + beta * x\n return glb_err, glb_avg\n\n\ndef run(order=1,\n h1=True,\n sigma=-1.0,\n kappa=-1.0,\n ser_ref_levels=2,\n par_ref_levels=1,\n mat_val=1.0,\n dbc_val=0.0,\n nbc_val=1.0,\n rbc_a_val=1.0,\n rbc_b_val=1.0,\n a_=0.2,\n visualization=True):\n\n device = mfem.Device('cpu')\n if myid == 0:\n device.Print()\n\n if kappa < 0 and not h1:\n kappa = (order+1.0)**2\n if a_ < 0.01:\n print(\"Hole radius too small, resetting to 0.01.\")\n a_ = 0.01\n if a_ > 0.49:\n print(\"Hole radius too large, resetting to 0.49.\")\n a_ = 0.49\n\n # 2. Construct the (serial) mesh and refine it if requested.\n mesh = generate_serial_mesh(ser_ref_levels, a_)\n dim = mesh.Dimension()\n\n pmesh = mfem.ParMesh(MPI.COMM_WORLD, mesh)\n for i in range(par_ref_levels):\n pmesh.UniformRefinement()\n\n # 3. Define a finite element space on the serial mesh. Here we use either\n # continuous Lagrange finite elements or discontinuous Galerkin finite\n # elements of the specified order.\n fec = (mfem.H1_FECollection(order, dim) if h1 else\n mfem.DG_FECollection(order, dim))\n fespace = mfem.ParFiniteElementSpace(pmesh, fec)\n size = fespace.GlobalTrueVSize()\n if myid == 0:\n print(\"Number of finite element unknowns: \" + str(size))\n\n # 4. Create \"marker arrays\" to define the portions of boundary associated\n # with each type of boundary condition. These arrays have an entry\n # corresponding to each boundary attribute. 
Placing a '1' in entry i\n # marks attribute i+1 as being active, '0' is inactive.\n nbc_bdr = mfem.intArray(pmesh.bdr_attributes.Max())\n rbc_bdr = mfem.intArray(pmesh.bdr_attributes.Max())\n dbc_bdr = mfem.intArray(pmesh.bdr_attributes.Max())\n\n nbc_bdr.Assign(0)\n nbc_bdr[0] = 1\n rbc_bdr.Assign(0)\n rbc_bdr[1] = 1\n dbc_bdr.Assign(0)\n dbc_bdr[2] = 1\n\n ess_tdof_list = mfem.intArray()\n if h1 and pmesh.bdr_attributes.Size():\n # For a continuous basis the linear system must be modified to enforce an\n # essential (Dirichlet) boundary condition. In the DG case this is not\n # necessary as the boundary condition will only be enforced weakly.\n fespace.GetEssentialTrueDofs(dbc_bdr, ess_tdof_list)\n\n # 5. Setup the various coefficients needed for the Laplace operator and the\n # various boundary conditions. In general these coefficients could be\n # functions of position but here we use only constants.\n matCoef = mfem.ConstantCoefficient(mat_val)\n dbcCoef = mfem.ConstantCoefficient(dbc_val)\n nbcCoef = mfem.ConstantCoefficient(nbc_val)\n rbcACoef = mfem.ConstantCoefficient(rbc_a_val)\n rbcBCoef = mfem.ConstantCoefficient(rbc_b_val)\n\n # Since the n.Grad(u) terms arise by integrating -Div(m Grad(u)) by parts we\n # must introduce the coefficient 'm' into the boundary conditions.\n # Therefore, in the case of the Neumann BC, we actually enforce m n.Grad(u)\n # = m g rather than simply n.Grad(u) = g.\n m_nbcCoef = mfem.ProductCoefficient(matCoef, nbcCoef)\n m_rbcACoef = mfem.ProductCoefficient(matCoef, rbcACoef)\n m_rbcBCoef = mfem.ProductCoefficient(matCoef, rbcBCoef)\n\n # 6. Define the solution vector u as a finite element grid function\n # corresponding to fespace. Initialize u with initial guess of zero.\n u = mfem.ParGridFunction(fespace)\n u.Assign(0.0)\n\n # 7. Set up the bilinear form a(.,.) on the finite element space\n # corresponding to the Laplacian operator -Delta, by adding the Diffusion\n # domain integrator.\n a = mfem.ParBilinearForm(fespace)\n a.AddDomainIntegrator(mfem.DiffusionIntegrator(matCoef))\n if h1:\n # Add a Mass integrator on the Robin boundary\n a.AddBoundaryIntegrator(mfem.MassIntegrator(m_rbcACoef), rbc_bdr)\n else:\n # Add the interfacial portion of the Laplace operator\n a.AddInteriorFaceIntegrator(mfem.DGDiffusionIntegrator(matCoef,\n sigma, kappa))\n\n # Counteract the n.Grad(u) term on the Dirichlet portion of the boundary\n a.AddBdrFaceIntegrator(mfem.DGDiffusionIntegrator(matCoef, sigma, kappa),\n dbc_bdr)\n\n # Augment the n.Grad(u) term with a*u on the Robin portion of boundary\n a.AddBdrFaceIntegrator(\n mfem.BoundaryMassIntegrator(m_rbcACoef), rbc_bdr)\n a.Assemble()\n\n # 8. 
Assemble the linear form for the right hand side vector.\n    b = mfem.ParLinearForm(fespace)\n\n    if h1:\n        # Set the Dirichlet values in the solution vector\n        u.ProjectBdrCoefficient(dbcCoef, dbc_bdr)\n\n        # Add the desired value for n.Grad(u) on the Neumann boundary\n        b.AddBoundaryIntegrator(mfem.BoundaryLFIntegrator(m_nbcCoef), nbc_bdr)\n\n        # Add the desired value for n.Grad(u) + a*u on the Robin boundary\n        b.AddBoundaryIntegrator(mfem.BoundaryLFIntegrator(m_rbcBCoef), rbc_bdr)\n    else:\n        # Add the desired value for the Dirichlet boundary\n        b.AddBdrFaceIntegrator(mfem.DGDirichletLFIntegrator(dbcCoef, matCoef,\n                                                            sigma, kappa),\n                               dbc_bdr)\n\n        # Add the desired value for n.Grad(u) on the Neumann boundary\n        b.AddBdrFaceIntegrator(mfem.BoundaryLFIntegrator(m_nbcCoef), nbc_bdr)\n\n        # Add the desired value for n.Grad(u) + a*u on the Robin boundary\n        b.AddBdrFaceIntegrator(mfem.BoundaryLFIntegrator(m_rbcBCoef), rbc_bdr)\n\n    b.Assemble()\n\n    # 9. Construct the linear system.\n    A = mfem.OperatorPtr()\n    B = mfem.Vector()\n    X = mfem.Vector()\n    a.FormLinearSystem(ess_tdof_list, u, b, A, X, B)\n\n    # 10. Define a simple symmetric Gauss-Seidel preconditioner and use it to\n    #     solve the system AX=B with PCG in the symmetric case, and GMRES in the\n    #     non-symmetric one.\n    amg = mfem.HypreBoomerAMG()\n    if sigma == -1.0:\n        pcg = mfem.HyprePCG(MPI.COMM_WORLD)\n        pcg.SetTol(1e-12)\n        pcg.SetMaxIter(200)\n        pcg.SetPrintLevel(2)\n        pcg.SetPreconditioner(amg)\n        pcg.SetOperator(A.Ptr())\n        pcg.Mult(B, X)\n    else:\n        gmres = mfem.GMRESSolver(MPI.COMM_WORLD)\n        gmres.SetAbsTol(0.0)\n        gmres.SetRelTol(1e-12)\n        gmres.SetMaxIter(200)\n        gmres.SetKDim(10)\n        gmres.SetPrintLevel(1)\n        gmres.SetPreconditioner(amg)\n        gmres.SetOperator(A.Ptr())\n        gmres.Mult(B, X)\n\n    # 12. Recover the grid function corresponding to U. This is the local finite\n    #     element solution.\n    a.RecoverFEMSolution(X, b, u)\n\n    # 13. Build a mass matrix to help solve for n.Grad(u) where 'n' is a surface\n    #     normal.\n    m = mfem.ParBilinearForm(fespace)\n    m.AddDomainIntegrator(mfem.MassIntegrator())\n    m.Assemble()\n\n    ess_tdof_list.SetSize(0)\n    M = mfem.OperatorPtr()\n    m.FormSystemMatrix(ess_tdof_list, M)\n\n    # 14. 
Compute the various boundary integrals.\n    if myid == 0:\n        print(\"Verifying boundary conditions\\n\" +\n              \"=============================\")\n\n    # Integrate the solution on the Dirichlet boundary and compare to the\n    # expected value.\n    def print0(*args):\n        if myid == 0:\n            print(*args)\n\n    err, avg = IntegrateBC(u, dbc_bdr, 0.0, 1.0, dbc_val)\n\n    hom_dbc = (dbc_val == 0.0)\n    err /= 1.0 if hom_dbc else abs(dbc_val)\n    print0(\"Average of solution on Gamma_dbc:\\t\" +\n           \"{:g}\".format(avg) + \", \\t\" +\n           (\"absolute\" if hom_dbc else \"relative\") +\n           \" error \" + \"{:g}\".format(err))\n\n    # Integrate n.Grad(u) on the inhomogeneous Neumann boundary and compare\n    # to the expected value.\n    err, avg = IntegrateBC(u, nbc_bdr, 1.0, 0.0, nbc_val)\n\n    hom_nbc = (nbc_val == 0.0)\n    err /= 1.0 if hom_nbc else abs(nbc_val)\n    print0(\"Average of n.Grad(u) on Gamma_ndbc:\\t\" +\n           \"{:g}\".format(avg) + \", \\t\" +\n           (\"absolute\" if hom_nbc else \"relative\") +\n           \" error \" + \"{:g}\".format(err))\n\n    # Integrate n.Grad(u) on the homogeneous Neumann boundary and compare to\n    # the expected value of zero.\n    nbc0_bdr = mfem.intArray(mesh.bdr_attributes.Max())\n    nbc0_bdr.Assign(0)\n    nbc0_bdr[3] = 1\n\n    err, avg = IntegrateBC(u, nbc0_bdr, 1.0, 0.0, 0.0)\n    hom_nbc = True\n    print0(\"Average of n.Grad(u) on Gamma_ndbc0:\\t\" +\n           \"{:g}\".format(avg) + \", \\t\" +\n           (\"absolute\" if hom_nbc else \"relative\") +\n           \" error \" + \"{:g}\".format(err))\n\n    # Integrate n.Grad(u) + a * u on the Robin boundary and compare to the\n    # expected value.\n    err, avg = IntegrateBC(u, rbc_bdr, 1.0, rbc_a_val, rbc_b_val)\n\n    hom_rbc = (rbc_b_val == 0.0)\n    err /= 1.0 if hom_rbc else abs(rbc_b_val)\n    print0(\"Average of n.Grad(u)+a*u on Gamma_rdbc:\\t\" +\n           \"{:g}\".format(avg) + \", \\t\" +\n           (\"absolute\" if hom_rbc else \"relative\") +\n           \" error \" + \"{:g}\".format(err))\n\n    # 15. Save the refined mesh and the solution. This output can be viewed\n    #     later using GLVis: \"glvis -m refined.mesh -g sol.gf\".\n    pmesh.Print(\"mesh\"+smyid, 8)\n    u.Save(\"sol\"+smyid, 8)\n\n    # 16. 
Send the solution by socket to a GLVis server.\n if visualization:\n title_str = \"H1\" if h1 else \"DG\"\n sol_sock = mfem.socketstream(\"localhost\", 19916)\n sol_sock << \"parallel \" << num_procs << \" \" << myid << \"\\n\"\n sol_sock.precision(8)\n sol_sock << \"solution\\n\" << mesh << u\n sol_sock << \"window_title '\" << title_str << \" Solution'\"\n sol_sock << \" keys 'mmc'\"\n sol_sock.flush()\n\n\nif __name__ == \"__main__\":\n from mfem.common.arg_parser import ArgParser\n\n parser = ArgParser(description='Ex27 (Laplace boundary conditions)')\n\n parser.add_argument(\"-h1\", \"--continuous\",\n action='store_true', default=True,\n help='Select continuous \"H1\" element')\n parser.add_argument(\"-dg\", \"--discontinuous\",\n action='store_true', default=False,\n help='Select discontinuous \"DG\" element')\n parser.add_argument('-o', '--order',\n action='store', default=1, type=int,\n help=\"Finite element order (polynomial degree)\")\n parser.add_argument(\"-s\", \"--sigma\",\n action='store', default=-1.0, type=float,\n help=\"One of the two DG penalty parameters, typically +1/-1.\" +\n \" See the documentation of class DGDiffusionIntegrator.\")\n parser.add_argument(\"-k\", \"--kappa\",\n action='store', default=-1.0, type=float,\n help=\"One of the two DG penalty parameters, should be positive.\" +\n \" Negative values are replaced with (order+1)^2.\")\n parser.add_argument(\"-rs\", \"--refine-serial\",\n action='store', default=2, type=int,\n help=\"Number of times to refine the mesh uniformly in serial.\")\n parser.add_argument(\"-rp\", \"--refine-parallel\",\n action='store', default=1, type=int,\n help=\"Number of times to refine the mesh uniformly in parallel.\")\n parser.add_argument(\"-mat\", \"--material-value\",\n action='store', default=1.0, type=float,\n help=\"Constant value for material coefficient \" +\n \"in the Laplace operator.\")\n parser.add_argument(\"-dbc\", \"--dirichlet-value\",\n action='store', default=0.0, type=float,\n help=\"Constant value for Dirichlet Boundary Condition.\")\n parser.add_argument(\"-nbc\", \"--neumann-value\",\n action='store', default=1.0, type=float,\n help=\"Constant value for Neumann Boundary Condition.\")\n parser.add_argument(\"-rbc-a\", \"--robin-a-value\",\n action='store', default=1.0, type=float,\n help=\"Constant 'a' value for Robin Boundary Condition: 
\" +\n \"du/dn + a * u = b.\")\n parser.add_argument(\"-rbc-b\", \"--robin-b-value\",\n action='store', default=1.0, type=float,\n help=\"Constant 'b' value for Robin Boundary Condition: \" +\n \"du/dn + a * u = b.\")\n parser.add_argument(\"-a\", \"--radius\",\n action='store', default=0.2, type=float,\n help=\"Radius of holes in the mesh.\")\n parser.add_argument('-vis', '--visualization',\n action='store_true', default=True,\n help='Enable GLVis visualization')\n parser.add_argument('-no-vis', '--no_visualization',\n action='store_true', default=False,\n help='Disable GLVis visualization')\n\n args = parser.parse_args()\n\n h1 = args.continuous\n if args.discontinuous:\n h1 = False\n args.continuous = False\n vis = True\n if args.no_visualization:\n vis = False\n args.visualization = False\n\n if myid == 0:\n parser.print_options(args)\n run(order=args.order,\n h1=h1,\n sigma=args.sigma,\n kappa=args.kappa,\n ser_ref_levels=args.refine_serial,\n par_ref_levels=args.refine_parallel,\n mat_val=args.material_value,\n dbc_val=args.dirichlet_value,\n nbc_val=args.neumann_value,\n rbc_a_val=args.robin_a_value,\n rbc_b_val=args.robin_b_value,\n a_=args.radius,\n visualization=vis)\n","sub_path":"examples/ex27p.py","file_name":"ex27p.py","file_ext":"py","file_size_in_byte":23045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"504255888","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport paho.mqtt.client as mqtt\nimport RPi.GPIO as gpio\n\ndef gpioSetup():\n\t\n\tgpio.setmode(gpio.BCM)\n\tgpio.setup(23, gpio.OUT)\n\ndef connectionStatus(client, userdata, flags, rc):\n\tmqttClient.subscribe(\"rpi/gpio\")\n\ndef messageDecoder(client, userdata, msg):\n\tmessage = msg.payload.decode(encoding='UTF-8')\n\t\n\tif message == \"on\":\n\t\tgpio.output(23, gpio.HIGH)\n\t\tprint(\"LED is ON!\")\n\telif message == \"off\":\n\t\tgpio.output(23, gpio.LOW)\n\t\tprint(\"LED is OFF!\")\n\telse:\n\t\tprint(\"Unknown message!\")\n\ngpioSetup()\n\nclientName = \"RPI\"\nserverAddress = \"192.168.1.116\"\n\nmqttClient = mqtt.Client(clientName)\n\nmqttClient.on_connect = connectionStatus\nmqttClient.on_message = messageDecoder\n\nmqttClient.connect(serverAddress)\nmqttClient.loop_forever()\n\n","sub_path":"PythonCode/SweetHome.py","file_name":"SweetHome.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"71509607","text":"#!/usr/bin/env python3\nimport os\nimport json\nfrom pathlib import Path\n\nimport troposphere.s3 as s3\nfrom troposphere import Template, GetAtt, Join, Ref, Output, Select, Split\nfrom troposphere.apigateway import RestApi, Method\nfrom troposphere.apigateway import Resource, MethodResponse\nfrom troposphere.apigateway import Integration, IntegrationResponse\nfrom troposphere.apigateway import Deployment, Stage\nfrom troposphere.iam import Role, Policy\nfrom troposphere.awslambda import Function, Code\nfrom troposphere.dynamodb import (KeySchema, AttributeDefinition, ProvisionedThroughput)\nfrom troposphere.dynamodb import Table\nfrom troposphere.s3 import *\nimport troposphere.cloudfront as cf\nfrom troposphere.certificatemanager import Certificate, DomainValidationOption\nfrom troposphere.route53 import RecordSetType, AliasTarget\n\n# Function that saves the file , but makes sure it exists first.\ndef save_to_file(template, settings_file_path='./cloudformation/template.json'):\n # Create settings file if it doesn't exist:\n 
settings_file = Path(settings_file_path)\n if settings_file.is_file():\n with open(settings_file_path, 'w+') as outfile:\n json.dump(template, outfile, indent=2)\n else:\n with open(settings_file_path, 'a'):\n os.utime(settings_file_path, None)\n with open(settings_file_path, 'w+') as outfile:\n json.dump(template, outfile, indent=2)\n\n\napp_group = \"Trent-Lab-Cloud-Resume-Challenge\"\napp_group_l = app_group.lower()\napp_group_ansi = app_group_l.replace(\"-\", \"\")\ncfront_zone_id = 'Z2FDTNDATAQYW2'\nenv = 'Staging-A'\nenv_l = env.lower()\n\n# Create Default Tags\nDefaultTags = Tags(Business='YIT') + \\\n Tags(Lab='True')\n\n# Set template variables\nstage_name = 'v1'\n\n##### Dynamo Variables\nreadunits = 5\nwriteunits = 5\n# The name of the table\nhashkeyname = 'counters'\n# Table data type (N is for number/integer)\nhashkeytype = 'N'\n\ndns_domain = 'trentnielsen.me'\n\n# Prepare Template\nt = Template()\nt.set_description('YIT: {}'.format(app_group))\n\n################################################################################################################\n# CloudFront and S3 for static hosting\n################################################################################################################\n\nredirect_domains = {\n 'resume.trentnielsen.me': {\n 'zone_name': 'trentnielsen.me',\n 'redirect_target': 'resume.trentnielsen.me',\n 'alt_sub': 'www',\n },\n}\n\nfor src_domain, domain_info in redirect_domains.items():\n\n bucketResourceName = \"{}0{}0Bucket\".format(app_group_ansi, src_domain.replace('.', '0'))\n\n redirectBucket = t.add_resource(Bucket(\n bucketResourceName,\n BucketName='{}'.format(src_domain),\n Tags=DefaultTags + Tags(Component='{}'.format(src_domain)),\n WebsiteConfiguration=(s3.WebsiteConfiguration(\n IndexDocument='index.html'\n ))\n ))\n\n # Set some cdn based values and defaults\n dns_domain = domain_info['zone_name']\n cdn_domain = '{}'.format(src_domain)\n max_ttl = 31536000,\n default_ttl = 86400,\n\n # If an alt_sub domain is not specified use empty string\n if domain_info['alt_sub'] != '':\n alternate_name = '{}.{}'.format(domain_info['alt_sub'], src_domain)\n else:\n alternate_name = ''\n\n # Provision certificate for CDN\n cdnCertificate = t.add_resource(Certificate(\n 'cdnCertificate{}'.format(src_domain.replace('.', '0')),\n DomainName=cdn_domain,\n DependsOn=redirectBucket,\n SubjectAlternativeNames=[alternate_name],\n DomainValidationOptions=[DomainValidationOption(\n DomainName=cdn_domain,\n ValidationDomain=dns_domain\n )],\n ValidationMethod='DNS',\n Tags=DefaultTags + Tags(Name='{}-{}'.format(env_l, app_group_l))\n ))\n\n # Provision the CDN Origin\n cdnOrigin = cf.Origin(\n Id='{}-{}-{}'.format(env_l, app_group_l, src_domain),\n DomainName=Select(1, Split('//', GetAtt(redirectBucket, 'WebsiteURL'))),\n CustomOriginConfig=cf.CustomOriginConfig(\n HTTPPort=80,\n HTTPSPort=443,\n OriginProtocolPolicy='http-only',\n OriginSSLProtocols=['TLSv1.2'],\n )\n )\n\n # Provision the CDN Distribution\n cdnDistribution = t.add_resource(cf.Distribution(\n 'cdnDistribution{}'.format(src_domain.replace('.', '0')),\n DependsOn='cdnCertificate{}'.format(src_domain.replace('.', '0')),\n DistributionConfig=cf.DistributionConfig(\n Comment='{} - {}'.format(env, cdn_domain),\n Enabled=True,\n PriceClass='PriceClass_All',\n HttpVersion='http2',\n Origins=[\n cdnOrigin,\n ],\n Aliases=[cdn_domain, alternate_name],\n ViewerCertificate=cf.ViewerCertificate(\n AcmCertificateArn=Ref(cdnCertificate),\n SslSupportMethod='sni-only',\n 
MinimumProtocolVersion='TLSv1.2_2018',\n ),\n DefaultCacheBehavior=cf.DefaultCacheBehavior(\n AllowedMethods=['GET', 'HEAD', 'OPTIONS'],\n CachedMethods=['GET', 'HEAD'],\n ViewerProtocolPolicy='redirect-to-https',\n TargetOriginId='{}-{}-{}'.format(env_l, app_group_l, src_domain),\n ForwardedValues=cf.ForwardedValues(\n Headers=[\n \"Accept-Encoding\"\n ],\n QueryString=True,\n ),\n MinTTL=0,\n MaxTTL=int(max_ttl[0]),\n DefaultTTL=int(default_ttl[0]),\n SmoothStreaming=False,\n Compress=True\n ),\n CustomErrorResponses=[\n cf.CustomErrorResponse(\n ErrorCachingMinTTL='0',\n ErrorCode='403',\n ),\n cf.CustomErrorResponse(\n ErrorCachingMinTTL='0',\n ErrorCode='404',\n ),\n cf.CustomErrorResponse(\n ErrorCachingMinTTL='0',\n ErrorCode='500',\n ),\n cf.CustomErrorResponse(\n ErrorCachingMinTTL='0',\n ErrorCode='501',\n ),\n cf.CustomErrorResponse(\n ErrorCachingMinTTL='0',\n ErrorCode='502',\n ),\n cf.CustomErrorResponse(\n ErrorCachingMinTTL='0',\n ErrorCode='503',\n ),\n cf.CustomErrorResponse(\n ErrorCachingMinTTL='0',\n ErrorCode='504',\n ),\n ],\n ),\n Tags=DefaultTags\n ))\n\n cdnARecord = t.add_resource(RecordSetType(\n \"{}{}Adns\".format(app_group_ansi, src_domain.replace('.', '0')),\n HostedZoneName='{}.'.format(dns_domain),\n Comment=\"{} domain record\".format(cdn_domain),\n Name='{}'.format(cdn_domain),\n Type=\"A\",\n AliasTarget=AliasTarget(\n HostedZoneId=cfront_zone_id,\n DNSName=GetAtt(cdnDistribution, \"DomainName\")\n )\n ))\n\n cdnAAAARecord = t.add_resource(RecordSetType(\n \"{}{}AAAAdns\".format(app_group_ansi, src_domain.replace('.', '0')),\n HostedZoneName='{}.'.format(dns_domain),\n Comment=\"{} domain record\".format(cdn_domain),\n Name='{}'.format(cdn_domain),\n Type=\"AAAA\",\n AliasTarget=AliasTarget(\n HostedZoneId=cfront_zone_id,\n DNSName=GetAtt(cdnDistribution, \"DomainName\")\n )\n ))\n\n if domain_info['alt_sub'] != '':\n cdnAlternativeARecord = t.add_resource(RecordSetType(\n \"{}{}AlternativeAdns\".format(app_group_ansi, src_domain.replace('.', '0')),\n HostedZoneName='{}.'.format(dns_domain),\n Comment=\"{} domain record\".format(alternate_name),\n Name='{}'.format(alternate_name),\n Type=\"A\",\n AliasTarget=AliasTarget(\n HostedZoneId=cfront_zone_id,\n DNSName=GetAtt(cdnDistribution, \"DomainName\")\n )\n ))\n\n cdnAlternativeAAAARecord = t.add_resource(RecordSetType(\n \"{}{}AlternativeAAAAdns\".format(app_group_ansi, src_domain.replace('.', '0')),\n HostedZoneName='{}.'.format(dns_domain),\n Comment=\"{} domain record\".format(alternate_name),\n Name='{}'.format(alternate_name),\n Type=\"AAAA\",\n AliasTarget=AliasTarget(\n HostedZoneId=cfront_zone_id,\n DNSName=GetAtt(cdnDistribution, \"DomainName\")\n )\n ))\n\n # Redirect outputs\n t.add_output([\n Output(\n 'CDNDomainOutput{}'.format(src_domain.replace('.', '0')),\n Description=\"Domain for CDN\",\n Value=GetAtt(cdnDistribution, 'DomainName'),\n )\n ])\n\n#####################################################################################################################\n# API Gateway\n#####################################################################################################################\nrest_api = t.add_resource(RestApi(\n \"api\",\n Name=\"{}-{}\".format(env_l, app_group_l)\n))\n\n#####################################################################################################################\n# DynamoDB table\n#####################################################################################################################\nmyDynamoDB = t.add_resource(Table(\n 
\"myDynamoDBTable\",\n TableName='counters',\n AttributeDefinitions=[\n AttributeDefinition(\n AttributeName='website',\n AttributeType='S'\n )\n ],\n KeySchema=[\n KeySchema(\n AttributeName='website',\n KeyType='HASH'\n )\n ],\n ProvisionedThroughput=ProvisionedThroughput(\n ReadCapacityUnits=readunits,\n WriteCapacityUnits=writeunits\n )\n))\n\n#####################################################################################################################\n# Lambda\n#####################################################################################################################\n# Create a Lambda function that will be mapped\ncode = [\n \"var response = require('cfn-response');\",\n \"exports.handler = function(event, context) {\",\n \" context.succeed('foobar!');\",\n \" return 'foobar!';\",\n \"};\",\n]\n\n# Create a role for the lambda function\nt.add_resource(Role(\n \"LambdaExecutionRole\",\n Path=\"/\",\n Policies=[Policy(\n PolicyName=\"root\",\n PolicyDocument={\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Action\": [\"logs:*\"],\n \"Resource\": \"arn:aws:logs:*:*:*\",\n \"Effect\": \"Allow\"\n },\n {\n \"Action\": [\"lambda:*\"],\n \"Resource\": \"*\",\n \"Effect\": \"Allow\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"dynamodb:BatchGetItem\",\n \"dynamodb:GetItem\",\n \"dynamodb:Query\",\n \"dynamodb:Scan\",\n \"dynamodb:BatchWriteItem\",\n \"dynamodb:PutItem\",\n \"dynamodb:UpdateItem\"\n ],\n \"Resource\": GetAtt('myDynamoDBTable', 'Arn')\n }]\n })],\n AssumeRolePolicyDocument={\"Version\": \"2012-10-17\", \"Statement\": [\n {\n \"Action\": [\"sts:AssumeRole\"],\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": [\n \"lambda.amazonaws.com\",\n \"apigateway.amazonaws.com\"\n ]\n }\n }\n ]},\n))\n\nfunction = t.add_resource(Function(\n \"function\",\n Code=Code(\n ZipFile=Join(\"\", code)\n ),\n Handler=\"index.handler\",\n Role=GetAtt(\"LambdaExecutionRole\", \"Arn\"),\n Runtime=\"python3.7\",\n))\n\n# Create a resource to map the lambda function to\nresource = t.add_resource(Resource(\n \"resource\",\n RestApiId=Ref(rest_api),\n PathPart=\"dynamo\",\n ParentId=GetAtt(\"api\", \"RootResourceId\"),\n))\n\n# Create a get method that integrates into Lambda\ngetmethod = t.add_resource(Method(\n \"getmethod\",\n RestApiId=Ref(rest_api),\n AuthorizationType=\"NONE\",\n ResourceId=Ref(resource),\n HttpMethod=\"GET\",\n MethodResponses=[\n MethodResponse(\n \"CatResponse\",\n StatusCode='200',\n ResponseModels={\n 'application/json': 'Empty'\n }\n )\n ],\n Integration=Integration(\n Credentials=GetAtt(\"LambdaExecutionRole\", \"Arn\"),\n PassthroughBehavior='WHEN_NO_MATCH',\n Type=\"AWS\",\n IntegrationHttpMethod='POST',\n IntegrationResponses=[\n IntegrationResponse(\n StatusCode='200',\n ResponseTemplates={\n 'application/json': ''\n },\n )\n ],\n Uri=Join(\"\", [\n \"arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/\",\n GetAtt(\"function\", \"Arn\"),\n \"/invocations\"\n ]),\n\n RequestTemplates={\n 'application/json': '{\"statusCode\": 200}'\n },\n ),\n))\n\ndeployment = t.add_resource(Deployment(\n \"%sDeployment\" % stage_name,\n DependsOn=\"optionsmethod\",\n RestApiId=Ref(rest_api),\n))\n\nstage = t.add_resource(Stage(\n '%sStage' % stage_name,\n StageName=stage_name,\n RestApiId=Ref(rest_api),\n DeploymentId=Ref(deployment)\n))\n\n# Create cname record for all mount points\napiCname = t.add_resource(RecordSetType(\n 'apiCname',\n HostedZoneName='{}.'.format(dns_domain),\n Comment=\"{} API gateway domain 
record\".format(app_group_l),\n Name='{}.{}'.format('api', dns_domain),\n Type=\"CNAME\",\n TTL=\"900\",\n ResourceRecords=[Join(\"\", [\n Ref(rest_api),\n \".execute-api.us-east-1.amazonaws.com\"\n ])]\n))\n\n#####################################################################################################################\n# Output\n#####################################################################################################################\n\n# API gateway outputs\nt.add_output([\n Output(\n \"ApiEndpoint\",\n Value=Join(\"\", [\n \"https://\",\n Ref(rest_api),\n \".execute-api.us-east-1.amazonaws.com/\",\n stage_name\n ]),\n Description=\"Endpoint for this stage of the api\"\n ),\n])\n# DynamoDB outputs\nt.add_output(Output(\n \"TableName\",\n Value=Ref(myDynamoDB),\n Description=\"Table name of the newly create DynamoDB table\",\n))\n\n# Load the data into a json object\njson_data = json.loads(t.to_json())\n\n# Save the file to disk\nsave_to_file(json_data)\n","sub_path":"cloudformation/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":14959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"171574816","text":"from datetime import datetime\n\n\n# IDADE: 22\n# TEM DIVIDA: SIM (150 reais no dia 04/06/2020)\n# CONSULTA NOS ÚLTIMOS 3 MESES: SIM (1)\n\ndef calcular(nome, idade):\n score = 1000\n resumo = {\n \"nome\": nome,\n \"score\": score,\n \"variaveis\": {\n 'idade': idade,\n 'consultado': True,\n 'dividas': [\n {\n 'valor_original': 150\n }\n ]\n }\n }\n\n score = calcular_indicador_idade(idade, score)\n score = calcular_indicador_consulta(score)\n score = calcular_indicador_dividas_atrasos(score, resumo)\n\n return int(score)\n\n\ndef calcular_indicador_idade(idade, score):\n if idade < 18:\n score = 0\n elif 18 >= idade <= 19:\n score /= 3\n elif 20 >= idade <= 21:\n score /= 2\n elif idade >= 22:\n score /= 1.5\n return score\n\n\ndef calcular_indicador_consulta(score):\n qtd_consultas = 1\n score -= (qtd_consultas * 50)\n return score\n\n\ndef calcular_indicador_dividas_atrasos(score, resumo):\n dividas = resumo['variaveis']['dividas']\n if len(dividas) > 0:\n for divida in dividas:\n data_divida = datetime.strptime('04/06/2020', '%d/%m/%Y').date()\n dias_atraso = (datetime.now().date() - data_divida).days\n valor_divida_atual = divida['valor_original'] + dias_atraso\n divida['dias_atraso'] = dias_atraso\n divida['valor_atual'] = valor_divida_atual\n score -= dias_atraso\n return score\n\n\nclass TestNatalia:\n\n def test_idade_negativa(self):\n assert calcular_indicador_idade(idade=-1, score=10) == 0\n\n def test_idade_dezoito_anos(self):\n assert calcular_indicador_idade(idade=18, score=12) == 4\n\n def test_idade_vinte_anos(self):\n assert calcular_indicador_idade(idade=20, score=10) == 5\n\n def test_idade_trinta_anos(self):\n assert calcular_indicador_idade(idade=30, score=3) == 2\n\n def test_calcular_indicador_consulta(self):\n assert calcular_indicador_consulta(score=1000) == 950\n\n def test_calcular_indicador_dividas_atrasos(self):\n resumo = {\n 'variaveis':{\n 'dividas': [\n {\n 'valor_original': 150\n }\n ]\n }\n }\n assert calcular_indicador_dividas_atrasos(score=1000, resumo=resumo) == 995\n","sub_path":"src/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"112753934","text":"from datetime import datetime\nimport numpy as np\nimport imagezmq\nimport 
argparse\nimport imutils\nimport cv2\n\nprint('Initializing ImageHub object...')\n# initialize the ImageHub object\nimageHub = imagezmq.ImageHub()\nprint('Successful.')\n\n# initialize the dictionary which will contain information regarding\n# when a device was last active, then record that the last check was\n# made just now\nlastActive = {}\nlastActiveCheck = datetime.now()\n\nframeDict = {}\n\ni=0\n# start looping over all the frames\nwhile True:\n # receive RPi name and frame from the RPi and acknowledge\n # the receipt\n if i==0:\n print('Receiving image...')\n else:\n print('Frame {}...'.format(i))\n (rpiName, frame) = imageHub.recv_image()\n imageHub.send_reply(b'OK')\n # if a device is not in the last active dictionary then it means\n # that it's a newly connected device\n if rpiName not in lastActive.keys():\n print(\"[INFO] receiving data from {}...\".format(rpiName))\n # record the last active time for the device from which we just\n # received a frame\n lastActive[rpiName] = datetime.now()\n print('{}|{}'.format(rpiName, lastActive[rpiName]))\n \n frameDict[rpiName] = frame\n \n cv2.imshow('wow',frame)\n \n # detect any keypresses\n key = cv2.waitKey(1) & 0xFF\n \n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n \n i = i + 1\n# do a bit of cleanup\ncv2.destroyAllWindows()","sub_path":"obj_det/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"514663184","text":"#!/usr/bin/env python\n\nimport argparse\n\nimport chainer\nimport fcn\nimport numpy as np\nimport tqdm\n\nimport instance_occlsegm_lib\nfrom instance_occlsegm_lib.contrib import synthetic2d\n\n\ndef main():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n item_data_choices = list(\n synthetic2d.datasets.InstanceImageDataset.item_data_dirs.keys()\n )\n parser.add_argument(\n '--item-data',\n default='arc2017_all',\n choices=item_data_choices,\n help='item data',\n )\n parser.add_argument('--gpu', '-g', type=int, default=0, help='gpu id')\n args = parser.parse_args()\n\n model = fcn.models.FCN8sAtOnce(n_class=2)\n model_file = instance_occlsegm_lib.data.download(\n url='https://drive.google.com/uc?id=1k7qCONNta6WDjqdXAXdem8rDQjWPWeFb',\n md5='5739cb23249993428dcc67e6d763b00a',\n )\n chainer.serializers.load_npz(model_file, model)\n\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n\n dataset = synthetic2d.datasets.InstanceImageDataset(\n item_data_dir=args.item_data, load_pred=False\n )\n obj_data_dir = dataset.item_data_dir\n obj_names, obj_data = instance_occlsegm_lib.datasets.apc.\\\n arc2017.load_item_data(obj_data_dir, skip_known=False)\n\n for obj_datum in tqdm.tqdm(obj_data):\n img_file = obj_datum['img_file']\n img = obj_datum['img']\n lbl_fgbg = obj_datum['mask'].astype(np.int32)\n\n x = fcn.datasets.transform_lsvrc2012_vgg16((img, lbl_fgbg))[0]\n x = chainer.cuda.to_gpu(x)\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n model(x[None])\n lbl_pred = model.xp.argmax(model.score.array, axis=1)\n lbl_pred = chainer.cuda.to_cpu(lbl_pred)[0]\n # lbl_fgbg = instance_occlsegm_lib.image.label2rgb(lbl_fgbg, img)\n # lbl_pred = instance_occlsegm_lib.image.label2rgb(lbl_pred, img)\n # instance_occlsegm_lib.io.imshow(\n # instance_occlsegm_lib.image.tile([img, lbl_fgbg, lbl_pred]))\n # instance_occlsegm_lib.io.waitkey()\n\n mask_pred = lbl_pred.astype(np.uint8) * 
255\n mask_file = img_file + '.mask_pred.jpg'\n instance_occlsegm_lib.io.imsave(mask_file, mask_pred)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"demos/instance_occlsegm/examples/synthetic2d/instance_image_fg_extraction/generate_item_data_mask_pred.py","file_name":"generate_item_data_mask_pred.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"574617166","text":"import RPi.GPIO as GPIO\nimport time\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\n#GPIO.setmode(GPIO.BCM)\nMoSenPin = 16\nLEDPin = 15\nGPIO.setup(LEDPin,GPIO.OUT)\nGPIO.setup(MoSenPin,GPIO.IN)\ndef blink(LEDPin): # function to make LED blink once\n GPIO.output(LEDPin,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(LEDPin,GPIO.LOW)\n return\ndef main(MoSenPin,LEDPin):\n while 1:\n if GPIO.input(MoSenPin):\n blink(LEDPin)\n return\n GPIO.cleanup()\n","sub_path":"Scripts/MotionSensor.py","file_name":"MotionSensor.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"235694221","text":"import scrapy \nimport pandas as pd\nimport numpy as np\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\n\nclass AraniaCrawlRanking2018(scrapy.Spider):\n name = 'ranking_universidades_2018'\n\n urls = [\n 'https://cwur.org/2018-19.php'\n ]\n\n def start_requests(self):\n for url in self.urls:\n yield scrapy.Request(url=url)\n\n def parse(self, response):\n\n titulos = response.css('table.table > thead > tr > th::text').extract()\n universidad_pais = response.css('table.table > tbody > tr > td > a::text').extract()\n datos_numericos = response.css('table.table > tbody > tr > td::text').extract()\n\n World_Rank = np.array([])\n Institution = np.array([])\n Location = np.array([])\n National_Rank = np.array([])\n Quality_of_Education = np.array([])\n Alumni_Employment = np.array([])\n Quality_of_Faculty = np.array([])\n Research_Output = np.array([])\n Quality_Publications = np.array([])\n Influence = np.array([])\n Citations = np.array([])\n Score = np.array([])\n\n i = 0\n while i < (len(universidad_pais) - 1):\n Institution = np.append(Institution, universidad_pais[i])\n Location = np.append(Location, universidad_pais[i + 1])\n i = i + 2\n \n i = 0\n while i < (len(datos_numericos) - 9):\n World_Rank = np.append(World_Rank, datos_numericos[i])\n National_Rank = np.append(National_Rank, datos_numericos[i + 1])\n Quality_of_Education = np.append(Quality_of_Education, datos_numericos[i + 2])\n Alumni_Employment = np.append(Alumni_Employment, datos_numericos[i + 3])\n Quality_of_Faculty = np.append(Quality_of_Faculty, datos_numericos[i + 4])\n Research_Output = np.append(Research_Output, datos_numericos[i + 5])\n Quality_Publications = np.append(Quality_Publications, datos_numericos[i + 6])\n Influence = np.append(Influence, datos_numericos[i + 7])\n Citations = np.append(Citations, datos_numericos[i + 8])\n Score = np.append(Score, datos_numericos[i + 9])\n i = i + 10\n\n datos = {\n 'World_Rank': World_Rank,\n 'Institution': Institution,\n 'Location': Location,\n 'National_Rank': National_Rank,\n 'Quality_of_Education': Quality_of_Education,\n 'Alumni_Employment': Alumni_Employment,\n 'Quality_of_Faculty': Quality_of_Faculty,\n 'Research_Output': Research_Output,\n 'Quality_Publications': Quality_Publications,\n 'Influence': Influence,\n 'Citations': Citations,\n 'Score': Score\n }\n\n data_frame = pd.DataFrame(datos, columns 
= ['World_Rank', 'Institution', 'Location', 'National_Rank', 'Quality_of_Education', 'Alumni_Employment','Quality_of_Faculty','Research_Output','Quality_Publications','Influence','Citations','Score'])\n data_frame = data_frame.replace({\" - \": '0', \" > 1000\": '1001', '> 1000': '1001', ' > 1000':'1001', '101+': '102'})\n data_frame.to_csv('data_scrapy_2018.csv')\n\n","sub_path":"Informe de dataset Scrapy/scrapy/arania_universidades/arania_universidades/spiders/arania_universidades_2018.py","file_name":"arania_universidades_2018.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"645577418","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 10 19:24:52 2018\n@author: heerokbanerjee\n\"\"\"\n\nimport numpy as np\n\nprint(\"*************BACKPROPAGATION NEURAL NETWORK IMPLEMENTATION*************\")\nprint(\"Constant Parameters-\")\nprint(\"Number of Input Neurons- 2\")\nprint(\"Number of Hidden Layer Neurons-3\")\nprint(\"Number of Output Neurons- 1\")\n\n\n#Input of Weight Matrix , W1:\n\nprint(\"Enter Input-->Hidden Layer Weights (3*2)-\")\n\nW1=[[0 for x in range(3)] for y in range(2)] \nfor i in range(0,2):\n for j in range(0,3):\n W1[i][j]=input(\"Enter Weight (\"+str(i)+\",\"+str(j)+\"):\")\n\n#Input of Weight Matrix , W2: \n \nprint(\"Enter Hidden-->Output Layer Weights (3*1)-\")\n\nW2=[[0 for x in range(1)] for y in range(3)] \nfor i in range(0,3):\n for j in range(0,1):\n W2[i][j]=input(\"Enter Weight (\"+str(i)+\",\"+str(j)+\"):\")\n \n\n#Training Examples for getting expected output\nX = np.asarray(([4, 2], [8, 3], [9, 12]), dtype=float)\ny = np.array(([87], [63], [49]), dtype=float)\n\n\n#Scaling to 0.xx form\nX = X/np.amax(X, axis=0)\ny = y/100 \n\nclass BackPropNeuralNetwork(object):\n def __init__(self):\n \n self.W1 = np.asarray(W1, dtype=np.float32)\n self.W2 = np.asarray(W2, dtype=np.float32)\n\n def NetInput(self, X):\n \n self.DotProd = np.dot(X, self.W1)\n self.DotProd2 = self.sigmoid(self.DotProd) \n self.DotProd3 = np.dot(self.DotProd2, self.W2) \n o = self.sigmoid(self.DotProd3)\n return o \n\n def sigmoid(self, s):\n \n return 1/(1+np.exp(-s))\n\n def sigmoidPrime(self, s):\n\n return s * (1 - s)\n\n def BackPropagate_Error(self, X, y, op_error):\n \n self.output_error = y - op_error\n self.op_delta = self.output_error*self.sigmoidPrime(op_error)\n\n self.DotProd2_error = self.op_delta.dot(self.W2.T)\n self.DotProd2_delta = self.DotProd2_error*self.sigmoidPrime(self.DotProd2) \n self.W1 += X.T.dot(self.DotProd2_delta) \n self.W2 += self.DotProd2.T.dot(self.op_delta) \n\n \n def trainNeuralNetwork (self, X, y):\n op_HidToOut = self.NetInput(X)\n self.BackPropagate_Error(X, y, op_HidToOut)\n\nNN = BackPropNeuralNetwork()\n\n#learning parameters, threshold value & Initial residual error set to 1.\nthreshold=0.0005\nmse=1\nmax_iterations=100000\n\nfor i in range(max_iterations):\n \n print(\"Input Matrix: \\n\" + str(X))\n print(\"Predicted Output: \\n\" + str(NN.NetInput(X))) \n print(\"Desired Output: \\n\" + str(y)) \n\n mse=np.mean(np.square(y - NN.NetInput(X)))\n print(\"Error: \\n\" + str(mse))\n # stop early once the residual error falls below the threshold\n if mse < threshold:\n break\n NN.trainNeuralNetwork(X, y)\n \n\nprint(\"Weight Matrix,W1 (Input --> Hidden Layer) :\")\nprint(NN.W1)\n\n\nprint(\"Weight Matrix,W2 (Hidden --> Output Layer) :\")\nprint(NN.W2)","sub_path":"BPNN 
Implementation/BPN_implement.py","file_name":"BPN_implement.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"318945597","text":"from django.core.mail import send_mail\nfrom django.conf import settings\nimport logging\n\nfrom celery_task.main import celery_app\n\nlogger = logging.getLogger('django')\n\"\"\"\nbind: ensures the task object is automatically passed in as the first argument\nname: name of the async task\nretry_backoff: interval between automatic retries on error; the nth retry waits retry_backoff*2^(n-1)\nmax_retries: upper limit on the number of automatic retries on error\n\"\"\"\n\n\n@celery_app.task(bind=True, name='send_verify_email', retry_backoff=3)\ndef send_verify_email(self, to_email, verify_url):\n \"\"\"\n Send the mailbox verification email\n :param to_email: recipient mailbox\n :param verify_url: verification link\n :return: None\n send_mail(subject, message, from_email, recipient_list, html_message=None)\n subject: email subject\n message: plain-text email body (an ordinary string)\n from_email: sender\n recipient_list: list of recipients\n html_message: rich email body; may be an HTML string\n \"\"\"\n subject = \"Meiduo Mall mailbox verification\"\n html_message = '<p>Dear user, hello!</p>' \\\n '<p>Thank you for using Meiduo Mall.</p>' \\\n '<p>Your email address is: %s. Please click this link to activate your mailbox:</p>' \\\n '<p><a href=\"%s\">%s</a></p>' % (to_email, verify_url, verify_url)\n try:\n send_mail(subject, \"\", settings.EMAIL_FROM, [to_email], html_message=html_message)\n except Exception as e:\n logger.error(e)\n # retry automatically up to three times on error\n raise self.retry(exc=e, max_retries=3)","sub_path":"meiduo/meiduo_project/celery_task/email/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"485701656","text":"import requests\nfrom bs4 import BeautifulSoup\n\nr = requests.get(\"http://pythonhow.com/example.html\") # fetch the page content\nc = r.content\n\n# print(c) # print the entire page content\n\n\nsoup = BeautifulSoup(c,\"html.parser\")\n# print(soup.prettify()) # dump the tidied html just as it appears on the page\n\n\nall=soup.find_all(\"div\",{\"class\":\"cities\"}) # soup.find_all(\"div\",{\"class\":\"cities\"})[0] is the first element of the Tag object, which supports indexing\n# Londyn= all[0].find_all(\"h2\")[0].text # the zero strips away the list, then we take just the text, i.e. London\n\n\nfor item in all:\n print(item.find_all(\"h2\")[0].text)\n\n\n# For the paragraphs\nfor item in all:\n print(item.find_all(\"p\")[0].text)\n","sub_path":"10 apps Mega Course/web scrapping/wprowadzenie.py","file_name":"wprowadzenie.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"501100341","text":"from operationsAC import useful_functions, acParser, processEvidence\nimport random\n\ndef parseDataset(dataFile,variables,lmap):\n content_data = useful_functions.read_file(dataFile)\n\n\n\n variable_list = content_data[0].split(',')\n # print variables\n # print variable_list\n\n leaves = extract_leaves(lmap, variable_list)\n\n\n\n instance_values = []\n for l, line in enumerate(content_data):\n if l > 0:\n # print 'For instance number ', l\n instance = line.split(',')\n instance_value = []\n il = []\n for i, ins in enumerate(instance):\n # print '\\n', ins, 'is var number ', variables[variable_list[i]].index(ins)\n # print 'Corresponds to node ', variable_list[i], ' and leaf ', leaves[variable_list[i]][variables[variable_list[i]].index(ins)]\n il.append(leaves[variable_list[i]][variables[variable_list[i]].index(ins)])\n\n instance_values.append(il)\n\n\n return variable_list,instance_values,content_data\n\n\ndef 
extract_leaves(lmap,variable_list):\n\n leaves={key: [] for key in variable_list}\n\n # print 'Leaves are ', leaves\n\n for var in variable_list:\n # print var\n lines=[lm for lm in lmap if '$'+var+'$' in lm and '$I$' in lm ]\n nums=[]\n for l in lines:\n syms=useful_functions.indices(l, lambda x: x=='$')\n nums.append(l[syms[1]+1:syms[2]])\n leaves.update({var:nums})\n\n # print 'Leaves are ', leaves\n\n return leaves\n\n\ndef extract_unique_instances():\n # TODO: not implemented yet\n pass","sub_path":"benchmarks/parseDataset.py","file_name":"parseDataset.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"225387053","text":"\nimport sys\n# path to the custom module\nsys.path.append(r'C:\\\\Users\\\\Sameera\\\\Documents\\\\Github\\\\Lets-Play-With-Pytorch')\n\nfrom SkunkWork import Trainer as sw\n\nif __name__ == \"__main__\":\n print('main')\n swd = sw.dSet()\n swd.compile()","sub_path":"Sudoku/sw_module_test.py","file_name":"sw_module_test.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"415152010","text":"import tinify\ntinify.key=\"q3hGlkqcvZsC3td5Rllkw0gkkS69V1D9\"\n\nsource = tinify.from_file(\"gourav.jpg\")\nresized = source.resize(\n method=\"cover\",\n width=500,\n height=300\n)\nresized.to_file(\"new.jpg\")\nprint(\"image is successfully resized\")\n","sub_path":"image_compression.py","file_name":"image_compression.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"188395937","text":"from blog.models import *\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\nimport datetime\nimport twitter\nfrom django.template import defaultfilters\n\n# Create Tweet\ndef tweet(self, request, queryset):\n\tapi = twitter.Api(username='indiesart', password='838dg34') \n\tfor p in queryset:\t\t\n\t\tapi.PostUpdate(p.post_title +' => '+ p.url())\n\tself.message_user(request, _(\"You have created %d tweets \") % len(queryset))\n\t\ndef fix(self, request, queryset):\n\tfor p in queryset:\n\t\td = p.post_date.day\n\t\th = p.post_date.hour\n\t\tif (p.post_date.hour > 19) :\n\t\t\td = d + 1\n\t\t\th = h - 24\n\t\tp.post_date_gmt = datetime.datetime(p.post_date.year, p.post_date.month, d, h + 4, p.post_date.minute, p.post_date.second)\n\t\t\n\t\t\n\t\td = p.post_modified.day\n\t\th = p.post_modified.hour\n\t\tif (p.post_modified.hour > 19) :\n\t\t\td = d + 1\n\t\t\th = h - 24\n\t\tp.post_modified_gmt = datetime.datetime(p.post_modified.year, p.post_modified.month, d, h + 4, p.post_modified.minute, p.post_modified.second)\n\t\tp.post_name = defaultfilters.slugify(p.post_title)\n\t\tp.save()\n\tself.message_user(request, _(\"%d posts have been fixed! 
\") % len(queryset))\n\t\t\ntweet.short_description = _(\"Create Tweet\")\n\nclass PostAdmin(admin.ModelAdmin):\n\tactions = [tweet, fix]\n\t\nadmin.site.register(Post, PostAdmin)","sub_path":"apps/blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"77351866","text":"from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\nfrom pyramid.wsgi import wsgiapp\n\ndef aboutme(request):\n\tfile = open('about/aboutme.html', 'r')\n\tdata = file.read()\n\tfile.close()\n\treturn Response(data)\n\ndef index(request):\n\tfile = open('index.html', 'r')\n\tdata = file.read()\n\tfile.close()\n\treturn Response(data)\n\nclass middleWare(object):\n\tdef __init__(self, app):\n\t\tself.app = app\n\tdef __call__(self, environ, start_response):\n\t\thtml = self.app(environ, start_response)[0].decode()\n\t\ttop = '<div class=''top''>Middleware TOP</div>'\n\t\tbottom = '<div class=''bottom''>Middleware BOTTOM</div>'\n\t\thead, body = html.split('<body>')\n\t\tdata, end = body.split('</body>')\n\t\tdata = '<body>' + top + data + bottom + '</body>'\n\t\treturn [head.encode('utf8') + data.encode('utf8') + end.encode('utf8')]\t\t\n\n\nif __name__ == '__main__':\n\tconfig = Configurator()\n\tconfig.add_route('default', '/')\n\tconfig.add_view(index, route_name = 'default')\n\tconfig.add_route('index', '/index.html')\n\tconfig.add_view(index, route_name = 'index')\n\tconfig.add_route('aboutme', '/about/aboutme.html')\n\tconfig.add_view(aboutme, route_name = 'aboutme')\n\twsgi_app = middleWare(config.make_wsgi_app())\n\tserver = make_server('0.0.0.0', 8000, wsgi_app)\n\tserver.serve_forever()\n","sub_path":"pyramid.py","file_name":"pyramid.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"123316863","text":"import bpy\nimport math\nimport random\nimport bmesh\nimport numpy as np\n\nfrom importlib import *\n# imp.reload(module)\n\nmainPath = \"C:\\\\Users\\Peter\\AppData\\Roaming\\Blender Foundation\\Blender\\\\2.79\\scripts\\\\addons\\Dynamat\\dynamats\"\ndynaPath = \"C:\\\\Users\\Peter\\AppData\\Roaming\\Blender Foundation\\Blender\\\\2.79\\scripts\\\\addons\\Dynamat\\dynamats\"\ntexturesPath = \"C:\\\\Users\\\\Peter\\\\Documents\\\\Textures\"\n# bpy.ops.mesh.primitive_plane_add(radius=2, view_align=False, enter_editmode=True);\n# verts = [(0,0,0),(0,5,0),(5,5,0),(5,0,0),(2.5,2.5,4.5)]\n# faces = [(0,1,2,3), (0,4,1), (1,4,2), (2,4,3), (3,4,0)]\n# mesh = bpy.data.meshes.new(\"Floor\")\n# object = bpy.data.objects.new(\"Floor\", mesh)\n# object.location = bpy.context.scene.cursor_location\n# bpy.context.scene.objects.link(object)\n# mesh.from_pydata(verts,[],faces)\n# mesh.update(calc_edges=True)\n\ndef clearEverthing():\n try:\n bpy.ops.object.mode_set(mode = 'OBJECT')\n except:\n pass\n bpy.context.scene.cursor_location = (0,0,0)\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete(use_global=False)\n\nwhatmsg = None\nsobs = bpy.context.scene.objects\ndef debugMsg(msg):\n print (\"msg\")\n print (msg)\n sobs = bpy.context.scene.objects\n if not \"debugText\" in sobs:\n msg = bpy.ops.object.text_add(view_align=False, enter_editmode=False, location=(-15,15,0))\n bpy.context.object.name = \"debugText\"\n sobs[\"debugText\"].data.body = str(msg)\n\ndef printCursorLocation():\n 
debugMsg(bpy.context.scene.cursor_location)\n\n\nprintfMsg = None\ndef printf(msg, start = False, done = False):\n global printfMsg\n\n if not isinstance(msg, str):\n msg = str(msg)\n\n if start:\n printfMsg = msg + \"\\n\"\n else:\n printfMsg = printfMsg + \"\\n\" + msg\n\n if done:\n debugMsg(printfMsg)\n\ndef start(msg = \"Start\"):\n printf(msg, start = True, done = True)\n\ndef end(msg = \"End\"):\n printf(msg, done = True)\n\n\ndef nomod(coord):\n return (coord[0], coord[1], coord[2])\n\ndef mod(coord, dire = \"z\"):\n xmod = 0\n ymod = 0\n zmod = 0\n if dire == \"x\":\n xmod = .1\n elif dire == \"y\":\n ymod = .1\n elif dire == \"z\":\n zmod = .1\n return (coord[0] + xmod, coord[1] + ymod, coord[2] + zmod)\n\ndef stats(obdata):\n# obdata = bpy.context.object.data\n# print('Vertices:')\n# for v in obdata.vertices:\n# print('{}. {} {} {}'.format(v.index, v.co.x, v.co.y, v.co.z))\n# \n# print('Edges:')\n# for e in obdata.edges:\n# print('{}. {} {}'.format(e.index, e.vertices[0], e.vertices[1]))\n \n printf('Faces:', start = True, done = True)\n\n for f in obdata.polygons:\n# printf('{}. '.format(f.index))\n for e in f.edge_keys:\n# printf('{}'.format(e))\n p1 = e[0]\n p2 = e[1]\n v1 = obdata.vertices[p1].co\n v2 = obdata.vertices[p2].co\n# printf('point {} : {} '.format(p1, v1))\n# printf('point {} : {} '.format(p2, v2))\n# cube = bpy.ops.mesh.primitive_cube_add(radius=.2, view_align=True, enter_editmode=False, location=v1)\n# cube = bpy.ops.mesh.primitive_cube_add(radius=.2, view_align=True, enter_editmode=False, location=v2)\n\n maintyple = tuple()\n maintyple = maintyple + (p1,)\n maintyple = maintyple + (p2,)\n\n verts = list()\n verts.append(nomod(v1))\n verts.append(mod(v1))\n verts.append(mod(v2))\n verts.append(nomod(v2))\n faces = [(0,1,2,3)]\n printf(str(verts))\n# faces = list()\n# faces.append(maintyple)\n\n\n mesh = bpy.data.meshes.new(\"Floor\")\n object = bpy.data.objects.new(\"Floor\", mesh)\n object.location = bpy.context.scene.cursor_location\n bpy.context.scene.objects.link(object)\n mesh.from_pydata(verts,[],faces)\n mesh.update(calc_edges=True)\n\n# for v in f.vertices:\n# print('{} '.format(v), end='')\n printf('', done = True)\n\ndef contained():\n bpy.context.scene.tool_settings.use_mesh_automerge = True\n \n clearEverthing()\n bpy.ops.mesh.primitive_plane_add(radius=2, view_align=False, enter_editmode=False);\n bpy.context.object.name = \"added_plane\"\n\n b = bpy.context.object.data\n stats(b)\n\n# contained()\n\n\ndef floorplan(mesh, dist):\n bpy.ops.object.duplicate_move(OBJECT_OT_duplicate={\"linked\":False, \"mode\":'TRANSLATION'}, TRANSFORM_OT_translate={\"value\":(0, 0, dist), \"constraint_axis\":(False, False, False)})\n\ndef get_distance_between(obj1, obj2):\n sobs = bpy.context.scene.objects\n p1 = np.array(sobs[obj1].location)\n p2 = np.array(sobs[obj2].location)\n squared_dist = np.sum((p1 - p2)**2, axis=0)\n dist = np.sqrt(squared_dist)\n return dist\n\ndef get_random_color():\n ''' generate rgb using a list comprehension '''\n r, g, b = [random.random() for i in range(3)]\n return (r, g, b)\n\ndef clearEverything():\n try:\n bpy.ops.object.mode_set(mode = 'OBJECT')\n except:\n pass\n bpy.context.scene.cursor_location = (0,0,0)\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete(use_global=False)\n\ndef get_override(area_type, region_type):\n for area in bpy.context.screen.areas: \n if area.type == area_type: \n for region in area.regions: \n if region.type == region_type: \n override = {'area': area, 'region': region} \n return 
override\n #error message if the area or region wasn't found\n raise RuntimeError(\"Wasn't able to find\", region_type,\" in area \", area_type,\n \"\\n Make sure it's open while executing script.\")\n\ndef area_of_type(type_name):\n for area in bpy.context.screen.areas:\n if area.type == type_name:\n return area\n\ndef get_3d_view():\n return area_of_type('VIEW_3D').spaces[0]\n\nimport os\ndef copyToClipBoard():\n file = open(\"testfile.txt\",\"w\") \n file.write(\"bpy.context.active_object.location = ({}, {}, {})\\n\".format(\n bpy.context.active_object.location[0], \n bpy.context.active_object.location[1], \n bpy.context.active_object.location[2]))\n file.write(\"bpy.context.active_object.rotation_euler = ({}, {}, {})\".format(\n bpy.context.active_object.rotation_euler[0], \n bpy.context.active_object.rotation_euler[1], \n bpy.context.active_object.rotation_euler[2]))\n\n file.close() \n os.system(\"type testfile.txt | clip\")\n\nprint (\"hello world\")\n# C.object.location\ndef getbm():\n obj = bpy.context.edit_object\n me = obj.data\n bm = bmesh.from_edit_mesh(me)\n return bm\n \nC = bpy.context\nD = bpy.data\nS = D.scenes['Scene']\n\ndef getfaces():\n return getbm().faces\n\ndef well():\n # active face bm.faces.active\n bm = getbm()\n print (bm.faces.active.normal)\n# for face in bm.faces:\n# print (face)\n\n#copyToClipBoard()\n","sub_path":"blender/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"297046024","text":"import os\nimport json\n\ndef save_meta(dcgan, checkpoint_dir):\n data = {\n 'output_height':dcgan.generator.output_height,\n 'output_width':dcgan.generator.output_width,\n 'z_dim':dcgan.generator.z_dim,\n 'gf_dim':dcgan.generator.filter_dim,\n 'df_dim':dcgan.discriminator.filter_dim,\n 'channel':dcgan.generator.channel}\n with open(os.path.join(checkpoint_dir, 'meta.json'), 'w') as f:\n json.dump(data,f)\n\ndef load_meta(checkpoint_dir):\n with open(os.path.join(checkpoint_dir, 'meta.json'), 'r') as f:\n return json.load(f)\n","sub_path":"meta_saver.py","file_name":"meta_saver.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"506843412","text":"from pprint import pprint\nimport requests\n\nclass YaUploader:\n def __init__(self, file_path: str):\n self.file_path = file_path\n\n def upload(self):\n upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n headers = {'Content-Type': 'application/json', 'Authorization': ''}\n params = {\"path\": self.file_path, \"overwrite\": \"true\"}\n response = requests.get(upload_url, headers=headers, params=params)\n response.json()\n\n href = response.json().get(\"href\", \"\")\n response = requests.put(href, data=open(self.file_path, 'rb'))\n response.raise_for_status()\n if response.status_code == 201:\n print()\n print(\"Success\")\n\n return response.raise_for_status()\n\nif __name__ == '__main__':\n uploader = YaUploader(\"hw_python.txt\")\n result = uploader.upload()","sub_path":"yandex.py","file_name":"yandex.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"201931216","text":"#\n# Copyright 2012-2013 eNovance <licensing@enovance.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport itertools\n\nfrom oslo_config import cfg\nfrom oslo_context import context\nfrom oslo_log import log\nimport oslo_messaging\nfrom stevedore import extension\n\nfrom ceilometer.agent import plugin_base as base\nfrom ceilometer import coordination\nfrom ceilometer.event import endpoint as event_endpoint\nfrom ceilometer.i18n import _, _LI, _LW\nfrom ceilometer import exchange_control\nfrom ceilometer import messaging\nfrom ceilometer import pipeline\nfrom ceilometer import service_base\nfrom ceilometer import utils\n\n\nLOG = log.getLogger(__name__)\n\n\nOPTS = [\n cfg.IntOpt('pipeline_processing_queues',\n default=10,\n min=1,\n help='Number of queues to parallelize workload across. This '\n 'value should be larger than the number of active '\n 'notification agents for optimal results.'),\n cfg.BoolOpt('ack_on_event_error',\n default=True,\n deprecated_group='collector',\n help='Acknowledge message when event persistence fails.'),\n cfg.BoolOpt('store_events',\n deprecated_group='collector',\n default=False,\n help='Save event details.'),\n cfg.BoolOpt('disable_non_metric_meters',\n default=True,\n help='WARNING: Ceilometer historically offered the ability to '\n 'store events as meters. This usage is NOT advised as it '\n 'can flood the metering database and cause performance '\n 'degradation.'),\n cfg.BoolOpt('workload_partitioning',\n default=False,\n help='Enable workload partitioning, allowing multiple '\n 'notification agents to be run simultaneously.'),\n cfg.MultiStrOpt('messaging_urls',\n default=[],\n secret=True,\n help=\"Messaging URLs to listen for notifications. \"\n \"Example: transport://user:pass@host1:port\"\n \"[,hostN:portN]/virtual_host \"\n \"(DEFAULT/transport_url is used if empty)\"),\n]\n\ncfg.CONF.register_opts(exchange_control.EXCHANGE_OPTS)\ncfg.CONF.register_opts(OPTS, group=\"notification\")\ncfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging',\n group='publisher_notifier')\n\n\nclass NotificationService(service_base.BaseService):\n \"\"\"Notification service.\n\n When running multiple agents, additional queuing sequence is required for\n inter process communication. Each agent has two listeners: one to listen\n to the main OpenStack queue and another listener(and notifier) for IPC to\n divide pipeline sink endpoints. 
Coordination should be enabled to have\n proper active/active HA.\n \"\"\"\n\n NOTIFICATION_NAMESPACE = 'ceilometer.notification'\n NOTIFICATION_IPC = 'ceilometer-pipe'\n\n def __init__(self, *args, **kwargs):\n super(NotificationService, self).__init__(*args, **kwargs)\n self.partition_coordinator = None\n self.listeners, self.pipeline_listeners = [], []\n self.group_id = None\n\n @classmethod\n def _get_notifications_manager(cls, pm):\n return extension.ExtensionManager(\n namespace=cls.NOTIFICATION_NAMESPACE,\n invoke_on_load=True,\n invoke_args=(pm, )\n )\n\n def _get_notifiers(self, transport, pipe):\n notifiers = []\n for x in range(cfg.CONF.notification.pipeline_processing_queues):\n notifiers.append(oslo_messaging.Notifier(\n transport,\n driver=cfg.CONF.publisher_notifier.telemetry_driver,\n publisher_id='ceilometer.notification',\n topic='%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, x)))\n return notifiers\n\n def _get_pipe_manager(self, transport, pipeline_manager):\n\n if cfg.CONF.notification.workload_partitioning:\n pipe_manager = pipeline.SamplePipelineTransportManager()\n for pipe in pipeline_manager.pipelines:\n key = pipeline.get_pipeline_grouping_key(pipe)\n pipe_manager.add_transporter(\n (pipe.source.support_meter, key or ['resource_id'],\n self._get_notifiers(transport, pipe)))\n else:\n pipe_manager = pipeline_manager\n\n return pipe_manager\n\n def _get_event_pipeline_manager(self, transport):\n\n if cfg.CONF.notification.store_events:\n if cfg.CONF.notification.workload_partitioning:\n event_pipe_manager = pipeline.EventPipelineTransportManager()\n for pipe in self.event_pipeline_manager.pipelines:\n event_pipe_manager.add_transporter(\n (pipe.source.support_event, ['event_type'],\n self._get_notifiers(transport, pipe)))\n else:\n event_pipe_manager = self.event_pipeline_manager\n\n return event_pipe_manager\n\n def start(self):\n super(NotificationService, self).start()\n\n self.pipeline_manager = pipeline.setup_pipeline()\n\n if cfg.CONF.notification.store_events:\n self.event_pipeline_manager = pipeline.setup_event_pipeline()\n\n self.transport = messaging.get_transport()\n\n if cfg.CONF.notification.workload_partitioning:\n self.ctxt = context.get_admin_context()\n self.group_id = self.NOTIFICATION_NAMESPACE\n self.partition_coordinator = coordination.PartitionCoordinator()\n self.partition_coordinator.start()\n self.partition_coordinator.join_group(self.group_id)\n else:\n # FIXME(sileht): endpoint uses the notification_topics option\n # and it should not because this is an oslo_messaging option\n # not a ceilometer. 
Until we have something to get the\n # notification_topics in another way, we must create a transport\n # to ensure the option has been registered by oslo_messaging.\n messaging.get_notifier(self.transport, '')\n self.group_id = None\n\n self.pipe_manager = self._get_pipe_manager(self.transport,\n self.pipeline_manager)\n self.event_pipe_manager = self._get_event_pipeline_manager(\n self.transport)\n\n self.listeners, self.pipeline_listeners = [], []\n self._configure_main_queue_listeners(self.pipe_manager,\n self.event_pipe_manager)\n\n if cfg.CONF.notification.workload_partitioning:\n self._configure_pipeline_listeners()\n self.partition_coordinator.watch_group(self.group_id,\n self._refresh_agent)\n\n self.tg.add_timer(cfg.CONF.coordination.heartbeat,\n self.partition_coordinator.heartbeat)\n self.tg.add_timer(cfg.CONF.coordination.check_watchers,\n self.partition_coordinator.run_watchers)\n\n if not cfg.CONF.notification.disable_non_metric_meters:\n LOG.warning(_LW('Non-metric meters may be collected. It is highly '\n 'advisable to disable these meters using '\n 'ceilometer.conf or the pipeline.yaml'))\n # Add a dummy thread to have wait() working\n self.tg.add_timer(604800, lambda: None)\n\n self.init_pipeline_refresh()\n\n def _configure_main_queue_listeners(self, pipe_manager,\n event_pipe_manager):\n notification_manager = self._get_notifications_manager(pipe_manager)\n if not list(notification_manager):\n LOG.warning(_('Failed to load any notification handlers for %s'),\n self.NOTIFICATION_NAMESPACE)\n\n ack_on_error = cfg.CONF.notification.ack_on_event_error\n\n endpoints = []\n if cfg.CONF.notification.store_events:\n endpoints.append(\n event_endpoint.EventsNotificationEndpoint(event_pipe_manager))\n\n targets = []\n for ext in notification_manager:\n handler = ext.obj\n if (cfg.CONF.notification.disable_non_metric_meters and\n isinstance(handler, base.NonMetricNotificationBase)):\n continue\n LOG.debug('Event types from %(name)s: %(type)s'\n ' (ack_on_error=%(error)s)',\n {'name': ext.name,\n 'type': ', '.join(handler.event_types),\n 'error': ack_on_error})\n # NOTE(gordc): this could be a set check but oslo_messaging issue\n # https://bugs.launchpad.net/oslo.messaging/+bug/1398511\n # This ensures we don't create multiple duplicate consumers.\n for new_tar in handler.get_targets(cfg.CONF):\n if new_tar not in targets:\n targets.append(new_tar)\n endpoints.append(handler)\n\n urls = cfg.CONF.notification.messaging_urls or [None]\n for url in urls:\n transport = messaging.get_transport(url)\n listener = messaging.get_notification_listener(\n transport, targets, endpoints)\n listener.start()\n self.listeners.append(listener)\n\n def _refresh_agent(self, event):\n self._configure_pipeline_listeners(True)\n\n def _configure_pipeline_listeners(self, reuse_listeners=False):\n ev_pipes = []\n if cfg.CONF.notification.store_events:\n ev_pipes = self.event_pipeline_manager.pipelines\n pipelines = self.pipeline_manager.pipelines + ev_pipes\n transport = messaging.get_transport()\n partitioned = self.partition_coordinator.extract_my_subset(\n self.group_id,\n range(cfg.CONF.notification.pipeline_processing_queues))\n\n queue_set = {}\n for pipe_set, pipe in itertools.product(partitioned, pipelines):\n queue_set['%s-%s-%s' %\n (self.NOTIFICATION_IPC, pipe.name, pipe_set)] = pipe\n\n if reuse_listeners:\n topics = queue_set.keys()\n kill_list = []\n for listener in self.pipeline_listeners:\n if listener.dispatcher.targets[0].topic in topics:\n 
queue_set.pop(listener.dispatcher.targets[0].topic)\n else:\n kill_list.append(listener)\n for listener in kill_list:\n utils.kill_listeners([listener])\n self.pipeline_listeners.remove(listener)\n else:\n utils.kill_listeners(self.pipeline_listeners)\n self.pipeline_listeners = []\n\n for topic, pipe in queue_set.items():\n LOG.debug('Pipeline endpoint: %s from set: %s', pipe.name,\n pipe_set)\n pipe_endpoint = (pipeline.EventPipelineEndpoint\n if isinstance(pipe, pipeline.EventPipeline)\n else pipeline.SamplePipelineEndpoint)\n listener = messaging.get_notification_listener(\n transport,\n [oslo_messaging.Target(topic=topic)],\n [pipe_endpoint(self.ctxt, pipe)])\n listener.start()\n self.pipeline_listeners.append(listener)\n\n def stop(self):\n if self.partition_coordinator:\n self.partition_coordinator.stop()\n utils.kill_listeners(self.listeners + self.pipeline_listeners)\n super(NotificationService, self).stop()\n\n def reload_pipeline(self):\n LOG.info(_LI(\"Reloading notification agent and listeners.\"))\n\n if self.pipeline_validated:\n self.pipe_manager = self._get_pipe_manager(\n self.transport, self.pipeline_manager)\n\n if self.event_pipeline_validated:\n self.event_pipe_manager = self._get_event_pipeline_manager(\n self.transport)\n\n # re-start the main queue listeners.\n utils.kill_listeners(self.listeners)\n self._configure_main_queue_listeners(\n self.pipe_manager, self.event_pipe_manager)\n\n # re-start the pipeline listeners if workload partitioning\n # is enabled.\n if cfg.CONF.notification.workload_partitioning:\n self._configure_pipeline_listeners()\n","sub_path":"ceilometer/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":12976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}